--- documentViewer/documentViewer.py 2010/05/19 16:24:16 1.53 +++ documentViewer/documentViewer.py 2011/08/12 14:41:39 1.175.2.24 @@ -1,78 +1,109 @@ - from OFS.Folder import Folder from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate from Products.PageTemplates.PageTemplateFile import PageTemplateFile -from Products.PythonScripts.standard import url_quote from AccessControl import ClassSecurityInfo from AccessControl import getSecurityManager from Globals import package_home -from Ft.Xml.Domlette import NonvalidatingReader -from Ft.Xml.Domlette import PrettyPrint, Print -from Ft.Xml import EMPTY_NAMESPACE, Parse - -from xml.dom.minidom import parse, parseString +#from Ft.Xml import EMPTY_NAMESPACE, Parse +#import Ft.Xml.Domlette +import xml.etree.ElementTree as ET - -import Ft.Xml.XPath -import cStringIO -import xmlrpclib import os.path import sys -import cgi import urllib import logging import math - import urlparse -from types import * +import re +import string + +from SrvTxtUtils import getInt, getText, getHttpData def logger(txt,method,txt2): """logging""" logging.info(txt+ txt2) -def getInt(number, default=0): - """returns always an int (0 in case of problems)""" - try: - return int(number) - except: - return int(default) - -def getTextFromNode(nodename): - """get the cdata content of a node""" - if nodename is None: - return "" - nodelist=nodename.childNodes - rc = "" - for node in nodelist: - if node.nodeType == node.TEXT_NODE: - rc = rc + node.data - return rc - -def serializeNode(node, encoding='utf-8'): +def serializeNode(node, encoding="utf-8"): """returns a string containing node as XML""" - buf = cStringIO.StringIO() - Print(node, stream=buf, encoding=encoding) - s = buf.getvalue() - buf.close() + s = ET.tostring(node) + + # 4Suite: + # stream = cStringIO.StringIO() + # Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding) + # s = stream.getvalue() + # stream.close() return s - -def getParentDir(path): - """returns pathname shortened by one""" - return '/'.join(path.split('/')[0:-1]) - - -import socket - -def urlopen(url,timeout=2): - """urlopen mit timeout""" - socket.setdefaulttimeout(timeout) - ret=urllib.urlopen(url) - socket.setdefaulttimeout(5) - return ret +def browserCheck(self): + """check the browsers request to find out the browser type""" + bt = {} + ua = self.REQUEST.get_header("HTTP_USER_AGENT") + bt['ua'] = ua + bt['isIE'] = False + bt['isN4'] = False + bt['versFirefox']="" + bt['versIE']="" + bt['versSafariChrome']="" + bt['versOpera']="" + + if string.find(ua, 'MSIE') > -1: + bt['isIE'] = True + else: + bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1) + # Safari oder Chrome identification + try: + nav = ua[string.find(ua, '('):] + nav1=ua[string.find(ua,')'):] + nav2=nav1[string.find(nav1,'('):] + nav3=nav2[string.find(nav2,')'):] + ie = string.split(nav, "; ")[1] + ie1 =string.split(nav1, " ")[2] + ie2 =string.split(nav3, " ")[1] + ie3 =string.split(nav3, " ")[2] + if string.find(ie3, "Safari") >-1: + bt['versSafariChrome']=string.split(ie2, "/")[1] + except: pass + # IE identification + try: + nav = ua[string.find(ua, '('):] + ie = string.split(nav, "; ")[1] + if string.find(ie, "MSIE") > -1: + bt['versIE'] = string.split(ie, " ")[1] + except:pass + # Firefox identification + try: + nav = ua[string.find(ua, '('):] + nav1=ua[string.find(ua,')'):] + if string.find(ie1, "Firefox") >-1: + nav5= string.split(ie1, "/")[1] + logging.debug("FIREFOX: %s"%(nav5)) + bt['versFirefox']=nav5[0:3] + except:pass + #Opera identification + try: + if 
string.find(ua,"Opera") >-1: + nav = ua[string.find(ua, '('):] + nav1=nav[string.find(nav,')'):] + bt['versOpera']=string.split(nav1,"/")[2] + except:pass + + bt['isMac'] = string.find(ua, 'Macintosh') > -1 + bt['isWin'] = string.find(ua, 'Windows') > -1 + bt['isIEWin'] = bt['isIE'] and bt['isWin'] + bt['isIEMac'] = bt['isIE'] and bt['isMac'] + bt['staticHTML'] = False + + return bt + +def getParentPath(path, cnt=1): + """returns pathname shortened by cnt""" + # make sure path doesn't end with / + path = path.rstrip('/') + # split by /, shorten, and reassemble + return '/'.join(path.split('/')[0:-cnt]) ## @@ -80,14 +111,15 @@ def urlopen(url,timeout=2): ## class documentViewer(Folder): """document viewer""" - #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?" - meta_type="Document viewer" security=ClassSecurityInfo() manage_options=Folder.manage_options+( {'label':'main config','action':'changeDocumentViewerForm'}, ) + + metadataService = None + """MetaDataFolder instance""" # templates and forms viewer_main = PageTemplateFile('zpt/viewer_main', globals()) @@ -95,16 +127,18 @@ class documentViewer(Folder): toc_text = PageTemplateFile('zpt/toc_text', globals()) toc_figures = PageTemplateFile('zpt/toc_figures', globals()) page_main_images = PageTemplateFile('zpt/page_main_images', globals()) + page_main_double = PageTemplateFile('zpt/page_main_double', globals()) page_main_text = PageTemplateFile('zpt/page_main_text', globals()) page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals()) + page_main_gis =PageTemplateFile ('zpt/page_main_gis', globals()) page_main_xml = PageTemplateFile('zpt/page_main_xml', globals()) + page_main_pureXml = PageTemplateFile('zpt/page_main_pureXml', globals()) head_main = PageTemplateFile('zpt/head_main', globals()) docuviewer_css = PageTemplateFile('css/docuviewer.css', globals()) info_xml = PageTemplateFile('zpt/info_xml', globals()) - + + thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals()) - security.declareProtected('View management screens','changeDocumentViewerForm') - changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals()) def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"): @@ -121,12 +155,13 @@ class documentViewer(Folder): #self['template'] = templateFolder # Zope-2.12 style self._setObject('template',templateFolder) # old style try: - from Products.XMLRpcTools.XMLRpcTools import XMLRpcServerProxy - xmlRpcClient = XMLRpcServerProxy(id='fulltextclient', serverUrl=textServerName, use_xmlrpc=False) + import MpdlXmlTextServer + textServer = MpdlXmlTextServer.MpdlXmlTextServer(id='fulltextclient',serverName=textServerName) #templateFolder['fulltextclient'] = xmlRpcClient - templateFolder._setObject('fulltextclient',xmlRpcClient) + templateFolder._setObject('fulltextclient',textServer) except Exception, e: - logging.error("Unable to create XMLRpcTools for fulltextclient: "+str(e)) + logging.error("Unable to create MpdlXmlTextServer for fulltextclient: "+str(e)) + try: from Products.zogiLib.zogiLib import zogiLib zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book") @@ -134,8 +169,71 @@ class documentViewer(Folder): templateFolder._setObject('zogilib',zogilib) except Exception, e: logging.error("Unable to create zogiLib for zogilib: "+str(e)) + + try: + # assume MetaDataFolder instance is called metadata + self.metadataService = getattr(self, 'metadata') 
+        except Exception, e:
+            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))
+
+        if digilibBaseUrl is not None:
+            self.digilibBaseUrl = digilibBaseUrl
+
+
+    # proxy text server methods to fulltextclient
+    def getTextPage(self, **args):
+        """get text page"""
+        return self.template.fulltextclient.getTextPage(**args)
+
+    def getOrigPages(self, **args):
+        """get original pages"""
+        return self.template.fulltextclient.getOrigPages(**args)
+
+    def getOrigPagesNorm(self, **args):
+        """get original pages (normalized)"""
+        return self.template.fulltextclient.getOrigPagesNorm(**args)
+
+    def getQuery(self, **args):
+        """get search query results"""
+        return self.template.fulltextclient.getQuery(**args)
+
+    def getSearch(self, **args):
+        """get search"""
+        return self.template.fulltextclient.getSearch(**args)
+
+    def getGisPlaces(self, **args):
+        """get GIS places"""
+        return self.template.fulltextclient.getGisPlaces(**args)
+
+    def getAllGisPlaces(self, **args):
+        """get all GIS places"""
+        return self.template.fulltextclient.getAllGisPlaces(**args)
+
+    def getWordInfo(self, **args):
+        """get word info"""
+        return self.template.fulltextclient.getWordInfo(**args)
+
+    def getLemma(self, **args):
+        """get lemma"""
+        return self.template.fulltextclient.getLemma(**args)
+
+    def getLemmaQuery(self, **args):
+        """get lemma query"""
+        return self.template.fulltextclient.getLemmaQuery(**args)
+
+    def getLex(self, **args):
+        """get lexicon entry"""
+        return self.template.fulltextclient.getLex(**args)
+
+    def getToc(self, **args):
+        """get table of contents"""
+        return self.template.fulltextclient.getToc(**args)
+
+    def getTocPage(self, **args):
+        """get table of contents page"""
+        return self.template.fulltextclient.getTocPage(**args)
+
     security.declareProtected('View','thumbs_rss')
     def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
         '''
@@ -146,7 +244,7 @@ class documentViewer(Folder):
         '''
         
         logging.debug("HHHHHHHHHHHHHH:load the rss")
-        logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
+        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
         
         if not hasattr(self, 'template'):
             # create template folder if it doesn't exist
@@ -156,29 +254,32 @@ class documentViewer(Folder):
             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
         
         docinfo = self.getDocinfo(mode=mode,url=url)
-        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
+        #pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
+        pageinfo = self.getPageinfo(start=start,current=pn, docinfo=docinfo)
+        ''' ZDES '''
         pt = getattr(self.template, 'thumbs_main_rss')
         
         if viewMode=="auto": # auto mode selected
-            if docinfo.get("textURL",'') and self.textViewerUrl: # text URL set and text viewer configured
+            if docinfo.has_key("textURL") or docinfo.get('textURLPath',None): # text URL set and text viewer configured
                 viewMode="text"
             else:
                 viewMode="images"
         
         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
+
     security.declareProtected('View','index_html')
-    def index_html(self,url,mode="texttool",viewMode="auto",tocMode="thumbs",start=None,pn=1,mk=None, query=None, querySearch=None):
-        '''
-        view it
-        @param mode: defines how to access the document behind url
+    def index_html(self,url,mode="texttool",viewMode="auto",viewType=None,tocMode="thumbs",start=1,pn=1):
+        """
+        view page
         @param url: url which contains display information
-        @param viewMode: if images display images, if text display text, default is auto (text,images or auto)
+        @param mode: defines how to access the document behind url
+        @param viewMode: 'images': display images, 'text': display text, default is 'auto'
+        @param viewType: sub-type of viewMode, e.g. 'dict' for viewMode='text'
         @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
-        @param querySearch: type of different search modes (fulltext, fulltextMorph, xpath, xquery, ftIndex, ftIndexMorph, fulltextMorphLemma)
-        '''
+        """
        
-        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
+        logging.debug("documentViewer(index_html) mode=%s url=%s viewMode=%s viewType=%s start=%s pn=%s"%(mode,url,viewMode,viewType,start,pn))
        
        if not hasattr(self, 'template'):
            # this won't work
@@ -186,41 +287,69 @@ class documentViewer(Folder):
            return "ERROR: template folder missing!"
            
        if not getattr(self, 'digilibBaseUrl', None):
-            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
+            self.digilibBaseUrl = self.findDigilibUrl() or "http://digilib.mpiwg-berlin.mpg.de/digitallibrary"
        
        docinfo = self.getDocinfo(mode=mode,url=url)
-        
        if tocMode != "thumbs":
            # get table of contents
            docinfo = self.getToc(mode=tocMode, docinfo=docinfo)
-            
-        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)
-        
-        if viewMode=="auto": # auto mode selected
-            if docinfo.get("textURL",''): # text URL set and text viewer configured
-                viewMode="text"
+
+        # auto viewMode: text if there is a text else images
+        if viewMode=="auto":
+            if docinfo.get('textURL', None) or docinfo.get('textURLPath', None):
+                viewMode = "text"
+                viewType = "dict"
            else:
-                viewMode="images"
+                viewMode = "images"
                
-        pt = getattr(self.template, 'viewer_main')
-        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
+        elif viewMode == "text_dict":
+            # legacy fix
+            viewMode = "text"
+            viewType = "dict"
+
+        # stringify viewType
+        if isinstance(viewType, list):
+            logging.debug("index_html: viewType is list:%s"%viewType)
+            viewType = ','.join([t for t in viewType if t])
+
+        pageinfo = self.getPageinfo(start=start, current=pn, docinfo=docinfo, viewMode=viewMode, viewType=viewType, tocMode=tocMode)
+
+        # get template /template/viewer_$viewMode
+        pt = getattr(self.template, 'viewer_%s'%viewMode, None)
+        if pt is None:
+            logging.error("No template for viewMode=%s!"%viewMode)
+            # TODO: error page?
+ return "No template for viewMode=%s!"%viewMode + + # and execute with parameters + return pt(docinfo=docinfo, pageinfo=pageinfo) def generateMarks(self,mk): ret="" if mk is None: return "" - if type(mk) is not ListType: - mk=[mk] + if not isinstance(mk, list): + mk=[mk] for m in mk: ret+="mk=%s"%m return ret - - + + + def getBrowser(self): + """getBrowser the version of browser """ + bt = browserCheck(self) + logging.debug("BROWSER VERSION: %s"%(bt)) + return bt + def findDigilibUrl(self): """try to get the digilib URL from zogilib""" url = self.template.zogilib.getDLBaseUrl() return url + + def getDocumentViewerURL(self): + """returns the URL of this instance""" + return self.absolute_url() def getStyle(self, idx, selected, style=""): """returns a string with the given style and append 'sel' if path == selected.""" @@ -230,44 +359,63 @@ class documentViewer(Folder): else: return style - def getLink(self,param=None,val=None): - """link to documentviewer with parameter param set to val""" - params=self.REQUEST.form.copy() + def getParams(self, param=None, val=None, params=None, duplicates=None): + """returns dict with URL parameters. + + Takes URL parameters and additionally param=val or dict params. + Deletes key if value is None.""" + # copy existing request params + newParams=self.REQUEST.form.copy() + # change single param if param is not None: if val is None: - if params.has_key(param): - del params[param] + if newParams.has_key(param): + del newParams[param] else: - params[param] = str(val) - - if params.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath - params["mode"] = "imagepath" - params["url"] = getParentDir(params["url"]) + newParams[param] = str(val) - # quote values and assemble into query string - ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()]) - url=self.REQUEST['URL1']+"?"+ps + # change more params + if params is not None: + for (k, v) in params.items(): + if v is None: + # val=None removes param + if newParams.has_key(k): + del newParams[k] + + else: + newParams[k] = v + + if duplicates: + # eliminate lists (coming from duplicate keys) + for (k,v) in newParams.items(): + if isinstance(v, list): + if duplicates == 'comma': + # make comma-separated list of non-empty entries + newParams[k] = ','.join([t for t in v if t]) + elif duplicates == 'first': + # take first non-empty entry + newParams[k] = [t for t in v if t][0] + + return newParams + + def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&', duplicates='comma'): + """returns URL to documentviewer with parameter param set to val or from dict params""" + urlParams = self.getParams(param=param, val=val, params=params, duplicates=duplicates) + # quote values and assemble into query string (not escaping '/') + ps = paramSep.join(["%s=%s"%(k,urllib.quote_plus(unicode(v),'/')) for (k, v) in urlParams.items()]) + if baseUrl is None: + baseUrl = self.getDocumentViewerURL() + + url = "%s?%s"%(baseUrl, ps) return url - def getLinkAmp(self,param=None,val=None): + def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None, duplicates='comma'): """link to documentviewer with parameter param set to val""" - params=self.REQUEST.form.copy() - if param is not None: - if val is None: - if params.has_key(param): - del params[param] - else: - params[param] = str(val) - - # quote values and assemble into query string - logging.info("XYXXXXX: %s"%repr(params.items())) - ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, 
v) in params.items()]) - url=self.REQUEST['URL1']+"?"+ps - return url + return self.getLink(param=param, val=val, params=params, baseUrl=baseUrl, paramSep='&', duplicates=duplicates) + def getInfo_xml(self,url,mode): """returns info about the document as XML""" - if not self.digilibBaseUrl: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary" @@ -275,371 +423,288 @@ class documentViewer(Folder): pt = getattr(self.template, 'info_xml') return pt(docinfo=docinfo) - def isAccessible(self, docinfo): """returns if access to the resource is granted""" access = docinfo.get('accessType', None) - logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access) - if access is not None and access == 'free': - logger("documentViewer (accessOK)", logging.INFO, "access is free") + logging.debug("documentViewer (accessOK) access type %s"%access) + if access == 'free': + logging.debug("documentViewer (accessOK) access is free") return True + elif access is None or access in self.authgroups: # only local access -- only logged in users user = getSecurityManager().getUser() + logging.debug("documentViewer (accessOK) user=%s ip=%s"%(user,self.REQUEST.getClientAddr())) if user is not None: #print "user: ", user return (user.getUserName() != "Anonymous User") else: return False - logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access) + logging.error("documentViewer (accessOK) unknown access type %s"%access) return False - - def getDirinfoFromDigilib(self,path,docinfo=None,cut=0): - """gibt param von dlInfo aus""" - num_retries = 3 - if docinfo is None: - docinfo = {} - - for x in range(cut): - - path=getParentDir(path) - - infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path - - logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl)) - - for cnt in range(num_retries): - try: - # dom = NonvalidatingReader.parseUri(imageUrl) - txt=urllib.urlopen(infoUrl).read() - dom = Parse(txt) - break - except: - logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt)) - else: - raise IOError("Unable to get dir-info from %s"%(infoUrl)) - - sizes=dom.xpath("//dir/size") - logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes) - - if sizes: - docinfo['numPages'] = int(getTextFromNode(sizes[0])) - else: - docinfo['numPages'] = 0 - - # TODO: produce and keep list of image names and numbers - - return docinfo - + + + def getDocinfo(self, mode, url): + """returns docinfo depending on mode""" + logging.debug("getDocinfo: mode=%s, url=%s"%(mode,url)) + # look for cached docinfo in session + if self.REQUEST.SESSION.has_key('docinfo'): + docinfo = self.REQUEST.SESSION['docinfo'] + # check if its still current + if docinfo is not None and docinfo.get('mode', None) == mode and docinfo.get('url', None) == url: + logging.debug("getDocinfo: docinfo in session. 
keys=%s"%docinfo.keys()) + return docinfo - def getIndexMeta(self, url): - """returns dom of index.meta document at url""" - num_retries = 3 - dom = None - metaUrl = None - if url.startswith("http://"): - # real URL - metaUrl = url - else: - # online path - server=self.digilibBaseUrl+"/servlet/Texter?fn=" - metaUrl=server+url.replace("/mpiwg/online","") - if not metaUrl.endswith("index.meta"): - metaUrl += "/index.meta" - logging.debug("METAURL: %s"%metaUrl) - for cnt in range(num_retries): - try: - # patch dirk encoding fehler treten dann nicht mehr auf - # dom = NonvalidatingReader.parseUri(metaUrl) - txt=urllib.urlopen(metaUrl).read() - dom = Parse(txt) - break - except: - logger("ERROR documentViewer (getIndexMeta)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2]) - - if dom is None: - raise IOError("Unable to read index meta from %s"%(url)) - - return dom - - def getPresentationInfoXML(self, url): - """returns dom of info.xml document at url""" - num_retries = 3 - dom = None - metaUrl = None - if url.startswith("http://"): - # real URL - metaUrl = url + # new docinfo + docinfo = {'mode': mode, 'url': url} + # add self url + docinfo['viewerUrl'] = self.getDocumentViewerURL() + docinfo['digilibBaseUrl'] = self.digilibBaseUrl + # get index.meta DOM + docUrl = None + metaDom = None + if mode=="texttool": + # url points to document dir or index.meta + metaDom = self.metadataService.getDomFromPathOrUrl(url) + docUrl = url.replace('/index.meta', '') + if metaDom is None: + raise IOError("Unable to find index.meta for mode=texttool!") + + elif mode=="imagepath": + # url points to folder with images, index.meta optional + # asssume index.meta in parent dir + docUrl = getParentPath(url) + metaDom = self.metadataService.getDomFromPathOrUrl(docUrl) + + elif mode=="filepath": + # url points to image file, index.meta optional + # asssume index.meta is two path segments up + docUrl = getParentPath(url, 2) + metaDom = self.metadataService.getDomFromPathOrUrl(docUrl) + else: - # online path - server=self.digilibBaseUrl+"/servlet/Texter?fn=" - metaUrl=server+url.replace("/mpiwg/online","") + logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode) + raise ValueError("Unknown mode %s! 
Has to be one of 'texttool','imagepath','filepath'."%(mode)) - for cnt in range(num_retries): - try: - # patch dirk encoding fehler treten dann nicht mehr auf - # dom = NonvalidatingReader.parseUri(metaUrl) - txt=urllib.urlopen(metaUrl).read() - dom = Parse(txt) - break - except: - logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2]) + docinfo['documentUrl'] = docUrl + # process index.meta contents + if metaDom is not None and metaDom.tag == 'resource': + # document directory name and path + resource = self.metadataService.getResourceData(dom=metaDom) + if resource: + docinfo = self.getDocinfoFromResource(docinfo, resource) + + # texttool info + texttool = self.metadataService.getTexttoolData(dom=metaDom) + if texttool: + docinfo = self.getDocinfoFromTexttool(docinfo, texttool) + + # bib info + bib = self.metadataService.getBibData(dom=metaDom) + if bib: + docinfo = self.getDocinfoFromBib(docinfo, bib) + else: + # no bib - try info.xml + docinfo = self.getDocinfoFromPresentationInfoXml(docinfo) - if dom is None: - raise IOError("Unable to read infoXMLfrom %s"%(url)) - - return dom - - - def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): - """gets authorization info from the index.meta file at path or given by dom""" - logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path)) - - access = None - - if docinfo is None: - docinfo = {} - - if dom is None: - for x in range(cut): - path=getParentDir(path) - dom = self.getIndexMeta(path) - - acctype = dom.xpath("//access-conditions/access/@type") - if acctype and (len(acctype)>0): - access=acctype[0].value - if access in ['group', 'institution']: - access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower() - - docinfo['accessType'] = access - return docinfo - - - def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): - """gets bibliographical info from the index.meta file at path or given by dom""" - logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path)) - - if docinfo is None: - docinfo = {} - - if dom is None: - for x in range(cut): - path=getParentDir(path) - dom = self.getIndexMeta(path) - - logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path)) - # put in all raw bib fields as dict "bib" - bib = dom.xpath("//bib/*") - if bib and len(bib)>0: - bibinfo = {} - for e in bib: - bibinfo[e.localName] = getTextFromNode(e) - docinfo['bib'] = bibinfo - - # extract some fields (author, title, year) according to their mapping - metaData=self.metadata.main.meta.bib - bibtype=dom.xpath("//bib/@type") - if bibtype and (len(bibtype)>0): - bibtype=bibtype[0].value - else: - bibtype="generic" + # auth info + access = self.metadataService.getAccessData(dom=metaDom) + if access: + docinfo = self.getDocinfoFromAccess(docinfo, access) + + # attribution info + attribution = self.metadataService.getAttributionData(dom=metaDom) + if attribution: + logging.debug("getDocinfo: attribution=%s"%repr(attribution)) + docinfo['attribution'] = attribution + #docinfo = self.getDocinfoFromAccess(docinfo, access) + + # copyright info + copyright = self.metadataService.getCopyrightData(dom=metaDom) + if copyright: + logging.debug("getDocinfo: copyright=%s"%repr(copyright)) + docinfo['copyright'] = copyright + #docinfo = self.getDocinfoFromAccess(docinfo, access) + + # image path + if mode != 'texttool': + # override image path from texttool with url + docinfo['imagePath'] = url.replace('/mpiwg/online/', '', 1) 
+ - bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC) - docinfo['bib_type'] = bibtype - bibmap=metaData.generateMappingForType(bibtype) - # if there is no mapping bibmap is empty (mapping sometimes has empty fields) - if len(bibmap) > 0 and len(bibmap['author'][0]) > 0: - try: - docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0]) - except: pass - try: - docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0]) - except: pass - try: - docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0]) - except: pass - logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype) - try: - docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0]) - except: - docinfo['lang']='' + # number of images from digilib + if docinfo.get('imagePath', None): + docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + docinfo['imagePath'] + docinfo = self.getDocinfoFromDigilib(docinfo, docinfo['imagePath']) + + logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys()) + #logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo) + # store in session + self.REQUEST.SESSION['docinfo'] = docinfo return docinfo - - - def getDocinfoFromTextTool(self, url, dom=None, docinfo=None): - """parse texttool tag in index meta""" - logger("documentViewer (getdocinfofromtexttool)", logging.INFO, "url: %s" % (url)) - if docinfo is None: - docinfo = {} - if docinfo.get('lang', None) is None: - docinfo['lang'] = '' # default keine Sprache gesetzt - if dom is None: - dom = self.getIndexMeta(url) - - archivePath = None - archiveName = None - - archiveNames = dom.xpath("//resource/name") - if archiveNames and (len(archiveNames) > 0): - archiveName = getTextFromNode(archiveNames[0]) - else: - logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/name missing in: %s" % (url)) - - archivePaths = dom.xpath("//resource/archive-path") - if archivePaths and (len(archivePaths) > 0): - archivePath = getTextFromNode(archivePaths[0]) - # clean up archive path - if archivePath[0] != '/': - archivePath = '/' + archivePath - if archiveName and (not archivePath.endswith(archiveName)): - archivePath += "/" + archiveName - else: - # try to get archive-path from url - logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/archive-path missing in: %s" % (url)) - if (not url.startswith('http')): - archivePath = url.replace('index.meta', '') + + def getDocinfoFromResource(self, docinfo, resource): + """reads contents of resource element into docinfo""" + docName = resource.get('name', None) + docinfo['documentName'] = docName + docPath = resource.get('archive-path', None) + if docPath: + # clean up document path + if docPath[0] != '/': + docPath = '/' + docPath - if archivePath is None: - # we balk without archive-path - raise IOError("Missing archive-path (for text-tool) in %s" % (url)) - - imageDirs = dom.xpath("//texttool/image") - if imageDirs and (len(imageDirs) > 0): - imageDir = getTextFromNode(imageDirs[0]) + if docName and (not docPath.endswith(docName)): + docPath += "/" + docName else: - # we balk with no image tag / not necessary anymore because textmode is now standard - #raise IOError("No text-tool info in %s"%(url)) - imageDir = "" - #xquery="//pb" - docinfo['imagePath'] = "" # keine Bilder - docinfo['imageURL'] = "" - - if imageDir and archivePath: + # use docUrl as docPath + docUrl = docinfo['documentURL'] + if not 
docUrl.startswith('http:'): + docPath = docUrl + if docPath: + # fix URLs starting with /mpiwg/online + docPath = docPath.replace('/mpiwg/online', '', 1) + + docinfo['documentPath'] = docPath + return docinfo + + def getDocinfoFromTexttool(self, docinfo, texttool): + """reads contents of texttool element into docinfo""" + # image dir + imageDir = texttool.get('image', None) + docPath = docinfo.get('documentPath', None) + if imageDir and docPath: #print "image: ", imageDir, " archivepath: ", archivePath - imageDir = os.path.join(archivePath, imageDir) - imageDir = imageDir.replace("/mpiwg/online", '') - docinfo = self.getDirinfoFromDigilib(imageDir, docinfo=docinfo) + imageDir = os.path.join(docPath, imageDir) + imageDir = imageDir.replace('/mpiwg/online', '', 1) docinfo['imagePath'] = imageDir - - docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir - - viewerUrls = dom.xpath("//texttool/digiliburlprefix") - if viewerUrls and (len(viewerUrls) > 0): - viewerUrl = getTextFromNode(viewerUrls[0]) - docinfo['viewerURL'] = viewerUrl - - textUrls = dom.xpath("//texttool/text") - if textUrls and (len(textUrls) > 0): - textUrl = getTextFromNode(textUrls[0]) + + # old style text URL + textUrl = texttool.get('text', None) + if textUrl and docPath: if urlparse.urlparse(textUrl)[0] == "": #keine url - textUrl = os.path.join(archivePath, textUrl) - # fix URLs starting with /mpiwg/online - if textUrl.startswith("/mpiwg/online"): - textUrl = textUrl.replace("/mpiwg/online", '', 1) + textUrl = os.path.join(docPath, textUrl) docinfo['textURL'] = textUrl - textUrls = dom.xpath("//texttool/text-url-path") - if textUrls and (len(textUrls) > 0): - textUrl = getTextFromNode(textUrls[0]) + # new style text-url-path + textUrl = texttool.get('text-url-path', None) + if textUrl: docinfo['textURLPath'] = textUrl - if not docinfo['imagePath']: - # text-only, no page images - docinfo = self.getNumPages(docinfo) #im moment einfach auf eins setzen, navigation ueber die thumbs geht natuerlich nicht - - presentationUrls = dom.xpath("//texttool/presentation") - docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info von bib tag - - if presentationUrls and (len(presentationUrls) > 0): # ueberschreibe diese durch presentation informationen - # presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten - # durch den relativen Pfad auf die presentation infos - presentationPath = getTextFromNode(presentationUrls[0]) - if url.endswith("index.meta"): - presentationUrl = url.replace('index.meta', presentationPath) + + # page flow + docinfo['pageFlow'] = texttool.get('page-flow', 'ltr') + + # odd pages are left + docinfo['oddPage'] = texttool.get('odd-scan-position', 'left') + + # number of title page (0: not defined) + docinfo['titlePage'] = texttool.get('title-scan-no', 0) + + # old presentation stuff + presentation = texttool.get('presentation', None) + if presentation and docPath: + if presentation.startswith('http:'): + docinfo['presentationUrl'] = presentation else: - presentationUrl = url + "/" + presentationPath - - docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl, docinfo=docinfo, dom=dom) - - docinfo = self.getAuthinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get access info + docinfo['presentationUrl'] = os.path.join(docPath, presentation) + return docinfo - - - def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None): - """gets the bibliographical information from the preseantion entry in texttools - """ - 
dom=self.getPresentationInfoXML(url) - try: - docinfo['author']=getTextFromNode(dom.xpath("//author")[0]) - except: - pass - try: - docinfo['title']=getTextFromNode(dom.xpath("//title")[0]) - except: - pass + + def getDocinfoFromBib(self, docinfo, bib): + """reads contents of bib element into docinfo""" + logging.debug("getDocinfoFromBib bib=%s"%repr(bib)) + # put all raw bib fields in dict "bib" + docinfo['bib'] = bib + bibtype = bib.get('@type', None) + docinfo['bibType'] = bibtype + # also store DC metadata for convenience + dc = self.metadataService.getDCMappedData(bib) + docinfo['creator'] = dc.get('creator',None) + docinfo['title'] = dc.get('title',None) + docinfo['date'] = dc.get('date',None) + return docinfo + + def getDocinfoFromAccess(self, docinfo, acc): + """reads contents of access element into docinfo""" + #TODO: also read resource type + logging.debug("getDocinfoFromAccess acc=%s"%repr(acc)) try: - docinfo['year']=getTextFromNode(dom.xpath("//date")[0]) + acctype = acc['@attr']['type'] + if acctype: + access=acctype + if access in ['group', 'institution']: + access = acc['name'].lower() + + docinfo['accessType'] = access + except: pass + return docinfo - - def getDocinfoFromImagePath(self,path,docinfo=None,cut=0): - """path ist the path to the images it assumes that the index.meta file is one level higher.""" - logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path)) - if docinfo is None: - docinfo = {} - path=path.replace("/mpiwg/online","") - docinfo['imagePath'] = path - docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut) - - pathorig=path - for x in range(cut): - path=getParentDir(path) - logging.error("PATH:"+path) - imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path - docinfo['imageURL'] = imageUrl - - #path ist the path to the images it assumes that the index.meta file is one level higher. - docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) - docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) - return docinfo - - - def getDocinfo(self, mode, url): - """returns docinfo depending on mode""" - logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url)) - # look for cached docinfo in session - if self.REQUEST.SESSION.has_key('docinfo'): - docinfo = self.REQUEST.SESSION['docinfo'] - # check if its still current - if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url: - logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo) - return docinfo - # new docinfo - docinfo = {'mode': mode, 'url': url} - if mode=="texttool": #index.meta with texttool information - docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo) - elif mode=="imagepath": - docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo) - elif mode=="filepath": - docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1) + + def getDocinfoFromDigilib(self, docinfo, path): + infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path + # fetch data + txt = getHttpData(infoUrl) + if not txt: + logging.error("Unable to get dir-info from %s"%(infoUrl)) + return docinfo + + dom = ET.fromstring(txt) + size = getText(dom.find("size")) + logging.debug("getDocinfoFromDigilib: size=%s"%size) + if size: + docinfo['numPages'] = int(size) else: - logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!") - raise ValueError("Unknown mode %s! 
Has to be one of 'texttool','imagepath','filepath'."%(mode)) - - logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo) - self.REQUEST.SESSION['docinfo'] = docinfo + docinfo['numPages'] = 0 + + # TODO: produce and keep list of image names and numbers return docinfo + + + def getDocinfoFromPresentationInfoXml(self,docinfo): + """gets DC-like bibliographical information from the presentation entry in texttools""" + url = docinfo.get('presentationUrl', None) + if not url: + logging.error("getDocinfoFromPresentation: no URL!") + return docinfo + dom = None + metaUrl = None + if url.startswith("http://"): + # real URL + metaUrl = url + else: + # online path + + server=self.digilibBaseUrl+"/servlet/Texter?fn=" + metaUrl=server+url - def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None, viewMode=None, tocMode=None): + txt=getHttpData(metaUrl) + if txt is None: + logging.error("Unable to read info.xml from %s"%(url)) + return docinfo + + dom = ET.fromstring(txt) + docinfo['creator']=getText(dom.find(".//author")) + docinfo['title']=getText(dom.find(".//title")) + docinfo['date']=getText(dom.find(".//date")) + return docinfo + + + def getPageinfo(self, current=None, start=None, rows=None, cols=None, docinfo=None, viewMode=None, viewType=None, tocMode=None): """returns pageinfo with the given parameters""" + logging.debug("getPageInfo(current=%s, start=%s, rows=%s, cols=%s, viewMode=%s, viewType=%s, tocMode=%s)"%(current,start,rows,cols,viewMode,viewType,tocMode)) pageinfo = {} + pageinfo['viewMode'] = viewMode + pageinfo['viewType'] = viewType + pageinfo['tocMode'] = tocMode + current = getInt(current) pageinfo['current'] = current rows = int(rows or self.thumbrows) @@ -648,307 +713,90 @@ class documentViewer(Folder): pageinfo['cols'] = cols grpsize = cols * rows pageinfo['groupsize'] = grpsize + # is start is empty use one around current start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1))) # int(current / grpsize) * grpsize +1)) pageinfo['start'] = start - pageinfo['end'] = start + grpsize - if (docinfo is not None) and ('numPages' in docinfo): - np = int(docinfo['numPages']) - pageinfo['end'] = min(pageinfo['end'], np) - pageinfo['numgroups'] = int(np / grpsize) - if np % grpsize > 0: - pageinfo['numgroups'] += 1 - - - pageinfo['viewMode'] = viewMode - pageinfo['tocMode'] = tocMode - pageinfo['query'] = self.REQUEST.get('query',' ') - pageinfo['queryType'] = self.REQUEST.get('queryType',' ') + pn = self.REQUEST.get('pn','1') + pageinfo['pn'] = pn + np = int(docinfo.get('numPages', 0)) + if np == 0: + # numPages unknown - maybe we can get it from text page + if docinfo.get('textURLPath', None): + # cache text page as well + pageinfo['textPage'] = self.getTextPage(mode=viewType, pn=pn, docinfo=docinfo, pageinfo=pageinfo) + np = int(docinfo.get('numPages', 0)) + + pageinfo['numgroups'] = int(np / grpsize) + if np % grpsize > 0: + pageinfo['numgroups'] += 1 + + pageFlowLtr = docinfo.get('pageFlow', 'ltr') != 'rtl' + oddScanLeft = docinfo.get('oddPage', 'left') != 'right' + # add zeroth page for two columns + pageZero = (cols == 2 and (pageFlowLtr != oddScanLeft)) + pageinfo['pageZero'] = pageZero + pageinfo['pageList'] = self.getPageList(start=start, rows=rows, cols=cols, pageFlowLtr=pageFlowLtr, pageZero=pageZero, minIdx=1, maxIdx=np) + + pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','reg') + pageinfo['query'] = self.REQUEST.get('query','') + pageinfo['queryType'] = 
self.REQUEST.get('queryType','') pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext') + pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','') + pageinfo['tocPageSize'] = getInt(self.REQUEST.get('tocPageSize', 30)) + pageinfo['queryPageSize'] = getInt(self.REQUEST.get('queryPageSize', 10)) + pageinfo['tocPN'] = getInt(self.REQUEST.get('tocPN', '1')) + pageinfo['searchPN'] = getInt(self.REQUEST.get('searchPN','1')) - pageinfo['textPN'] = self.REQUEST.get('textPN','1') - - pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30') - pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '20') - pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1') - toc = int (pageinfo['tocPN']) - pageinfo['textPages'] =int (toc) - + # limit tocPN if 'tocSize_%s'%tocMode in docinfo: - tocSize = int(docinfo['tocSize_%s'%tocMode]) - tocPageSize = int(pageinfo['tocPageSize']) - # cached toc - + tocSize = docinfo['tocSize_%s'%tocMode] + tocPageSize = pageinfo['tocPageSize'] + # cached toc if tocSize%tocPageSize>0: tocPages=tocSize/tocPageSize+1 else: tocPages=tocSize/tocPageSize - pageinfo['tocPN'] = min (tocPages,toc) - - pageinfo['searchPN'] =self.REQUEST.get('searchPN','1') - pageinfo['sn'] =self.REQUEST.get('sn','1') - - return pageinfo - def getSearch(self, pn=1, pageinfo=None, docinfo=None, query=None, queryType=None): - """get search list""" - docpath = docinfo['textURLPath'] - url = docinfo['url'] - logging.debug("documentViewer (gettoc) docpath: %s"%(docpath)) - logging.debug("documentViewer (gettoc) url: %s"%(url)) - pagesize = pageinfo['queryPageSize'] - pn = pageinfo['searchPN'] - sn = pageinfo['sn'] - query =pageinfo['query'] - queryType =pageinfo['queryType'] - viewMode= pageinfo['viewMode'] - tocMode = pageinfo['tocMode'] - tocPN = pageinfo['tocPN'] - selfurl = self.absolute_url() - logging.debug("documentViewer (gettoc) /mpdl/interface/doc-query.xql?document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s"%(docpath, 'text', queryType, query, pagesize, pn, sn, viewMode)) - page=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s"%(docpath, 'text', queryType, query, pagesize, pn, sn, viewMode) ,outputUnicode=False) - pagexml = page.replace('?document=%s'%str(docpath),'?url=%s'%url) - pagedom = Parse(pagexml) - if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"): - pagedivs = pagedom.xpath("//div[@class='queryResultPage']") - if len(pagedivs)>0: - pagenode=pagedivs[0] - links=pagenode.xpath("//a") - for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") - if hrefNode: - href = hrefNode.nodeValue - if href.startswith('page-fragment.xql'): - selfurl = self.absolute_url() - #l.setAttributeNS(None, "span class = 'hit highlight'", "background-color: #77DD77;") - pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s'%(viewMode,queryType,query,pagesize,pn,tocMode,pn,tocPN)) - hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl) - l.setAttributeNS(None, "onClick", "id='highlighting'") - return serializeNode(pagenode) - - if (queryType=="fulltextMorph"): - pagedivs = pagedom.xpath("//div[@class='queryResult']") - - if len(pagedivs)>0: - pagenode=pagedivs[0] - links=pagenode.xpath("//a") - for l in links: - hrefNode = 
l.getAttributeNodeNS(None, u"href") - if hrefNode: - href = hrefNode.nodeValue - if href.startswith('page-fragment.xql'): - selfurl = self.absolute_url() - pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s'%(viewMode,queryType,query,pagesize,pn,tocMode,pn,tocPN)) - hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl) - if href.startswith('../lt/lemma.xql'): - selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl) - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;") - l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') - return serializeNode(pagenode) - - if (queryType=="ftIndex")or(queryType=="ftIndexMorph"): - pagedivs= pagedom.xpath("//div[@class='queryResultPage']") - if len(pagedivs)>0: - pagenode=pagedivs[0] - links=pagenode.xpath("//a") - for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") - if hrefNode: - href = hrefNode.nodeValue - hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s'%(viewMode,tocMode,tocPN,pn)) - - if href.startswith('../lt/lex.xql'): - # selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace('../lt/lex.xql','%s../template/head_main_voc'%selfurl) - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;") - l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') - if href.startswith('../lt/lemma.xql'): - #selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s../template/head_main_lemma'%selfurl) - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;") - l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') - return serializeNode(pagenode) - return "no text here" - - def getNumPages(self,docinfo=None): - """get list of pages from fulltext and put in docinfo""" - xquery = '//pb' - text = self.template.fulltextclient.eval("/mpdl/interface/xquery.xql", "document=%s&xquery=%s"%(docinfo['textURLPath'],xquery)) - # TODO: better processing of the page list. do we need the info somewhere else also? 
- docinfo['numPages'] = text.count(" 0: - - pagenode = pagedivs[0] - links = pagenode.xpath("//a") - for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") - if hrefNode: - href= hrefNode.nodeValue - if href.startswith('#note-0006-01'): - selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace("href='#note-'",'xaxa/%s/'%selfurl) - return serializeNode(pagenode) - if mode == "xml": - # first div contains text - pagedivs = pagedom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - return serializeNode(pagenode) - # text-with-links mode - if mode == "text_dict": - # first div contains text - pagedivs = pagedom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - # check all a-tags - links = pagenode.xpath("//a") - for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") - if hrefNode: - # is link with href - href = hrefNode.nodeValue - if href.startswith('lt/lex.xql'): - # is pollux link - selfurl = self.absolute_url() - # change href - hrefNode.nodeValue = href.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl) - # add target - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;") - l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') - - if href.startswith('lt/lemma.xql'): - selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl) - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;") - l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') - return serializeNode(pagenode) - - return "no text here" - - def getTranslate(self, query=None, language=None): - """translate into another languages""" - pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query))) - return pagexml - - def getLemma(self, lemma=None, language=None): - """simular words lemma """ - pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(lemma))) - #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","lemma=%s&language=%s"%(lemma,language),outputUnicode=False) - return pagexml - - def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): - """number of""" - docpath = docinfo['textURLPath'] - pagesize = pageinfo['queryPageSize'] - pn = pageinfo['searchPN'] - query =pageinfo['query'] - queryType =pageinfo['queryType'] - - tocSearch = 0 - tocDiv = None - pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, query, pagesize, pn) ,outputUnicode=False) - - pagedom = Parse(pagexml) - numdivs = pagedom.xpath("//div[@class='queryResultHits']") - tocSearch = int(getTextFromNode(numdivs[0])) - tc=int((tocSearch/20)+1) - logging.debug("documentViewer (gettoc) tc: %s"%(tc)) - return tc - - def getToc(self, mode="text", docinfo=None): - """loads table of contents and stores in docinfo""" - logging.debug("documentViewer (gettoc) mode: %s"%(mode)) - if 'tocSize_%s'%mode in docinfo: - # cached toc - return docinfo - docpath = docinfo['textURLPath'] - # we need to set a result 
set size - pagesize = 1000 - pn = 1 - if mode == "text": - queryType = "toc" - else: - queryType = mode - # number of entries in toc - tocSize = 0 - tocDiv = None - pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType,pagesize,pn), outputUnicode=False) - # post-processing downloaded xml - pagedom = Parse(pagexml) - # get number of entries - numdivs = pagedom.xpath("//div[@class='queryResultHits']") - if len(numdivs) > 0: - tocSize = int(getTextFromNode(numdivs[0])) - # div contains text - #pagedivs = pagedom.xpath("//div[@class='queryResultPage']") - #if len(pagedivs) > 0: - # tocDiv = pagedivs[0] + return pageinfo - docinfo['tocSize_%s'%mode] = tocSize - #docinfo['tocDiv_%s'%mode] = tocDiv - return docinfo - - def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None): - """returns single page from the table of contents""" - # TODO: this should use the cached TOC - if mode == "text": - queryType = "toc" - else: - queryType = mode - docpath = docinfo['textURLPath'] - path = docinfo['textURLPath'] - #logging.debug("documentViewer (gettoc) pathNomer: %s"%(pathNomer)) - pagesize = pageinfo['tocPageSize'] - pn = pageinfo['tocPN'] - url = docinfo['url'] - selfurl = self.absolute_url() - viewMode= pageinfo['viewMode'] - tocMode = pageinfo['tocMode'] - tocPN = pageinfo['tocPN'] - - pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn), outputUnicode=False) - page = pagexml.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN)) - text = page.replace('mode=image','mode=texttool') - return text - # post-processing downloaded xml - #pagedom = Parse(text) - # div contains text - #pagedivs = pagedom.xpath("//div[@class='queryResultPage']") - #if len(pagedivs) > 0: - # pagenode = pagedivs[0] - # return serializeNode(pagenode) - #else: - # return "No TOC!" 
+ def getPageList(self, start=None, rows=None, cols=None, pageFlowLtr=True, pageZero=False, minIdx=1, maxIdx=0): + """returns array of page informations for one screenfull of thumbnails""" + if maxIdx == 0: + maxIdx = start + rows * cols + + pages = [] + if pageZero and start == 1: + # correct beginning + idx = 0 + else: + idx = start + + for r in range(rows): + row = [] + for c in range(cols): + if idx < minIdx or idx > maxIdx: + page = {'idx':None} + else: + page = {'idx':idx} + + idx += 1 + if pageFlowLtr: + row.append(page) + else: + row.insert(0, page) + + pages.append(row) + + logging.debug("getPageList returns=%s"%(pages)) + return pages + + + security.declareProtected('View management screens','changeDocumentViewerForm') + changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals()) def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None): """init document viewer""" @@ -957,10 +805,14 @@ class documentViewer(Folder): self.thumbrows = thumbrows self.thumbcols = thumbcols self.authgroups = [s.strip().lower() for s in authgroups.split(',')] + try: + # assume MetaDataFolder instance is called metadata + self.metadataService = getattr(self, 'metadata') + except Exception, e: + logging.error("Unable to find MetaDataFolder 'metadata': "+str(e)) + if RESPONSE is not None: RESPONSE.redirect('manage_main') - - def manage_AddDocumentViewerForm(self): """add the viewer form""" @@ -975,10 +827,7 @@ def manage_AddDocumentViewer(self,id,ima if RESPONSE is not None: RESPONSE.redirect('manage_main') - -## ## DocumentViewerTemplate class -## class DocumentViewerTemplate(ZopePageTemplate): """Template for document viewer""" meta_type="DocumentViewer Template"