--- documentViewer/documentViewer.py 2011/08/31 16:22:12 1.175.2.34
+++ documentViewer/documentViewer.py 2011/07/29 10:33:06 1.176
@@ -1,33 +1,56 @@
+
 from OFS.Folder import Folder
 from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
-from Products.PageTemplates.PageTemplateFile import PageTemplateFile
-from App.ImageFile import ImageFile
+from Products.PageTemplates.PageTemplateFile import PageTemplateFile
 from AccessControl import ClassSecurityInfo
 from AccessControl import getSecurityManager
 from Globals import package_home
+from Products.zogiLib.zogiLib import browserCheck
-import xml.etree.ElementTree as ET
-
-import os
+from Ft.Xml import EMPTY_NAMESPACE, Parse
+import Ft.Xml.Domlette
+import os.path
 import sys
 import urllib
+import urllib2
 import logging
 import math
 import urlparse
+import cStringIO
 import re
 import string
-from SrvTxtUtils import getInt, getText, getHttpData, refreshingImageFileIndexHtml
+def logger(txt,method,txt2):
+    """logging"""
+    logging.info(txt+ txt2)
+
+def getInt(number, default=0):
+    """returns always an int (0 in case of problems)"""
+    try:
+        return int(number)
+    except:
+        return int(default)
+
+def getTextFromNode(nodename):
+    """get the cdata content of a node"""
+    if nodename is None:
+        return ""
+    nodelist=nodename.childNodes
+    rc = ""
+    for node in nodelist:
+        if node.nodeType == node.TEXT_NODE:
+            rc = rc + node.data
+    return rc
+
 def serializeNode(node, encoding="utf-8"):
     """returns a string containing node as XML"""
-    s = ET.tostring(node)
-
-    # 4Suite:
-    # stream = cStringIO.StringIO()
-    # Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
-    # s = stream.getvalue()
-    # stream.close()
+    stream = cStringIO.StringIO()
+    #logging.debug("BUF: %s"%(stream))
+    Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
+    s = stream.getvalue()
+    #logging.debug("BUF: %s"%(s))
+    stream.close()
     return s
 
 def browserCheck(self):
@@ -91,12 +114,54 @@ def browserCheck(self):
     return bt
 
-def getParentPath(path, cnt=1):
-    """returns pathname shortened by cnt"""
-    # make sure path doesn't end with /
-    path = path.rstrip('/')
-    # split by /, shorten, and reassemble
-    return '/'.join(path.split('/')[0:-cnt])
+
+def getParentDir(path):
+    """returns pathname shortened by one"""
+    return '/'.join(path.split('/')[0:-1])
+
+
+def getHttpData(url, data=None, num_tries=3, timeout=10):
+    """returns result from url+data HTTP request"""
+    # we do GET (by appending data to url)
+    if isinstance(data, str) or isinstance(data, unicode):
+        # if data is string then append
+        url = "%s?%s"%(url,data)
+    elif isinstance(data, dict) or isinstance(data, list) or isinstance(data, tuple):
+        # urlencode
+        url = "%s?%s"%(url,urllib.urlencode(data))
+
+    response = None
+    errmsg = None
+    for cnt in range(num_tries):
+        try:
+            logging.debug("getHttpData(#%s %ss) url=%s"%(cnt+1,timeout,url))
+            if sys.version_info < (2, 6):
+                # set timeout on socket -- ugly :-(
+                import socket
+                socket.setdefaulttimeout(float(timeout))
+                response = urllib2.urlopen(url)
+            else:
+                response = urllib2.urlopen(url,timeout=float(timeout))
+            # check result?
+            break
+        except urllib2.HTTPError, e:
+            logging.error("getHttpData: HTTP error(%s): %s"%(e.code,e))
+            errmsg = str(e)
+            # stop trying
+            break
+        except urllib2.URLError, e:
+            logging.error("getHttpData: URLLIB error(%s): %s"%(e.reason,e))
+            errmsg = str(e)
+            # stop trying
+            #break
+
+    if response is not None:
+        data = response.read()
+        response.close()
+        return data
+
+    raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg))
+    #return None
 
 ##
 ## documentViewer class
 ##
@@ -109,18 +174,12 @@ class documentViewer(Folder):
     manage_options=Folder.manage_options+(
         {'label':'main config','action':'changeDocumentViewerForm'},
         )
-
-    metadataService = None
-    """MetaDataFolder instance"""
 
     # templates and forms
-    viewer_text = PageTemplateFile('zpt/viewer_text', globals())
-    viewer_images = PageTemplateFile('zpt/viewer_images', globals())
     viewer_main = PageTemplateFile('zpt/viewer_main', globals())
     toc_thumbs = PageTemplateFile('zpt/toc_thumbs', globals())
     toc_text = PageTemplateFile('zpt/toc_text', globals())
     toc_figures = PageTemplateFile('zpt/toc_figures', globals())
-    toc_none = PageTemplateFile('zpt/toc_none', globals())
     page_main_images = PageTemplateFile('zpt/page_main_images', globals())
     page_main_double = PageTemplateFile('zpt/page_main_double', globals())
     page_main_text = PageTemplateFile('zpt/page_main_text', globals())
@@ -129,13 +188,13 @@ class documentViewer(Folder):
     page_main_xml = PageTemplateFile('zpt/page_main_xml', globals())
     page_main_pureXml = PageTemplateFile('zpt/page_main_pureXml', globals())
     head_main = PageTemplateFile('zpt/head_main', globals())
+    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
     info_xml = PageTemplateFile('zpt/info_xml', globals())
-    docuviewer_css = ImageFile('css/docuviewer.css',globals())
-    # make ImageFile better for development
-    docuviewer_css.index_html = refreshingImageFileIndexHtml
     thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
+    security.declareProtected('View management screens','changeDocumentViewerForm')
+    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
 
     def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):
@@ -158,7 +217,6 @@ class documentViewer(Folder):
             templateFolder._setObject('fulltextclient',textServer)
         except Exception, e:
            logging.error("Unable to create MpdlXmlTextServer for fulltextclient: "+str(e))
-
        try:
            from Products.zogiLib.zogiLib import zogiLib
            zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")
@@ -166,16 +224,7 @@ class documentViewer(Folder):
            templateFolder._setObject('zogilib',zogilib)
        except Exception, e:
            logging.error("Unable to create zogiLib for zogilib: "+str(e))
-
-        try:
-            # assume MetaDataFolder instance is called metadata
-            self.metadataService = getattr(self, 'metadata')
-        except Exception, e:
-            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))
-
-        if digilibBaseUrl is not None:
-            self.digilibBaseUrl = digilibBaseUrl
-
+
     # proxy text server methods to fulltextclient
     def getTextPage(self, **args):
         """get page"""
         return self.template.fulltextclient.getTextPage(**args)
@@ -206,9 +255,9 @@ class documentViewer(Folder):
         """get all gis places """
         return self.template.fulltextclient.getAllGisPlaces(**args)
 
-    def getWordInfo(self, **args):
+    def getTranslate(self, **args):
         """get translate"""
-        return self.template.fulltextclient.getWordInfo(**args)
+        return self.template.fulltextclient.getTranslate(**args)
 
     def getLemma(self, **args):
         """get lemma"""
         return self.template.fulltextclient.getLemma(**args)
@@ -241,7 +290,7 @@ class documentViewer(Folder):
         '''
         logging.debug("HHHHHHHHHHHHHH:load the rss")
-        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
+        logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
 
         if not hasattr(self, 'template'):
             # create template folder if it doesn't exist
@@ -263,20 +312,20 @@ class documentViewer(Folder):
             viewMode="images"
 
         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
 
-    security.declareProtected('View','index_html')
-    def index_html(self,url,mode="texttool",viewMode="auto",viewType=None,tocMode="thumbs",start=1,pn=1):
-        """
-        view page
-        @param url: url which contains display information
+    def index_html(self,url,mode="texttool",viewMode="auto",tocMode="thumbs",start=None,pn=1,mk=None):
+        '''
+        view it
         @param mode: defines how to access the document behind url
-        @param viewMode: 'images': display images, 'text': display text, default is 'auto'
-        @param viewType: sub-type of viewMode, e.g. 'dict' for viewMode='text'
+        @param url: url which contains display information
+        @param viewMode: if images display images, if text display text, default is auto (text,images or auto)
         @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
-        """
+        @param characterNormalization type of text display (reg, norm, none)
+        @param querySearch: type of different search modes (fulltext, fulltextMorph, xpath, xquery, ftIndex, ftIndexMorph, fulltextMorphLemma)
+        '''
 
-        logging.debug("documentViewer(index_html) mode=%s url=%s viewMode=%s viewType=%s start=%s pn=%s"%(mode,url,viewMode,viewType,start,pn))
+        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
 
         if not hasattr(self, 'template'):
             # this won't work
@@ -291,36 +340,21 @@ class documentViewer(Folder):
         if tocMode != "thumbs":
             # get table of contents
             docinfo = self.getToc(mode=tocMode, docinfo=docinfo)
-
-        # auto viewMode: text if there is a text else images
-        if viewMode=="auto":
-            if docinfo.get('textURL', None) or docinfo.get('textURLPath', None):
-                viewMode = "text"
-                viewType = "dict"
+
+        if viewMode=="auto": # automodus gewaehlt
+            if docinfo.has_key('textURL') or docinfo.get('textURLPath',None): #texturl gesetzt und textViewer konfiguriert
+                viewMode="text_dict"
             else:
-                viewMode = "images"
+                viewMode="images"
 
-        elif viewMode == "text_dict":
-            # legacy fix
-            viewMode = "text"
-            viewType = "dict"
-
-        # stringify viewType
-        if isinstance(viewType, list):
-            logging.debug("index_html: viewType is list:%s"%viewType)
-            viewType = ','.join([t for t in viewType if t])
-
-        pageinfo = self.getPageinfo(start=start, current=pn, docinfo=docinfo, viewMode=viewMode, viewType=viewType, tocMode=tocMode)
-
-        # get template /template/viewer_$viewMode
-        pt = getattr(self.template, 'viewer_%s'%viewMode, None)
-        if pt is None:
-            logging.error("No template for viewMode=%s!"%viewMode)
-            # TODO: error page?
-            return "No template for viewMode=%s!"%viewMode
+        pageinfo = self.getPageinfo(start=start,current=pn, docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)
 
-        # and execute with parameters
-        return pt(docinfo=docinfo, pageinfo=pageinfo)
+        if (docinfo.get('textURLPath',None)):
+            page = self.getTextPage(docinfo=docinfo, pageinfo=pageinfo)
+            pageinfo['textPage'] = page
+        tt = getattr(self, 'template')
+        pt = getattr(tt, 'viewer_main')
+        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
 
     def generateMarks(self,mk):
         ret=""
@@ -343,95 +377,65 @@ class documentViewer(Folder):
         """try to get the digilib URL from zogilib"""
         url = self.template.zogilib.getDLBaseUrl()
         return url
-
-    def getScalerUrl(self, fn=None, pn=None, dw=100, dh=100, docinfo=None):
-        """returns URL to digilib Scaler with params"""
-        url = None
-        if docinfo is not None:
-            url = docinfo.get('imageURL', None)
-
-        if url is None:
-            url = "%s/servlet/Scaler?"%self.digilibBaseUrl
-            if fn is None and docinfo is not None:
-                fn = docinfo.get('imagePath','')
-
-            url += "fn=%s"%fn
-
-        if pn:
-            url += "&pn=%s"%pn
-
-        url += "&dw=%s&dh=%s"%(dw,dh)
-        return url
 
     def getDocumentViewerURL(self):
         """returns the URL of this instance"""
         return self.absolute_url()
 
     def getStyle(self, idx, selected, style=""):
-        """returns a string with the given style and append 'sel' if idx == selected."""
+        """returns a string with the given style and append 'sel' if path == selected."""
         #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
         if idx == selected:
             return style + 'sel'
         else:
            return style
 
-    def getParams(self, param=None, val=None, params=None, duplicates=None):
-        """returns dict with URL parameters.
-
-        Takes URL parameters and additionally param=val or dict params.
-        Deletes key if value is None."""
+    def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&'):
+        """returns URL to documentviewer with parameter param set to val or from dict params"""
         # copy existing request params
-        newParams=self.REQUEST.form.copy()
+        urlParams=self.REQUEST.form.copy()
         # change single param
         if param is not None:
             if val is None:
-                if newParams.has_key(param):
-                    del newParams[param]
+                if urlParams.has_key(param):
+                    del urlParams[param]
             else:
-                newParams[param] = str(val)
+                urlParams[param] = str(val)
 
         # change more params
         if params is not None:
-            for (k, v) in params.items():
+            for k in params.keys():
+                v = params[k]
                 if v is None:
                     # val=None removes param
-                    if newParams.has_key(k):
-                        del newParams[k]
+                    if urlParams.has_key(k):
+                        del urlParams[k]
                 else:
-                    newParams[k] = v
+                    urlParams[k] = v
 
-        if duplicates:
-            # eliminate lists (coming from duplicate keys)
-            for (k,v) in newParams.items():
-                if isinstance(v, list):
-                    if duplicates == 'comma':
-                        # make comma-separated list of non-empty entries
-                        newParams[k] = ','.join([t for t in v if t])
-                    elif duplicates == 'first':
-                        # take first non-empty entry
-                        newParams[k] = [t for t in v if t][0]
-
-        return newParams
-
-    def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&', duplicates='comma'):
-        """returns URL to documentviewer with parameter param set to val or from dict params"""
-        urlParams = self.getParams(param=param, val=val, params=params, duplicates=duplicates)
+        # FIXME: does this belong here?
+        if urlParams.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
+            urlParams["mode"] = "imagepath"
+            urlParams["url"] = getParentDir(urlParams["url"])
+
         # quote values and assemble into query string (not escaping '/')
-        ps = paramSep.join(["%s=%s"%(k,urllib.quote_plus(unicode(v),'/')) for (k, v) in urlParams.items()])
+        ps = paramSep.join(["%s=%s"%(k,urllib.quote_plus(v,'/')) for (k, v) in urlParams.items()])
+        #ps = urllib.urlencode(urlParams)
         if baseUrl is None:
-            baseUrl = self.getDocumentViewerURL()
+            baseUrl = self.REQUEST['URL1']
 
         url = "%s?%s"%(baseUrl, ps)
         return url
 
-    def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None, duplicates='comma'):
+
+    def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None):
         """link to documentviewer with parameter param set to val"""
-        return self.getLink(param=param, val=val, params=params, baseUrl=baseUrl, paramSep='&', duplicates=duplicates)
-
+        return self.getLink(param, val, params, baseUrl, '&')
 
     def getInfo_xml(self,url,mode):
         """returns info about the document as XML"""
+
         if not self.digilibBaseUrl:
             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
@@ -439,14 +443,28 @@ class documentViewer(Folder):
         pt = getattr(self.template, 'info_xml')
         return pt(docinfo=docinfo)
 
+    def getOptionToggle(self, newState=None, optionName='text_options_open', initialState=True):
+        """returns new option state"""
+        if not self.REQUEST.SESSION.has_key(optionName):
+            # not in session -- initial
+            opt = {'lastState': newState, 'state': initialState}
+        else:
+            opt = self.REQUEST.SESSION.get(optionName)
+            if opt['lastState'] != newState:
+                # state in session has changed -- toggle
+                opt['state'] = not opt['state']
+                opt['lastState'] = newState
+
+        self.REQUEST.SESSION[optionName] = opt
+        return opt['state']
+
     def isAccessible(self, docinfo):
         """returns if access to the resource is granted"""
         access = docinfo.get('accessType', None)
         logging.debug("documentViewer (accessOK) access type %s"%access)
-        if access == 'free':
+        if access is not None and access == 'free':
             logging.debug("documentViewer (accessOK) access is free")
             return True
-
         elif access is None or access in self.authgroups:
             # only local access -- only logged in users
             user = getSecurityManager().getUser()
@@ -460,443 +478,475 @@ class documentViewer(Folder):
             logging.error("documentViewer (accessOK) unknown access type %s"%access)
         return False
 
+
+    def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
+        """gibt param von dlInfo aus"""
+        if docinfo is None:
+            docinfo = {}
+
+        for x in range(cut):
+
+            path=getParentDir(path)
+
+        infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
+
+        logging.debug("documentViewer (getparamfromdigilib) dirInfo from %s"%(infoUrl))
+
+        txt = getHttpData(infoUrl)
+        if txt is None:
+            raise IOError("Unable to get dir-info from %s"%(infoUrl))
-
-    def getDocinfo(self, mode, url):
-        """returns docinfo depending on mode"""
-        logging.debug("getDocinfo: mode=%s, url=%s"%(mode,url))
-        # look for cached docinfo in session
-        if self.REQUEST.SESSION.has_key('docinfo'):
-            docinfo = self.REQUEST.SESSION['docinfo']
-            # check if its still current
-            if docinfo is not None and docinfo.get('mode', None) == mode and docinfo.get('url', None) == url:
-                logging.debug("getDocinfo: docinfo in session. keys=%s"%docinfo.keys())
-                return docinfo
+        dom = Parse(txt)
+        sizes=dom.xpath("//dir/size")
+        logging.debug("documentViewer (getparamfromdigilib) dirInfo:size"%sizes)
+
+        if sizes:
+            docinfo['numPages'] = int(getTextFromNode(sizes[0]))
+        else:
+            docinfo['numPages'] = 0
 
-        # new docinfo
-        docinfo = {'mode': mode, 'url': url}
-        # add self url
-        docinfo['viewerUrl'] = self.getDocumentViewerURL()
-        docinfo['digilibBaseUrl'] = self.digilibBaseUrl
-        # get index.meta DOM
-        docUrl = None
-        metaDom = None
-        if mode=="texttool":
-            # url points to document dir or index.meta
-            metaDom = self.metadataService.getDomFromPathOrUrl(url)
-            docUrl = url.replace('/index.meta', '')
-            if metaDom is None:
-                raise IOError("Unable to find index.meta for mode=texttool!")
-
-        elif mode=="imagepath":
-            # url points to folder with images, index.meta optional
-            # asssume index.meta in parent dir
-            docUrl = getParentPath(url)
-            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
-
-        elif mode=="filepath":
-            # url points to image file, index.meta optional
-            # asssume index.meta is two path segments up
-            docUrl = getParentPath(url, 2)
-            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
-
+        # TODO: produce and keep list of image names and numbers
+
+        return docinfo
+
+    def getIndexMetaPath(self,url):
+        """gib nur den Pfad zurueck"""
+        regexp = re.compile(r".*(experimental|permanent)/(.*)")
+        regpath = regexp.match(url)
+        if (regpath==None):
+            return ""
+        logging.debug("(getDomFromIndexMeta): URLXAXA: %s"%regpath.group(2))
+        return ("/mpiwg/online/"+regpath.group(1)+"/"+regpath.group(2))
+
+
+
+    def getIndexMetaUrl(self,url):
+        """returns utr of index.meta document at url"""
+
+        metaUrl = None
+        if url.startswith("http://"):
+            # real URL
+            metaUrl = url
         else:
-            logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode)
-            raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
+            # online path
+            server=self.digilibBaseUrl+"/servlet/Texter?fn="
+            metaUrl=server+url.replace("/mpiwg/online","")
+            if not metaUrl.endswith("index.meta"):
+                metaUrl += "/index.meta"
 
-        docinfo['documentUrl'] = docUrl
-        # process index.meta contents
-        if metaDom is not None and metaDom.tag == 'resource':
-            # document directory name and path
-            resource = self.metadataService.getResourceData(dom=metaDom)
-            if resource:
-                docinfo = self.getDocinfoFromResource(docinfo, resource)
-
-            # texttool info
-            texttool = self.metadataService.getTexttoolData(dom=metaDom)
-            if texttool:
-                docinfo = self.getDocinfoFromTexttool(docinfo, texttool)
-
-            # bib info
-            bib = self.metadataService.getBibData(dom=metaDom)
-            if bib:
-                docinfo = self.getDocinfoFromBib(docinfo, bib)
-            else:
-                # no bib - try info.xml
-                docinfo = self.getDocinfoFromPresentationInfoXml(docinfo)
+        return metaUrl
+
+    def getDomFromIndexMeta(self, url):
+        """get dom from index meta"""
+        dom = None
+        metaUrl = self.getIndexMetaUrl(url)
 
-            # auth info
-            access = self.metadataService.getAccessData(dom=metaDom)
-            if access:
-                docinfo = self.getDocinfoFromAccess(docinfo, access)
-
-            # attribution info
-            attribution = self.metadataService.getAttributionData(dom=metaDom)
-            if attribution:
-                logging.debug("getDocinfo: attribution=%s"%repr(attribution))
-                docinfo['attribution'] = attribution
-                #docinfo = self.getDocinfoFromAccess(docinfo, access)
-
-            # copyright info
-            copyright = self.metadataService.getCopyrightData(dom=metaDom)
-            if copyright:
-                logging.debug("getDocinfo: copyright=%s"%repr(copyright))
-                docinfo['copyright'] = copyright
-                #docinfo = self.getDocinfoFromAccess(docinfo, access)
-
-        # image path
-        if mode != 'texttool':
-            # override image path from texttool with url
-            docinfo['imagePath'] = url.replace('/mpiwg/online/', '', 1)
-
-        # number of images from digilib
-        if docinfo.get('imagePath', None):
-            docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + docinfo['imagePath']
-            docinfo = self.getDocinfoFromDigilib(docinfo, docinfo['imagePath'])
-
-        logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys())
-        #logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
-        # store in session
-        self.REQUEST.SESSION['docinfo'] = docinfo
+        logging.debug("(getDomFromIndexMeta): METAURL: %s"%metaUrl)
+        txt=getHttpData(metaUrl)
+        if txt is None:
+            raise IOError("Unable to read index meta from %s"%(url))
+
+        dom = Parse(txt)
+        return dom
+
+    def getPresentationInfoXML(self, url):
+        """returns dom of info.xml document at url"""
+        dom = None
+        metaUrl = None
+        if url.startswith("http://"):
+            # real URL
+            metaUrl = url
+        else:
+            # online path
+            server=self.digilibBaseUrl+"/servlet/Texter?fn="
+            metaUrl=server+url.replace("/mpiwg/online","")
+
+        txt=getHttpData(metaUrl)
+        if txt is None:
+            raise IOError("Unable to read infoXMLfrom %s"%(url))
+
+        dom = Parse(txt)
+        return dom
+
+
+    def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
+        """gets authorization info from the index.meta file at path or given by dom"""
+        logging.debug("documentViewer (getauthinfofromindexmeta) path: %s"%(path))
+
+        access = None
+
+        if docinfo is None:
+            docinfo = {}
+
+        if dom is None:
+            for x in range(cut):
+                path=getParentDir(path)
+            dom = self.getDomFromIndexMeta(path)
+
+        acctype = dom.xpath("//access-conditions/access/@type")
+        if acctype and (len(acctype)>0):
+            access=acctype[0].value
+            if access in ['group', 'institution']:
+                access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
+
+        docinfo['accessType'] = access
+        return docinfo
+
+
+    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
+        """gets bibliographical info from the index.meta file at path or given by dom"""
+        #logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))
+
+        if docinfo is None:
+            docinfo = {}
+
+        if dom is None:
+            for x in range(cut):
+                path=getParentDir(path)
+            dom = self.getDomFromIndexMeta(path)
+
+        docinfo['indexMetaPath']=self.getIndexMetaPath(path);
+
+        #logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
+        # put in all raw bib fields as dict "bib"
+        bib = dom.xpath("//bib/*")
+        if bib and len(bib)>0:
+            bibinfo = {}
+            for e in bib:
+                bibinfo[e.localName] = getTextFromNode(e)
+            docinfo['bib'] = bibinfo
+
+        # extract some fields (author, title, year) according to their mapping
+        metaData=self.metadata.main.meta.bib
+        bibtype=dom.xpath("//bib/@type")
+        if bibtype and (len(bibtype)>0):
+            bibtype=bibtype[0].value
+        else:
+            bibtype="generic"
+
+        bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC)
+        docinfo['bib_type'] = bibtype
+        bibmap=metaData.generateMappingForType(bibtype)
+        #logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap))
+        #logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype))
+        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
+        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
+            try:
+                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
+            except: pass
+            try:
+                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
+            except: pass
+            try:
+                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
+            except: pass
+            #logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
+            try:
+                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
+            except:
+                docinfo['lang']=''
+            try:
+                docinfo['city']=getTextFromNode(dom.xpath("//bib/city")[0])
+            except:
+                docinfo['city']=''
+            try:
+                docinfo['number_of_pages']=getTextFromNode(dom.xpath("//bib/number_of_pages")[0])
+            except:
+                docinfo['number_of_pages']=''
+            try:
+                docinfo['series_volume']=getTextFromNode(dom.xpath("//bib/series_volume")[0])
+            except:
+                docinfo['series_volume']=''
+            try:
+                docinfo['number_of_volumes']=getTextFromNode(dom.xpath("//bib/number_of_volumes")[0])
+            except:
+                docinfo['number_of_volumes']=''
+            try:
+                docinfo['translator']=getTextFromNode(dom.xpath("//bib/translator")[0])
+            except:
+                docinfo['translator']=''
+            try:
+                docinfo['edition']=getTextFromNode(dom.xpath("//bib/edition")[0])
+            except:
+                docinfo['edition']=''
+            try:
+                docinfo['series_author']=getTextFromNode(dom.xpath("//bib/series_author")[0])
+            except:
+                docinfo['series_author']=''
+            try:
+                docinfo['publisher']=getTextFromNode(dom.xpath("//bib/publisher")[0])
+            except:
+                docinfo['publisher']=''
+            try:
+                docinfo['series_title']=getTextFromNode(dom.xpath("//bib/series_title")[0])
+            except:
+                docinfo['series_title']=''
+            try:
+                docinfo['isbn_issn']=getTextFromNode(dom.xpath("//bib/isbn_issn")[0])
+            except:
+                docinfo['isbn_issn']=''
+        #logging.debug("I NEED BIBTEX %s"%docinfo)
         return docinfo
+
+
+    def getNameFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
+        """gets name info from the index.meta file at path or given by dom"""
+        if docinfo is None:
+            docinfo = {}
+
+        if dom is None:
+            for x in range(cut):
+                path=getParentDir(path)
+            dom = self.getDomFromIndexMeta(path)
 
-    def getDocinfoFromResource(self, docinfo, resource):
-        """reads contents of resource element into docinfo"""
-        docName = resource.get('name', None)
-        docinfo['documentName'] = docName
-        docPath = resource.get('archive-path', None)
-        if docPath:
-            # clean up document path
-            if docPath[0] != '/':
-                docPath = '/' + docPath
+        docinfo['name']=getTextFromNode(dom.xpath("/resource/name")[0])
+        logging.debug("documentViewer docinfo[name] %s"%docinfo['name'])
+        return docinfo
+
+    def getDocinfoFromTextTool(self, url, dom=None, docinfo=None):
+        """parse texttool tag in index meta"""
+        logging.debug("documentViewer (getdocinfofromtexttool) url: %s" % (url))
+        if docinfo is None:
+            docinfo = {}
+        if docinfo.get('lang', None) is None:
+            docinfo['lang'] = '' # default keine Sprache gesetzt
+        if dom is None:
+            dom = self.getDomFromIndexMeta(url)
+
+        archivePath = None
+        archiveName = None
+
+        archiveNames = dom.xpath("//resource/name")
+        if archiveNames and (len(archiveNames) > 0):
+            archiveName = getTextFromNode(archiveNames[0])
+        else:
+            logging.warning("documentViewer (getdocinfofromtexttool) resource/name missing in: %s" % (url))
+
+        archivePaths = dom.xpath("//resource/archive-path")
+        if archivePaths and (len(archivePaths) > 0):
+            archivePath = getTextFromNode(archivePaths[0])
+            # clean up archive path
+            if archivePath[0] != '/':
+                archivePath = '/' + archivePath
+            if archiveName and (not archivePath.endswith(archiveName)):
+                archivePath += "/" + archiveName
+        else:
+            # try to get archive-path from url
+            logging.warning("documentViewer (getdocinfofromtexttool) resource/archive-path missing in: %s" % (url))
+            if (not url.startswith('http')):
+                archivePath = url.replace('index.meta', '')
 
-            if docName and (not docPath.endswith(docName)):
-                docPath += "/" + docName
+        if archivePath is None:
+            # we balk without archive-path
+            raise IOError("Missing archive-path (for text-tool) in %s" % (url))
+
+        imageDirs = dom.xpath("//texttool/image")
+        if imageDirs and (len(imageDirs) > 0):
+            imageDir = getTextFromNode(imageDirs[0])
+
+        else:
+            # we balk with no image tag / not necessary anymore because textmode is now standard
+            #raise IOError("No text-tool info in %s"%(url))
+            imageDir = ""
+            #xquery="//pb"
+            docinfo['imagePath'] = "" # keine Bilder
+            docinfo['imageURL'] = ""
 
-        else:
-            # use docUrl as docPath
-            docUrl = docinfo['documentURL']
-            if not docUrl.startswith('http:'):
-                docPath = docUrl
-        if docPath:
-            # fix URLs starting with /mpiwg/online
-            docPath = docPath.replace('/mpiwg/online', '', 1)
-
-        docinfo['documentPath'] = docPath
-        return docinfo
-
-    def getDocinfoFromTexttool(self, docinfo, texttool):
-        """reads contents of texttool element into docinfo"""
-        # image dir
-        imageDir = texttool.get('image', None)
-        docPath = docinfo.get('documentPath', None)
-        if imageDir and docPath:
+        if imageDir and archivePath:
            #print "image: ", imageDir, " archivepath: ", archivePath
-            imageDir = os.path.join(docPath, imageDir)
-            imageDir = imageDir.replace('/mpiwg/online', '', 1)
+            imageDir = os.path.join(archivePath, imageDir)
+            imageDir = imageDir.replace("/mpiwg/online", '')
+            docinfo = self.getDirinfoFromDigilib(imageDir, docinfo=docinfo)
            docinfo['imagePath'] = imageDir
+
+        docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir
+
+        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
+        if viewerUrls and (len(viewerUrls) > 0):
+            viewerUrl = getTextFromNode(viewerUrls[0])
+            docinfo['viewerURL'] = viewerUrl
 
         # old style text URL
-        textUrl = texttool.get('text', None)
-        if textUrl and docPath:
+        textUrls = dom.xpath("//texttool/text")
+        if textUrls and (len(textUrls) > 0):
+            textUrl = getTextFromNode(textUrls[0])
             if urlparse.urlparse(textUrl)[0] == "": #keine url
-                textUrl = os.path.join(docPath, textUrl)
+                textUrl = os.path.join(archivePath, textUrl)
+            # fix URLs starting with /mpiwg/online
+            if textUrl.startswith("/mpiwg/online"):
+                textUrl = textUrl.replace("/mpiwg/online", '', 1)
 
             docinfo['textURL'] = textUrl
 
         # new style text-url-path
-        textUrl = texttool.get('text-url-path', None)
-        if textUrl:
+        textUrls = dom.xpath("//texttool/text-url-path")
+        if textUrls and (len(textUrls) > 0):
+            textUrl = getTextFromNode(textUrls[0])
             docinfo['textURLPath'] = textUrl
-
-        # page flow
-        docinfo['pageFlow'] = texttool.get('page-flow', 'ltr')
-
-        # odd pages are left
-        docinfo['oddPage'] = texttool.get('odd-scan-position', 'left')
-
-        # number of title page (0: not defined)
-        docinfo['titlePage'] = texttool.get('title-scan-no', 0)
-
-        # old presentation stuff
-        presentation = texttool.get('presentation', None)
-        if presentation and docPath:
-            if presentation.startswith('http:'):
-                docinfo['presentationUrl'] = presentation
+            textUrlkurz = string.split(textUrl, ".")[0]
+            docinfo['textURLPathkurz'] = textUrlkurz
+            #if not docinfo['imagePath']:
+                # text-only, no page images
+                #docinfo = self.getNumTextPages(docinfo)
+
+
+        presentationUrls = dom.xpath("//texttool/presentation")
+        docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info von bib tag
+        #docinfo = self.getDownloadfromDocinfoToBibtex(url, docinfo=docinfo, dom=dom)
+        docinfo = self.getNameFromIndexMeta(url, docinfo=docinfo, dom=dom)
+
+
+        if presentationUrls and (len(presentationUrls) > 0): # ueberschreibe diese durch presentation informationen
+            # presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten
+            # durch den relativen Pfad auf die presentation infos
+            presentationPath = getTextFromNode(presentationUrls[0])
+            if url.endswith("index.meta"):
+                presentationUrl = url.replace('index.meta', presentationPath)
             else:
-                docinfo['presentationUrl'] = os.path.join(docPath, presentation)
-
+                presentationUrl = url + "/" + presentationPath
+
+            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl, docinfo=docinfo, dom=dom)
+
+        docinfo = self.getAuthinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get access info
         return docinfo
-
-    def getDocinfoFromBib(self, docinfo, bib):
-        """reads contents of bib element into docinfo"""
-        logging.debug("getDocinfoFromBib bib=%s"%repr(bib))
-        # put all raw bib fields in dict "bib"
-        docinfo['bib'] = bib
-        bibtype = bib.get('@type', None)
-        docinfo['bibType'] = bibtype
-        # also store DC metadata for convenience
-        dc = self.metadataService.getDCMappedData(bib)
-        docinfo['creator'] = dc.get('creator',None)
-        docinfo['title'] = dc.get('title',None)
-        docinfo['date'] = dc.get('date',None)
-        return docinfo
-
-    def getDocinfoFromAccess(self, docinfo, acc):
-        """reads contents of access element into docinfo"""
-        #TODO: also read resource type
-        logging.debug("getDocinfoFromAccess acc=%s"%repr(acc))
+
+
+    def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
+        """gets the bibliographical information from the preseantion entry in texttools """
+        dom=self.getPresentationInfoXML(url)
         try:
-            acctype = acc['@attr']['type']
-            if acctype:
-                access=acctype
-                if access in ['group', 'institution']:
-                    access = acc['name'].lower()
-
-                docinfo['accessType'] = access
-
+            docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
+        except:
+            pass
+        try:
+            docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
+        except:
+            pass
+        try:
+            docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
         except:
             pass
         return docinfo
-
-    def getDocinfoFromDigilib(self, docinfo, path):
-        infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
-        # fetch data
-        txt = getHttpData(infoUrl)
-        if not txt:
-            logging.error("Unable to get dir-info from %s"%(infoUrl))
-            return docinfo
-
-        dom = ET.fromstring(txt)
-        size = getText(dom.find("size"))
-        logging.debug("getDocinfoFromDigilib: size=%s"%size)
-        if size:
-            docinfo['numPages'] = int(size)
-        else:
-            docinfo['numPages'] = 0
-
-        # TODO: produce and keep list of image names and numbers
+
+    def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
+        """path ist the path to the images it assumes that the index.meta file is one level higher."""
+        logging.debug("documentViewer (getdocinfofromimagepath) path: %s"%(path))
+        if docinfo is None:
+            docinfo = {}
+        path=path.replace("/mpiwg/online","")
+        docinfo['imagePath'] = path
+        docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
+
+        pathorig=path
+        for x in range(cut):
+            path=getParentDir(path)
+        logging.debug("documentViewer (getdocinfofromimagepath) PATH:"+path)
+        imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
+        docinfo['imageURL'] = imageUrl
+
+        #path ist the path to the images it assumes that the index.meta file is one level higher.
+        docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
+        #docinfo = self.getDownloadfromDocinfoToBibtex(pathorig,docinfo=docinfo,cut=cut+1)
+        docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
         return docinfo
-
-
-    def getDocinfoFromPresentationInfoXml(self,docinfo):
-        """gets DC-like bibliographical information from the presentation entry in texttools"""
-        url = docinfo.get('presentationUrl', None)
-        if not url:
-            logging.error("getDocinfoFromPresentation: no URL!")
-            return docinfo
-
-        dom = None
-        metaUrl = None
-        if url.startswith("http://"):
-            # real URL
-            metaUrl = url
+
+
+    def getDocinfo(self, mode, url):
+        """returns docinfo depending on mode"""
+        logging.debug("documentViewer (getdocinfo) mode: %s, url: %s"%(mode,url))
+        # look for cached docinfo in session
+        if self.REQUEST.SESSION.has_key('docinfo'):
+            docinfo = self.REQUEST.SESSION['docinfo']
+            # check if its still current
+            if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
+                logging.debug("documentViewer (getdocinfo) docinfo in session: %s"%docinfo)
+                return docinfo
+        # new docinfo
+        docinfo = {'mode': mode, 'url': url}
+        if mode=="texttool": #index.meta with texttool information
+            docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
+        elif mode=="imagepath":
+            docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
+        elif mode=="filepath":
+            docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1)
         else:
-            logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode)
-            raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
+
+        # FIXME: fake texturlpath
+        if not docinfo.has_key('textURLPath'):
+            docinfo['textURLPath'] = None
 
-        txt=getHttpData(metaUrl)
-        if txt is None:
-            logging.error("Unable to read info.xml from %s"%(url))
-            return docinfo
-
-        dom = ET.fromstring(txt)
-        docinfo['creator']=getText(dom.find(".//author"))
-        docinfo['title']=getText(dom.find(".//title"))
-        docinfo['date']=getText(dom.find(".//date"))
+        logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
+        #logging.debug("documentViewer (getdocinfo) docinfo: %s"%)
+        self.REQUEST.SESSION['docinfo'] = docinfo
         return docinfo
 
-    def getPageinfo(self, current=None, start=None, rows=None, cols=None, docinfo=None, viewMode=None, viewType=None, tocMode=None):
+    def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None, viewMode=None, tocMode=None):
         """returns pageinfo with the given parameters"""
-        logging.debug("getPageInfo(current=%s, start=%s, rows=%s, cols=%s, viewMode=%s, viewType=%s, tocMode=%s)"%(current,start,rows,cols,viewMode,viewType,tocMode))
         pageinfo = {}
-        pageinfo['viewMode'] = viewMode
-        pageinfo['viewType'] = viewType
-        pageinfo['tocMode'] = tocMode
-        current = getInt(current)
+        pageinfo['current'] = current
-        pageinfo['pn'] = current
         rows = int(rows or self.thumbrows)
         pageinfo['rows'] = rows
         cols = int(cols or self.thumbcols)
         pageinfo['cols'] = cols
         grpsize = cols * rows
         pageinfo['groupsize'] = grpsize
-        # is start is empty use one around current
         start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1))) # int(current / grpsize) * grpsize +1))
         pageinfo['start'] = start
-
-        np = int(docinfo.get('numPages', 0))
-        if np == 0:
-            # numPages unknown - maybe we can get it from text page
-            if docinfo.get('textURLPath', None):
-                # cache text page as well
-                pageinfo['textPage'] = self.getTextPage(mode=viewType, pn=current, docinfo=docinfo, pageinfo=pageinfo)
-                np = int(docinfo.get('numPages', 0))
-
-        pageinfo['numgroups'] = int(np / grpsize)
-        if np % grpsize > 0:
-            pageinfo['numgroups'] += 1
-
-        pageFlowLtr = docinfo.get('pageFlow', 'ltr') != 'rtl'
-        oddScanLeft = docinfo.get('oddPage', 'left') != 'right'
-        # add zeroth page for two columns
-        pageZero = (cols == 2 and (pageFlowLtr != oddScanLeft))
-        pageinfo['pageZero'] = pageZero
-        pageinfo['pageBatch'] = self.getPageBatch(start=start, rows=rows, cols=cols, pageFlowLtr=pageFlowLtr, pageZero=pageZero, minIdx=1, maxIdx=np)
-
-        # TODO: do we need this here?
+        pageinfo['end'] = start + grpsize
+        if (docinfo is not None) and ('numPages' in docinfo):
+            np = int(docinfo['numPages'])
+            pageinfo['end'] = min(pageinfo['end'], np)
+            pageinfo['numgroups'] = int(np / grpsize)
+            if np % grpsize > 0:
+                pageinfo['numgroups'] += 1
+        pageinfo['viewMode'] = viewMode
+        pageinfo['tocMode'] = tocMode
         pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','reg')
+        #pageinfo['optionToggle'] = self.REQUEST.get('optionToggle','1')
         pageinfo['query'] = self.REQUEST.get('query','')
         pageinfo['queryType'] = self.REQUEST.get('queryType','')
         pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
+        pageinfo['textPN'] = self.REQUEST.get('textPN','1')
         pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','')
-        pageinfo['tocPageSize'] = getInt(self.REQUEST.get('tocPageSize', 30))
-        pageinfo['queryPageSize'] = getInt(self.REQUEST.get('queryPageSize', 10))
-        pageinfo['tocPN'] = getInt(self.REQUEST.get('tocPN', '1'))
-        pageinfo['searchPN'] = getInt(self.REQUEST.get('searchPN','1'))
-        # limit tocPN
+        pageinfo ['highlightElementPos'] = self.REQUEST.get('highlightElementPos','')
+        pageinfo ['highlightElement'] = self.REQUEST.get('highlightElement','')
+
+
+        pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
+        pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10')
+        pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
+        toc = int (pageinfo['tocPN'])
+        pageinfo['textPages'] =int (toc)
+
         if 'tocSize_%s'%tocMode in docinfo:
-            tocSize = docinfo['tocSize_%s'%tocMode]
-            tocPageSize = pageinfo['tocPageSize']
+            tocSize = int(docinfo['tocSize_%s'%tocMode])
+            tocPageSize = int(pageinfo['tocPageSize'])
            # cached toc
            if tocSize%tocPageSize>0:
                tocPages=tocSize/tocPageSize+1
            else:
                tocPages=tocSize/tocPageSize
-
-            pageinfo['tocPN'] = min(tocPages,pageinfo['tocPN'])
-
+            pageinfo['tocPN'] = min (tocPages,toc)
+        pageinfo['searchPN'] =self.REQUEST.get('searchPN','1')
+        #pageinfo['sn'] =self.REQUEST.get('sn','')
+        pageinfo['s'] =self.REQUEST.get('s','')
         return pageinfo
-
-    def getPageBatch(self, start=1, rows=10, cols=2, pageFlowLtr=True, pageZero=False, minIdx=1, maxIdx=0):
-        """returns dict with array of page informations for one screenfull of thumbnails"""
-        batch = {}
-        grpsize = rows * cols
-        if maxIdx == 0:
-            maxIdx = start + grpsize
-
-        nb = int(math.ceil(maxIdx / float(grpsize)))
-        # list of all batch start and end points
-        batches = []
-        if pageZero:
-            ofs = 0
-        else:
-            ofs = 1
-
-        for i in range(nb):
-            s = i * grpsize + ofs
-            e = min((i + 1) * grpsize + ofs - 1, maxIdx)
-            batches.append({'start':s, 'end':e})
-
-        batch['batches'] = batches
-
-        pages = []
-        if pageZero and start == 1:
-            # correct beginning
-            idx = 0
-        else:
-            idx = start
-
-        for r in range(rows):
-            row = []
-            for c in range(cols):
-                if idx < minIdx or idx > maxIdx:
-                    page = {'idx':None}
-                else:
-                    page = {'idx':idx}
-
-                idx += 1
-                if pageFlowLtr:
-                    row.append(page)
-                else:
-                    row.insert(0, page)
-
-            pages.append(row)
-
-        if start > 1:
-            batch['prevStart'] = max(start - grpsize, 1)
-        else:
-            batch['prevStart'] = None
-
-        if start + grpsize < maxIdx:
-            batch['nextStart'] = start + grpsize
-        else:
-            batch['nextStart'] = None
-
-        batch['pages'] = pages
-        return batch
-
-    def getBatch(self, start=1, size=10, end=0, data=None, fullData=True):
-        """returns dict with information for one screenfull of data."""
-        batch = {}
-        if end == 0:
-            end = start + size
-
-        nb = int(math.ceil(end / float(size)))
-        # list of all batch start and end points
-        batches = []
-        for i in range(nb):
-            s = i * size + 1
-            e = min((i + 1) * size, end)
-            batches.append({'start':s, 'end':e})
-
-        batch['batches'] = batches
-        # list of elements in this batch
-        this = []
-        j = 0
-        for i in range(start, min(start+size, end)):
-            if data:
-                if fullData:
-                    d = data[i]
-                else:
-                    d = data[j]
-                    j += 1
-
-            else:
-                d = i+1
-
-            this.append(d)
-
-        batch['this'] = this
-        if start > 1:
-            batch['prevStart'] = max(start - size, 1)
-        else:
-            batch['prevStart'] = None
-
-        if start + size < end:
-            batch['nextStart'] = start + size
-        else:
-            batch['nextStart'] = None
-
-        return batch
-
-
-    security.declareProtected('View management screens','changeDocumentViewerForm')
-    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
-
-    def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
+def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
         """init document viewer"""
         self.title=title
         self.digilibBaseUrl = digilibBaseUrl
         self.thumbrows = thumbrows
         self.thumbcols = thumbcols
         self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
-        try:
-            # assume MetaDataFolder instance is called metadata
-            self.metadataService = getattr(self, 'metadata')
-        except Exception, e:
-            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))
-
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')
@@ -912,3 +962,37 @@ def manage_AddDocumentViewer(self,id,ima
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
+
+
+## DocumentViewerTemplate class
+class DocumentViewerTemplate(ZopePageTemplate):
+    """Template for document viewer"""
+    meta_type="DocumentViewer Template"
+
+
+def manage_addDocumentViewerTemplateForm(self):
+    """Form for adding"""
+    pt=PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
+    return pt()
+
+def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
+                           REQUEST=None, submit=None):
+    "Add a Page Template with optional file content."
+
+    self._setObject(id, DocumentViewerTemplate(id))
+    ob = getattr(self, id)
+    txt=file(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r').read()
+    logging.info("txt %s:"%txt)
+    ob.pt_edit(txt,"text/html")
+    if title:
+        ob.pt_setTitle(title)
+    try:
+        u = self.DestinationURL()
+    except AttributeError:
+        u = REQUEST['URL1']
+
+    u = "%s/%s" % (u, urllib.quote(id))
+    REQUEST.RESPONSE.redirect(u+'/manage_main')
+    return ''
+
+