--- documentViewer/documentViewer.py 2011/07/15 09:02:26 1.175.2.2
+++ documentViewer/documentViewer.py 2012/01/04 07:38:17 1.178
@@ -7,11 +7,8 @@ from AccessControl import getSecurityMan
 from Globals import package_home
 from Products.zogiLib.zogiLib import browserCheck
-#from Ft.Xml import EMPTY_NAMESPACE, Parse
-#import Ft.Xml.Domlette
-
-import xml.etree.ElementTree as ET
-
+from Ft.Xml import EMPTY_NAMESPACE, Parse
+import Ft.Xml.Domlette
 import os.path
 import sys
 import urllib
@@ -19,6 +16,7 @@ import urllib2
 import logging
 import math
 import urlparse
+import cStringIO
 import re
 import string
@@ -34,37 +32,25 @@ def getInt(number, default=0):
     except:
         return int(default)

-def getText(node):
+def getTextFromNode(nodename):
     """get the cdata content of a node"""
-    if node is None:
+    if nodename is None:
         return ""
-    # ET:
-    text = node.text or ""
-    for e in node:
-        text += gettext(e)
-        if e.tail:
-            text += e.tail
-
-    # 4Suite:
-    #nodelist=node.childNodes
-    #text = ""
-    #for n in nodelist:
-    #    if n.nodeType == node.TEXT_NODE:
-    #        text = text + n.data
-
-    return text
-
-getTextFromNode = getText
+    nodelist=nodename.childNodes
+    rc = ""
+    for node in nodelist:
+        if node.nodeType == node.TEXT_NODE:
+            rc = rc + node.data
+    return rc

 def serializeNode(node, encoding="utf-8"):
     """returns a string containing node as XML"""
-    s = ET.tostring(node)
-
-    # 4Suite:
-    # stream = cStringIO.StringIO()
-    # Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
-    # s = stream.getvalue()
-    # stream.close()
+    stream = cStringIO.StringIO()
+    #logging.debug("BUF: %s"%(stream))
+    Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
+    s = stream.getvalue()
+    #logging.debug("BUF: %s"%(s))
+    stream.close()
     return s

 def browserCheck(self):
@@ -364,7 +350,7 @@ class documentViewer(Folder):
         pageinfo = self.getPageinfo(start=start,current=pn, docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)

         if (docinfo.get('textURLPath',None)):
-            page = self.getTextPage(mode=viewMode, docinfo=docinfo, pageinfo=pageinfo)
+            page = self.getTextPage(docinfo=docinfo, pageinfo=pageinfo)
             pageinfo['textPage'] = page
         tt = getattr(self, 'template')
         pt = getattr(tt, 'viewer_main')
@@ -499,7 +485,8 @@ class documentViewer(Folder):
             docinfo = {}

         for x in range(cut):
-            path=getParentDir(path)
+
+            path=getParentDir(path)

         infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
@@ -509,14 +496,12 @@ class documentViewer(Folder):
         if txt is None:
             raise IOError("Unable to get dir-info from %s"%(infoUrl))

-        dom = ET.fromstring(txt)
-        #dom = Parse(txt)
-        size=getText(dom.find("size"))
-        #sizes=dom.xpath("//dir/size")
-        logging.debug("documentViewer (getparamfromdigilib) dirInfo:size=%s"%size)
+        dom = Parse(txt)
+        sizes=dom.xpath("//dir/size")
+        logging.debug("documentViewer (getparamfromdigilib) dirInfo:size"%sizes)

-        if size:
-            docinfo['numPages'] = int(size)
+        if sizes:
+            docinfo['numPages'] = int(getTextFromNode(sizes[0]))
         else:
             docinfo['numPages'] = 0
@@ -561,8 +546,7 @@ class documentViewer(Folder):
         if txt is None:
             raise IOError("Unable to read index meta from %s"%(url))

-        dom = ET.fromstring(txt)
-        #dom = Parse(txt)
+        dom = Parse(txt)
         return dom

     def getPresentationInfoXML(self, url):
@@ -581,8 +565,7 @@ class documentViewer(Folder):
         if txt is None:
             raise IOError("Unable to read infoXMLfrom %s"%(url))

-        dom = ET.fromstring(txt)
-        #dom = Parse(txt)
+        dom = Parse(txt)
         return dom

@@ -600,14 +583,11 @@ class documentViewer(Folder):
             path=getParentDir(path)
         dom = self.getDomFromIndexMeta(path)

-        acc = dom.find(".//access-conditions/access")
-        if acc is not None:
-            acctype = acc.get('type')
-            #acctype = dom.xpath("//access-conditions/access/@type")
-            if acctype:
-                access=acctype
-                if access in ['group', 'institution']:
-                    access = dom.find(".//access-conditions/access/name").text.lower()
+        acctype = dom.xpath("//access-conditions/access/@type")
+        if acctype and (len(acctype)>0):
+            access=acctype[0].value
+            if access in ['group', 'institution']:
+                access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()

         docinfo['accessType'] = access
         return docinfo
@@ -615,7 +595,7 @@ class documentViewer(Folder):
     def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
         """gets bibliographical info from the index.meta file at path or given by dom"""
-        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))
+        #logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))

         if docinfo is None:
             docinfo = {}
@@ -627,91 +607,88 @@ class documentViewer(Folder):
         docinfo['indexMetaPath']=self.getIndexMetaPath(path);

-        logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
+        #logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
         # put in all raw bib fields as dict "bib"
-        bib = dom.find(".//bib")
-        #bib = dom.xpath("//bib/*")
-        if bib is not None:
+        bib = dom.xpath("//bib/*")
+        if bib and len(bib)>0:
             bibinfo = {}
             for e in bib:
-                bibinfo[e.tag] = getText(e)
-
+                bibinfo[e.localName] = getTextFromNode(e)
             docinfo['bib'] = bibinfo

         # extract some fields (author, title, year) according to their mapping
         metaData=self.metadata.main.meta.bib
-        bibtype=bib.get("type")
-        #bibtype=dom.xpath("//bib/@type")
-        if not bibtype:
+        bibtype=dom.xpath("//bib/@type")
+        if bibtype and (len(bibtype)>0):
+            bibtype=bibtype[0].value
+        else:
             bibtype="generic"

         bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC)
         docinfo['bib_type'] = bibtype
         bibmap=metaData.generateMappingForType(bibtype)
-        logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap))
-        logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype))
+        #logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap))
+        #logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype))
         # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
-        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0 or len(bibmap['title'][0]) > 0:
+        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            try:
-                docinfo['author']=getText(bib.find(bibmap['author'][0]))
+                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            except: pass
            try:
-                docinfo['title']=getText(bib.find(bibmap['title'][0]))
+                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            except: pass
            try:
-                docinfo['year']=getText(bib.find(bibmap['year'][0]))
+                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            except: pass
-
-            # ROC: why is this here?
-            # logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
-            # try:
-            # docinfo['lang']=getTextFromNode(dom.find(".//bib/lang")[0])
-            # except:
-            # docinfo['lang']=''
-            # try:
-            # docinfo['city']=getTextFromNode(dom.find(".//bib/city")[0])
-            # except:
-            # docinfo['city']=''
-            # try:
-            # docinfo['number_of_pages']=getTextFromNode(dom.find(".//bib/number_of_pages")[0])
-            # except:
-            # docinfo['number_of_pages']=''
-            # try:
-            # docinfo['series_volume']=getTextFromNode(dom.find(".//bib/series_volume")[0])
-            # except:
-            # docinfo['series_volume']=''
-            # try:
-            # docinfo['number_of_volumes']=getTextFromNode(dom.find(".//bib/number_of_volumes")[0])
-            # except:
-            # docinfo['number_of_volumes']=''
-            # try:
-            # docinfo['translator']=getTextFromNode(dom.find(".//bib/translator")[0])
-            # except:
-            # docinfo['translator']=''
-            # try:
-            # docinfo['edition']=getTextFromNode(dom.find(".//bib/edition")[0])
-            # except:
-            # docinfo['edition']=''
-            # try:
-            # docinfo['series_author']=getTextFromNode(dom.find(".//bib/series_author")[0])
-            # except:
-            # docinfo['series_author']=''
-            # try:
-            # docinfo['publisher']=getTextFromNode(dom.find(".//bib/publisher")[0])
-            # except:
-            # docinfo['publisher']=''
-            # try:
-            # docinfo['series_title']=getTextFromNode(dom.find(".//bib/series_title")[0])
-            # except:
-            # docinfo['series_title']=''
-            # try:
-            # docinfo['isbn_issn']=getTextFromNode(dom.find(".//bib/isbn_issn")[0])
-            # except:
-            # docinfo['isbn_issn']=''
+            #logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
+            try:
+                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
+            except:
+                docinfo['lang']=''
+            try:
+                docinfo['city']=getTextFromNode(dom.xpath("//bib/city")[0])
+            except:
+                docinfo['city']=''
+            try:
+                docinfo['number_of_pages']=getTextFromNode(dom.xpath("//bib/number_of_pages")[0])
+            except:
+                docinfo['number_of_pages']=''
+            try:
+                docinfo['series_volume']=getTextFromNode(dom.xpath("//bib/series_volume")[0])
+            except:
+                docinfo['series_volume']=''
+            try:
+                docinfo['number_of_volumes']=getTextFromNode(dom.xpath("//bib/number_of_volumes")[0])
+            except:
+                docinfo['number_of_volumes']=''
+            try:
+                docinfo['translator']=getTextFromNode(dom.xpath("//bib/translator")[0])
+            except:
+                docinfo['translator']=''
+            try:
+                docinfo['edition']=getTextFromNode(dom.xpath("//bib/edition")[0])
+            except:
+                docinfo['edition']=''
+            try:
+                docinfo['series_author']=getTextFromNode(dom.xpath("//bib/series_author")[0])
+            except:
+                docinfo['series_author']=''
+            try:
+                docinfo['publisher']=getTextFromNode(dom.xpath("//bib/publisher")[0])
+            except:
+                docinfo['publisher']=''
+            try:
+                docinfo['series_title']=getTextFromNode(dom.xpath("//bib/series_title")[0])
+            except:
+                docinfo['series_title']=''
+            try:
+                docinfo['isbn_issn']=getTextFromNode(dom.xpath("//bib/isbn_issn")[0])
+            except:
+                docinfo['isbn_issn']=''
+        #logging.debug("I NEED BIBTEX %s"%docinfo)
         return docinfo

-    # TODO: is this needed?
     def getNameFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
         """gets name info from the index.meta file at path or given by dom"""
         if docinfo is None:
             docinfo = {}
@@ -722,7 +699,7 @@ class documentViewer(Folder):
             path=getParentDir(path)
         dom = self.getDomFromIndexMeta(path)

-        docinfo['name']=getText(dom.find("name"))
+        docinfo['name']=getTextFromNode(dom.xpath("/resource/name")[0])
         logging.debug("documentViewer docinfo[name] %s"%docinfo['name'])
         return docinfo
@@ -739,12 +716,15 @@ class documentViewer(Folder):
         archivePath = None
         archiveName = None

-        archiveName = getText(dom.find("name"))
-        if not archiveName:
+        archiveNames = dom.xpath("//resource/name")
+        if archiveNames and (len(archiveNames) > 0):
+            archiveName = getTextFromNode(archiveNames[0])
+        else:
             logging.warning("documentViewer (getdocinfofromtexttool) resource/name missing in: %s" % (url))

-        archivePath = getText(dom.find("archive-path"))
-        if archivePath:
+        archivePaths = dom.xpath("//resource/archive-path")
+        if archivePaths and (len(archivePaths) > 0):
+            archivePath = getTextFromNode(archivePaths[0])
             # clean up archive path
             if archivePath[0] != '/':
                 archivePath = '/' + archivePath
@@ -760,9 +740,11 @@ class documentViewer(Folder):
             # we balk without archive-path
             raise IOError("Missing archive-path (for text-tool) in %s" % (url))

-        imageDir = getText(dom.find(".//texttool/image"))
+        imageDirs = dom.xpath("//texttool/image")
+        if imageDirs and (len(imageDirs) > 0):
+            imageDir = getTextFromNode(imageDirs[0])

-        if not imageDir:
+        else:
             # we balk with no image tag / not necessary anymore because textmode is now standard
             #raise IOError("No text-tool info in %s"%(url))
             imageDir = ""
@@ -779,13 +761,15 @@ class documentViewer(Folder):
         docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir

-        viewerUrl = getText(dom.find(".//texttool/digiliburlprefix"))
-        if viewerUrl:
+        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
+        if viewerUrls and (len(viewerUrls) > 0):
+            viewerUrl = getTextFromNode(viewerUrls[0])
             docinfo['viewerURL'] = viewerUrl

         # old style text URL
-        textUrl = getText(dom.find(".//texttool/text"))
-        if textUrl:
+        textUrls = dom.xpath("//texttool/text")
+        if textUrls and (len(textUrls) > 0):
+            textUrl = getTextFromNode(textUrls[0])
             if urlparse.urlparse(textUrl)[0] == "": #keine url
                 textUrl = os.path.join(archivePath, textUrl)
             # fix URLs starting with /mpiwg/online
             if textUrl.startswith("/mpiwg/online"):
                 textUrl = textUrl.replace("/mpiwg/online", '', 1)

             docinfo['textURL'] = textUrl
-
+
+
+        #TODO: hack-DW for annalen
+        if (textUrl is not None) and (textUrl.startswith("/permanent/einstein/annalen")):
+            textUrl=textUrl.replace("/permanent/einstein/annalen/","/diverse/de/")
+            splitted=textUrl.split("/fulltext")
+            textUrl=splitted[0]+".xml"
+            textUrlkurz = string.split(textUrl, ".")[0]
+            docinfo['textURLPathkurz'] = textUrlkurz
+            docinfo['textURLPath'] = textUrl
+            logging.debug("hack")
+            logging.debug(textUrl)
+
+
         # new style text-url-path
-        textUrl = getText(dom.find(".//texttool/text-url-path"))
-        if textUrl:
+        textUrls = dom.xpath("//texttool/text-url-path")
+        if textUrls and (len(textUrls) > 0):
+            textUrl = getTextFromNode(textUrls[0])
             docinfo['textURLPath'] = textUrl
             textUrlkurz = string.split(textUrl, ".")[0]
             docinfo['textURLPathkurz'] = textUrlkurz
@@ -805,16 +803,16 @@ class documentViewer(Folder):

         #docinfo = self.getNumTextPages(docinfo)

-        presentationUrl = getText(dom.find(".//texttool/presentation"))
+        presentationUrls = dom.xpath("//texttool/presentation")
         docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info von bib tag
-        # TODO: is this needed here?
+        #docinfo = self.getDownloadfromDocinfoToBibtex(url, docinfo=docinfo, dom=dom)
         docinfo = self.getNameFromIndexMeta(url, docinfo=docinfo, dom=dom)

-        if presentationUrl: # ueberschreibe diese durch presentation informationen
+        if presentationUrls and (len(presentationUrls) > 0): # ueberschreibe diese durch presentation informationen
             # presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten
             # durch den relativen Pfad auf die presentation infos
-            presentationPath = presentationUrl
+            presentationPath = getTextFromNode(presentationUrls[0])
             if url.endswith("index.meta"):
                 presentationUrl = url.replace('index.meta', presentationPath)
             else:
@@ -831,9 +829,18 @@ class documentViewer(Folder):
         """gets the bibliographical information from the preseantion entry in texttools
         """
         dom=self.getPresentationInfoXML(url)
-        docinfo['author']=getText(dom.find(".//author"))
-        docinfo['title']=getText(dom.find(".//title"))
-        docinfo['year']=getText(dom.find(".//date"))
+        try:
+            docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
+        except:
+            pass
+        try:
+            docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
+        except:
+            pass
+        try:
+            docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
+        except:
+            pass
         return docinfo

     def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
@@ -854,6 +861,7 @@ class documentViewer(Folder):
         #path ist the path to the images it assumes that the index.meta file is one level higher.
         docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
+        #docinfo = self.getDownloadfromDocinfoToBibtex(pathorig,docinfo=docinfo,cut=cut+1)
         docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
         return docinfo

@@ -920,6 +928,12 @@ class documentViewer(Folder):
         pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
         pageinfo['textPN'] = self.REQUEST.get('textPN','1')
         pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','')
+
+        pageinfo ['highlightElementPos'] = self.REQUEST.get('highlightElementPos','')
+        pageinfo ['highlightElement'] = self.REQUEST.get('highlightElement','')
+
+        pageinfo ['xpointer'] = self.REQUEST.get('xpointer','')
+
         pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
         pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10')
         pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
@@ -936,7 +950,8 @@ class documentViewer(Folder):
             tocPages=tocSize/tocPageSize
             pageinfo['tocPN'] = min (tocPages,toc)
         pageinfo['searchPN'] =self.REQUEST.get('searchPN','1')
-        pageinfo['sn'] =self.REQUEST.get('sn','')
+        #pageinfo['sn'] =self.REQUEST.get('sn','')
+        pageinfo['s'] =self.REQUEST.get('s','')
         return pageinfo

     def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
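
Note: the hunks above move the module from the ElementTree helpers (getText, ET.fromstring, node.find) back to the 4Suite-based ones (getTextFromNode, Ft.Xml.Parse, dom.xpath). The sketch below is not part of the patch; it only illustrates the childNodes/TEXT_NODE walk that the reinstated getTextFromNode performs. It uses the standard-library xml.dom.minidom as a stand-in parser and a locally named helper, both assumptions for the example -- the module itself parses with Ft.Xml's Domlette, which exposes the same DOM attributes used here.

    # Minimal sketch of the DOM-style text extraction reinstated above.
    # minidom stands in for 4Suite's Domlette; both provide childNodes,
    # nodeType, TEXT_NODE and data on their nodes.
    import xml.dom.minidom

    def get_text_from_node(node):
        """Concatenate the data of a node's direct text children."""
        if node is None:
            return ""
        rc = ""
        for child in node.childNodes:
            if child.nodeType == child.TEXT_NODE:
                rc = rc + child.data
        return rc

    dom = xml.dom.minidom.parseString("<dir><size>12</size></dir>")
    size_node = dom.getElementsByTagName("size")[0]
    assert get_text_from_node(size_node) == "12"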