# Merged from version 1.175.2.5 (2011/07/19 18:46:35) and version 1.176 (2011/07/29 10:33:06).
|
from OFS.Folder import Folder
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from AccessControl import ClassSecurityInfo
from AccessControl import getSecurityManager
from Globals import package_home

from Products.zogiLib.zogiLib import browserCheck

# 4Suite XML toolkit is no longer used -- XML handling was moved to ElementTree
#from Ft.Xml import EMPTY_NAMESPACE, Parse
#import Ft.Xml.Domlette

import xml.etree.ElementTree as ET

import os.path
import sys
import urllib
import urllib2
import logging
import math
import urlparse
import cStringIO
import re
import string

from SrvTxtUtils import getInt, getText, getHttpData
|
|
|
def logger(txt, method, txt2):
    """Log the concatenation of txt and txt2 at INFO level.

    The `method` argument is accepted for backward compatibility with older
    callers but is ignored -- everything is logged via logging.info().
    """
    logging.info(txt + txt2)
|
|
|
|
|
def getInt(number, default=0):
    """Return `number` converted to int, or int(default) if conversion fails.

    NOTE: this shadows the getInt imported from SrvTxtUtils at the top of the
    file -- TODO: remove one of the two definitions.
    Catches only conversion errors instead of a bare except so that
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        return int(number)
    except (ValueError, TypeError):
        return int(default)
|
|
|
def getTextFromNode(nodename):
    """Return the concatenated character data (CDATA) of a DOM node.

    Returns the empty string when the node is None or has no text children.
    """
    if nodename is None:
        return ""
    # collect the data of all direct text-node children
    return "".join(child.data
                   for child in nodename.childNodes
                   if child.nodeType == child.TEXT_NODE)
|
|
def serializeNode(node, encoding="utf-8"):
    """Return a string containing `node` serialized as XML.

    Resolved merge conflict: the old 4Suite (Ft.Xml.Domlette.Print) code path
    was removed in favour of ElementTree, per version 1.176.
    The `encoding` parameter is kept for backward compatibility with the old
    4Suite signature but is not passed to ET.tostring(), which uses its
    ASCII-safe default serialization.
    """
    s = ET.tostring(node)
    return s
|
|
def browserCheck(self): |
def browserCheck(self): |
Line 98 def browserCheck(self):
|
Line 114 def browserCheck(self):
|
|
|
return bt |
return bt |
|
|
|
|
def getParentDir(path):
    """Return the pathname shortened by its last '/'-separated component.

    Operates purely on '/' as separator (paths here are digilib/URL style,
    not OS-specific), so os.path is deliberately not used.
    """
    return '/'.join(path.split('/')[:-1])
|
|
def getBibdataFromDom(dom):
    """Return a dict with all elements from the bib tag of an index.meta dom.

    The bib element's `type` attribute is stored under the reserved key
    '@type' (cannot clash with element tag names); each child element is
    stored under its tag name with its text content (via getText).
    Returns an empty dict when there is no meta/bib element.
    """
    bibinfo = {}
    bib = dom.find(".//meta/bib")
    if bib is not None:
        # renamed local from `type` to avoid shadowing the builtin
        bibtype = bib.get('type')
        bibinfo['@type'] = bibtype
        # put all subelements in the dict
        for e in bib:
            bibinfo[e.tag] = getText(e)
    return bibinfo
def getHttpData(url, data=None, num_tries=3, timeout=10): |
|
"""returns result from url+data HTTP request""" |
|
# we do GET (by appending data to url) |
|
if isinstance(data, str) or isinstance(data, unicode): |
|
# if data is string then append |
|
url = "%s?%s"%(url,data) |
|
elif isinstance(data, dict) or isinstance(data, list) or isinstance(data, tuple): |
|
# urlencode |
|
url = "%s?%s"%(url,urllib.urlencode(data)) |
|
|
|
response = None |
|
errmsg = None |
|
for cnt in range(num_tries): |
|
try: |
|
logging.debug("getHttpData(#%s %ss) url=%s"%(cnt+1,timeout,url)) |
|
if sys.version_info < (2, 6): |
|
# set timeout on socket -- ugly :-( |
|
import socket |
|
socket.setdefaulttimeout(float(timeout)) |
|
response = urllib2.urlopen(url) |
|
else: |
|
response = urllib2.urlopen(url,timeout=float(timeout)) |
|
# check result? |
|
break |
|
except urllib2.HTTPError, e: |
|
logging.error("getHttpData: HTTP error(%s): %s"%(e.code,e)) |
|
errmsg = str(e) |
|
# stop trying |
|
break |
|
except urllib2.URLError, e: |
|
logging.error("getHttpData: URLLIB error(%s): %s"%(e.reason,e)) |
|
errmsg = str(e) |
|
# stop trying |
|
#break |
|
|
|
if response is not None: |
|
data = response.read() |
|
response.close() |
|
return data |
|
|
|
raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg)) |
|
#return None |
|
|
## |
## |
## documentViewer class |
## documentViewer class |
Line 243 class documentViewer(Folder):
|
Line 290 class documentViewer(Folder):
|
|
|
''' |
''' |
logging.debug("HHHHHHHHHHHHHH:load the rss") |
logging.debug("HHHHHHHHHHHHHH:load the rss") |
logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn)) |
logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn)) |
|
|
if not hasattr(self, 'template'): |
if not hasattr(self, 'template'): |
# create template folder if it doesn't exist |
# create template folder if it doesn't exist |
Line 294 class documentViewer(Folder):
|
Line 341 class documentViewer(Folder):
|
# get table of contents |
# get table of contents |
docinfo = self.getToc(mode=tocMode, docinfo=docinfo) |
docinfo = self.getToc(mode=tocMode, docinfo=docinfo) |
|
|
# auto viewMode: text_dict if text else images |
if viewMode=="auto": # automodus gewaehlt |
if viewMode=="auto": |
if docinfo.has_key('textURL') or docinfo.get('textURLPath',None): #texturl gesetzt und textViewer konfiguriert |
if docinfo.get('textURL', None) or docinfo.get('textURLPath', None): |
|
#texturl gesetzt und textViewer konfiguriert |
|
viewMode="text_dict" |
viewMode="text_dict" |
else: |
else: |
viewMode="images" |
viewMode="images" |
|
|
pageinfo = self.getPageinfo(start=start, current=pn, docinfo=docinfo, viewMode=viewMode, tocMode=tocMode) |
pageinfo = self.getPageinfo(start=start, current=pn, docinfo=docinfo, viewMode=viewMode, tocMode=tocMode) |
|
|
if viewMode != 'images' and docinfo.get('textURLPath', None): |
if (docinfo.get('textURLPath',None)): |
# get full text page |
page = self.getTextPage(docinfo=docinfo, pageinfo=pageinfo) |
page = self.getTextPage(mode=viewMode, pn=pn, docinfo=docinfo, pageinfo=pageinfo) |
|
pageinfo['textPage'] = page |
pageinfo['textPage'] = page |
|
tt = getattr(self, 'template') |
# get template /template/viewer_main |
pt = getattr(tt, 'viewer_main') |
pt = getattr(self.template, 'viewer_main') |
|
# and execute with parameters |
|
return pt(docinfo=docinfo, pageinfo=pageinfo, viewMode=viewMode, mk=self.generateMarks(mk)) |
return pt(docinfo=docinfo, pageinfo=pageinfo, viewMode=viewMode, mk=self.generateMarks(mk)) |
|
|
def generateMarks(self,mk): |
def generateMarks(self,mk): |
Line 443 class documentViewer(Folder):
|
Line 485 class documentViewer(Folder):
|
docinfo = {} |
docinfo = {} |
|
|
for x in range(cut): |
for x in range(cut): |
|
|
path=getParentDir(path) |
path=getParentDir(path) |
|
|
infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path |
infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path |
Line 453 class documentViewer(Folder):
|
Line 496 class documentViewer(Folder):
|
if txt is None: |
if txt is None: |
raise IOError("Unable to get dir-info from %s"%(infoUrl)) |
raise IOError("Unable to get dir-info from %s"%(infoUrl)) |
|
|
dom = ET.fromstring(txt) |
dom = Parse(txt) |
#dom = Parse(txt) |
sizes=dom.xpath("//dir/size") |
size=getText(dom.find("size")) |
logging.debug("documentViewer (getparamfromdigilib) dirInfo:size"%sizes) |
#sizes=dom.xpath("//dir/size") |
|
logging.debug("documentViewer (getparamfromdigilib) dirInfo:size=%s"%size) |
|
|
|
if size: |
if sizes: |
docinfo['numPages'] = int(size) |
docinfo['numPages'] = int(getTextFromNode(sizes[0])) |
else: |
else: |
docinfo['numPages'] = 0 |
docinfo['numPages'] = 0 |
|
|
Line 505 class documentViewer(Folder):
|
Line 546 class documentViewer(Folder):
|
if txt is None: |
if txt is None: |
raise IOError("Unable to read index meta from %s"%(url)) |
raise IOError("Unable to read index meta from %s"%(url)) |
|
|
dom = ET.fromstring(txt) |
dom = Parse(txt) |
#dom = Parse(txt) |
|
return dom |
return dom |
|
|
def getPresentationInfoXML(self, url): |
def getPresentationInfoXML(self, url): |
Line 525 class documentViewer(Folder):
|
Line 565 class documentViewer(Folder):
|
if txt is None: |
if txt is None: |
raise IOError("Unable to read infoXMLfrom %s"%(url)) |
raise IOError("Unable to read infoXMLfrom %s"%(url)) |
|
|
dom = ET.fromstring(txt) |
dom = Parse(txt) |
#dom = Parse(txt) |
|
return dom |
return dom |
|
|
|
|
Line 544 class documentViewer(Folder):
|
Line 583 class documentViewer(Folder):
|
path=getParentDir(path) |
path=getParentDir(path) |
dom = self.getDomFromIndexMeta(path) |
dom = self.getDomFromIndexMeta(path) |
|
|
acc = dom.find(".//access-conditions/access") |
acctype = dom.xpath("//access-conditions/access/@type") |
if acc is not None: |
if acctype and (len(acctype)>0): |
acctype = acc.get('type') |
access=acctype[0].value |
#acctype = dom.xpath("//access-conditions/access/@type") |
|
if acctype: |
|
access=acctype |
|
if access in ['group', 'institution']: |
if access in ['group', 'institution']: |
access = dom.find(".//access-conditions/access/name").text.lower() |
access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower() |
|
|
docinfo['accessType'] = access |
docinfo['accessType'] = access |
return docinfo |
return docinfo |
Line 559 class documentViewer(Folder):
|
Line 595 class documentViewer(Folder):
|
|
|
def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): |
def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): |
"""gets bibliographical info from the index.meta file at path or given by dom""" |
"""gets bibliographical info from the index.meta file at path or given by dom""" |
logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path)) |
#logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path)) |
|
|
if docinfo is None: |
if docinfo is None: |
docinfo = {} |
docinfo = {} |
Line 571 class documentViewer(Folder):
|
Line 607 class documentViewer(Folder):
|
|
|
docinfo['indexMetaPath']=self.getIndexMetaPath(path); |
docinfo['indexMetaPath']=self.getIndexMetaPath(path); |
|
|
logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path)) |
#logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path)) |
# put in all raw bib fields as dict "bib" |
# put in all raw bib fields as dict "bib" |
bib = getBibdataFromDom(dom) |
bib = dom.xpath("//bib/*") |
docinfo['bib'] = bib |
if bib and len(bib)>0: |
|
bibinfo = {} |
|
for e in bib: |
|
bibinfo[e.localName] = getTextFromNode(e) |
|
docinfo['bib'] = bibinfo |
|
|
# extract some fields (author, title, year) according to their mapping |
# extract some fields (author, title, year) according to their mapping |
metaData=self.metadata.main.meta.bib |
metaData=self.metadata.main.meta.bib |
bibtype=bib.get("@type") |
bibtype=dom.xpath("//bib/@type") |
#bibtype=dom.xpath("//bib/@type") |
if bibtype and (len(bibtype)>0): |
if not bibtype: |
bibtype=bibtype[0].value |
|
else: |
bibtype="generic" |
bibtype="generic" |
|
|
bibtype=bibtype.replace("-"," ") # wrong types in index meta "-" instead of " " (not wrong! ROC) |
bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC) |
docinfo['bib_type'] = bibtype |
docinfo['bib_type'] = bibtype |
bibmap=metaData.generateMappingForType(bibtype) |
bibmap=metaData.generateMappingForType(bibtype) |
logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap)) |
#logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap)) |
logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype)) |
#logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype)) |
# if there is no mapping bibmap is empty (mapping sometimes has empty fields) |
# if there is no mapping bibmap is empty (mapping sometimes has empty fields) |
if len(bibmap) > 0 and bibmap.get('author',None) or bibmap.get('title',None): |
if len(bibmap) > 0 and len(bibmap['author'][0]) > 0: |
try: |
try: |
docinfo['author']=bib.get(bibmap['author'][0]) |
docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0]) |
except: pass |
except: pass |
try: |
try: |
docinfo['title']=bib.get(bibmap['title'][0]) |
docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0]) |
except: pass |
except: pass |
try: |
try: |
docinfo['year']=bib.get(bibmap['year'][0]) |
docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0]) |
except: pass |
except: pass |
|
|
# ROC: why is this here? |
|
# logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype) |
# logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype) |
# try: |
try: |
# docinfo['lang']=getTextFromNode(dom.find(".//bib/lang")[0]) |
docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0]) |
# except: |
except: |
# docinfo['lang']='' |
docinfo['lang']='' |
# try: |
try: |
# docinfo['city']=getTextFromNode(dom.find(".//bib/city")[0]) |
docinfo['city']=getTextFromNode(dom.xpath("//bib/city")[0]) |
# except: |
except: |
# docinfo['city']='' |
docinfo['city']='' |
# try: |
try: |
# docinfo['number_of_pages']=getTextFromNode(dom.find(".//bib/number_of_pages")[0]) |
docinfo['number_of_pages']=getTextFromNode(dom.xpath("//bib/number_of_pages")[0]) |
# except: |
except: |
# docinfo['number_of_pages']='' |
docinfo['number_of_pages']='' |
# try: |
try: |
# docinfo['series_volume']=getTextFromNode(dom.find(".//bib/series_volume")[0]) |
docinfo['series_volume']=getTextFromNode(dom.xpath("//bib/series_volume")[0]) |
# except: |
except: |
# docinfo['series_volume']='' |
docinfo['series_volume']='' |
# try: |
try: |
# docinfo['number_of_volumes']=getTextFromNode(dom.find(".//bib/number_of_volumes")[0]) |
docinfo['number_of_volumes']=getTextFromNode(dom.xpath("//bib/number_of_volumes")[0]) |
# except: |
except: |
# docinfo['number_of_volumes']='' |
docinfo['number_of_volumes']='' |
# try: |
try: |
# docinfo['translator']=getTextFromNode(dom.find(".//bib/translator")[0]) |
docinfo['translator']=getTextFromNode(dom.xpath("//bib/translator")[0]) |
# except: |
except: |
# docinfo['translator']='' |
docinfo['translator']='' |
# try: |
try: |
# docinfo['edition']=getTextFromNode(dom.find(".//bib/edition")[0]) |
docinfo['edition']=getTextFromNode(dom.xpath("//bib/edition")[0]) |
# except: |
except: |
# docinfo['edition']='' |
docinfo['edition']='' |
# try: |
try: |
# docinfo['series_author']=getTextFromNode(dom.find(".//bib/series_author")[0]) |
docinfo['series_author']=getTextFromNode(dom.xpath("//bib/series_author")[0]) |
# except: |
except: |
# docinfo['series_author']='' |
docinfo['series_author']='' |
# try: |
try: |
# docinfo['publisher']=getTextFromNode(dom.find(".//bib/publisher")[0]) |
docinfo['publisher']=getTextFromNode(dom.xpath("//bib/publisher")[0]) |
# except: |
except: |
# docinfo['publisher']='' |
docinfo['publisher']='' |
# try: |
try: |
# docinfo['series_title']=getTextFromNode(dom.find(".//bib/series_title")[0]) |
docinfo['series_title']=getTextFromNode(dom.xpath("//bib/series_title")[0]) |
# except: |
except: |
# docinfo['series_title']='' |
docinfo['series_title']='' |
# try: |
try: |
# docinfo['isbn_issn']=getTextFromNode(dom.find(".//bib/isbn_issn")[0]) |
docinfo['isbn_issn']=getTextFromNode(dom.xpath("//bib/isbn_issn")[0]) |
# except: |
except: |
# docinfo['isbn_issn']='' |
docinfo['isbn_issn']='' |
|
#logging.debug("I NEED BIBTEX %s"%docinfo) |
return docinfo |
return docinfo |
|
|
|
|
# TODO: is this needed? |
|
def getNameFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): |
def getNameFromIndexMeta(self,path,docinfo=None,dom=None,cut=0): |
"""gets name info from the index.meta file at path or given by dom""" |
"""gets name info from the index.meta file at path or given by dom""" |
if docinfo is None: |
if docinfo is None: |
Line 660 class documentViewer(Folder):
|
Line 699 class documentViewer(Folder):
|
path=getParentDir(path) |
path=getParentDir(path) |
dom = self.getDomFromIndexMeta(path) |
dom = self.getDomFromIndexMeta(path) |
|
|
docinfo['name']=getText(dom.find("name")) |
docinfo['name']=getTextFromNode(dom.xpath("/resource/name")[0]) |
logging.debug("documentViewer docinfo[name] %s"%docinfo['name']) |
logging.debug("documentViewer docinfo[name] %s"%docinfo['name']) |
return docinfo |
return docinfo |
|
|
Line 677 class documentViewer(Folder):
|
Line 716 class documentViewer(Folder):
|
archivePath = None |
archivePath = None |
archiveName = None |
archiveName = None |
|
|
archiveName = getText(dom.find("name")) |
archiveNames = dom.xpath("//resource/name") |
if not archiveName: |
if archiveNames and (len(archiveNames) > 0): |
|
archiveName = getTextFromNode(archiveNames[0]) |
|
else: |
logging.warning("documentViewer (getdocinfofromtexttool) resource/name missing in: %s" % (url)) |
logging.warning("documentViewer (getdocinfofromtexttool) resource/name missing in: %s" % (url)) |
|
|
archivePath = getText(dom.find("archive-path")) |
archivePaths = dom.xpath("//resource/archive-path") |
if archivePath: |
if archivePaths and (len(archivePaths) > 0): |
|
archivePath = getTextFromNode(archivePaths[0]) |
# clean up archive path |
# clean up archive path |
if archivePath[0] != '/': |
if archivePath[0] != '/': |
archivePath = '/' + archivePath |
archivePath = '/' + archivePath |
Line 698 class documentViewer(Folder):
|
Line 740 class documentViewer(Folder):
|
# we balk without archive-path |
# we balk without archive-path |
raise IOError("Missing archive-path (for text-tool) in %s" % (url)) |
raise IOError("Missing archive-path (for text-tool) in %s" % (url)) |
|
|
imageDir = getText(dom.find(".//texttool/image")) |
imageDirs = dom.xpath("//texttool/image") |
|
if imageDirs and (len(imageDirs) > 0): |
|
imageDir = getTextFromNode(imageDirs[0]) |
|
|
if not imageDir: |
else: |
# we balk with no image tag / not necessary anymore because textmode is now standard |
# we balk with no image tag / not necessary anymore because textmode is now standard |
#raise IOError("No text-tool info in %s"%(url)) |
#raise IOError("No text-tool info in %s"%(url)) |
imageDir = "" |
imageDir = "" |
Line 717 class documentViewer(Folder):
|
Line 761 class documentViewer(Folder):
|
|
|
docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir |
docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir |
|
|
viewerUrl = getText(dom.find(".//texttool/digiliburlprefix")) |
viewerUrls = dom.xpath("//texttool/digiliburlprefix") |
if viewerUrl: |
if viewerUrls and (len(viewerUrls) > 0): |
|
viewerUrl = getTextFromNode(viewerUrls[0]) |
docinfo['viewerURL'] = viewerUrl |
docinfo['viewerURL'] = viewerUrl |
|
|
# old style text URL |
# old style text URL |
textUrl = getText(dom.find(".//texttool/text")) |
textUrls = dom.xpath("//texttool/text") |
if textUrl: |
if textUrls and (len(textUrls) > 0): |
|
textUrl = getTextFromNode(textUrls[0]) |
if urlparse.urlparse(textUrl)[0] == "": #keine url |
if urlparse.urlparse(textUrl)[0] == "": #keine url |
textUrl = os.path.join(archivePath, textUrl) |
textUrl = os.path.join(archivePath, textUrl) |
# fix URLs starting with /mpiwg/online |
# fix URLs starting with /mpiwg/online |
Line 733 class documentViewer(Folder):
|
Line 779 class documentViewer(Folder):
|
docinfo['textURL'] = textUrl |
docinfo['textURL'] = textUrl |
|
|
# new style text-url-path |
# new style text-url-path |
textUrl = getText(dom.find(".//texttool/text-url-path")) |
textUrls = dom.xpath("//texttool/text-url-path") |
if textUrl: |
if textUrls and (len(textUrls) > 0): |
|
textUrl = getTextFromNode(textUrls[0]) |
docinfo['textURLPath'] = textUrl |
docinfo['textURLPath'] = textUrl |
textUrlkurz = string.split(textUrl, ".")[0] |
textUrlkurz = string.split(textUrl, ".")[0] |
docinfo['textURLPathkurz'] = textUrlkurz |
docinfo['textURLPathkurz'] = textUrlkurz |
Line 743 class documentViewer(Folder):
|
Line 790 class documentViewer(Folder):
|
#docinfo = self.getNumTextPages(docinfo) |
#docinfo = self.getNumTextPages(docinfo) |
|
|
|
|
presentationUrl = getText(dom.find(".//texttool/presentation")) |
presentationUrls = dom.xpath("//texttool/presentation") |
docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info von bib tag |
docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info von bib tag |
# TODO: is this needed here? |
#docinfo = self.getDownloadfromDocinfoToBibtex(url, docinfo=docinfo, dom=dom) |
docinfo = self.getNameFromIndexMeta(url, docinfo=docinfo, dom=dom) |
docinfo = self.getNameFromIndexMeta(url, docinfo=docinfo, dom=dom) |
|
|
|
|
if presentationUrl: # ueberschreibe diese durch presentation informationen |
if presentationUrls and (len(presentationUrls) > 0): # ueberschreibe diese durch presentation informationen |
# presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten |
# presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten |
# durch den relativen Pfad auf die presentation infos |
# durch den relativen Pfad auf die presentation infos |
presentationPath = presentationUrl |
presentationPath = getTextFromNode(presentationUrls[0]) |
if url.endswith("index.meta"): |
if url.endswith("index.meta"): |
presentationUrl = url.replace('index.meta', presentationPath) |
presentationUrl = url.replace('index.meta', presentationPath) |
else: |
else: |
Line 769 class documentViewer(Folder):
|
Line 816 class documentViewer(Folder):
|
"""gets the bibliographical information from the preseantion entry in texttools |
"""gets the bibliographical information from the preseantion entry in texttools |
""" |
""" |
dom=self.getPresentationInfoXML(url) |
dom=self.getPresentationInfoXML(url) |
docinfo['author']=getText(dom.find(".//author")) |
try: |
docinfo['title']=getText(dom.find(".//title")) |
docinfo['author']=getTextFromNode(dom.xpath("//author")[0]) |
docinfo['year']=getText(dom.find(".//date")) |
except: |
|
pass |
|
try: |
|
docinfo['title']=getTextFromNode(dom.xpath("//title")[0]) |
|
except: |
|
pass |
|
try: |
|
docinfo['year']=getTextFromNode(dom.xpath("//date")[0]) |
|
except: |
|
pass |
return docinfo |
return docinfo |
|
|
def getDocinfoFromImagePath(self,path,docinfo=None,cut=0): |
def getDocinfoFromImagePath(self,path,docinfo=None,cut=0): |
Line 792 class documentViewer(Folder):
|
Line 848 class documentViewer(Folder):
|
|
|
#path ist the path to the images it assumes that the index.meta file is one level higher. |
#path ist the path to the images it assumes that the index.meta file is one level higher. |
docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) |
docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) |
|
#docinfo = self.getDownloadfromDocinfoToBibtex(pathorig,docinfo=docinfo,cut=cut+1) |
docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) |
docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1) |
return docinfo |
return docinfo |
|
|
Line 804 class documentViewer(Folder):
|
Line 861 class documentViewer(Folder):
|
docinfo = self.REQUEST.SESSION['docinfo'] |
docinfo = self.REQUEST.SESSION['docinfo'] |
# check if its still current |
# check if its still current |
if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url: |
if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url: |
logging.debug("documentViewer (getdocinfo) docinfo in session. keys=%s"%docinfo.keys()) |
logging.debug("documentViewer (getdocinfo) docinfo in session: %s"%docinfo) |
return docinfo |
return docinfo |
|
|
# new docinfo |
# new docinfo |
docinfo = {'mode': mode, 'url': url} |
docinfo = {'mode': mode, 'url': url} |
# add self url |
if mode=="texttool": #index.meta with texttool information |
docinfo['viewerUrl'] = self.getDocumentViewerURL() |
|
if mode=="texttool": |
|
# index.meta with texttool information |
|
docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo) |
docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo) |
elif mode=="imagepath": |
elif mode=="imagepath": |
# folder with images, index.meta optional |
|
docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo) |
docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo) |
elif mode=="filepath": |
elif mode=="filepath": |
# filename |
|
docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1) |
docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1) |
else: |
else: |
logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode) |
logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode) |
Line 828 class documentViewer(Folder):
|
Line 879 class documentViewer(Folder):
|
if not docinfo.has_key('textURLPath'): |
if not docinfo.has_key('textURLPath'): |
docinfo['textURLPath'] = None |
docinfo['textURLPath'] = None |
|
|
logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys()) |
logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo) |
#logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo) |
#logging.debug("documentViewer (getdocinfo) docinfo: %s"%) |
self.REQUEST.SESSION['docinfo'] = docinfo |
self.REQUEST.SESSION['docinfo'] = docinfo |
return docinfo |
return docinfo |
|
|
Line 864 class documentViewer(Folder):
|
Line 915 class documentViewer(Folder):
|
pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext') |
pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext') |
pageinfo['textPN'] = self.REQUEST.get('textPN','1') |
pageinfo['textPN'] = self.REQUEST.get('textPN','1') |
pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','') |
pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','') |
|
|
|
pageinfo ['highlightElementPos'] = self.REQUEST.get('highlightElementPos','') |
|
pageinfo ['highlightElement'] = self.REQUEST.get('highlightElement','') |
|
|
|
|
pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30') |
pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30') |
pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10') |
pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10') |
pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1') |
pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1') |
Line 880 class documentViewer(Folder):
|
Line 936 class documentViewer(Folder):
|
tocPages=tocSize/tocPageSize |
tocPages=tocSize/tocPageSize |
pageinfo['tocPN'] = min (tocPages,toc) |
pageinfo['tocPN'] = min (tocPages,toc) |
pageinfo['searchPN'] =self.REQUEST.get('searchPN','1') |
pageinfo['searchPN'] =self.REQUEST.get('searchPN','1') |
pageinfo['sn'] =self.REQUEST.get('sn','') |
#pageinfo['sn'] =self.REQUEST.get('sn','') |
|
pageinfo['s'] =self.REQUEST.get('s','') |
return pageinfo |
return pageinfo |
|
|
def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None): |
def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None): |