version 1.112, 2010/10/08 13:49:44 | version 1.175.2.30, 2011/08/23 13:05:11

Line 1 | Line 1

from OFS.Folder import Folder
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Products.PageTemplates.PageTemplateFile import PageTemplateFile

from App.ImageFile import ImageFile

#from Products.ZSimpleFile.ZSimpleFile import ZSimpleFile
from AccessControl import ClassSecurityInfo
from AccessControl import getSecurityManager
from Globals import package_home

#from Ft.Xml import EMPTY_NAMESPACE, Parse
#import Ft.Xml.Domlette

import xml.etree.ElementTree as ET

import os.path
import sys
import urllib
import logging
import math
import urlparse
import re
import string

from SrvTxtUtils import getInt, getText, getHttpData

def logger(txt,method,txt2):
    """logging"""
    logging.info(txt+ txt2)


def serializeNode(node, encoding="utf-8"):
    """returns a string containing node as XML"""
    s = ET.tostring(node)
    # 4Suite:
    #  stream = cStringIO.StringIO()
    #  Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
    #  s = stream.getvalue()
    #  stream.close()
    return s


def browserCheck(self):
    """check the browsers request to find out the browser type"""
    bt = {}
    ua = self.REQUEST.get_header("HTTP_USER_AGENT")
    bt['ua'] = ua
    bt['isIE'] = False
    bt['isN4'] = False
    bt['versFirefox'] = ""
    bt['versIE'] = ""
    bt['versSafariChrome'] = ""
    bt['versOpera'] = ""

    if string.find(ua, 'MSIE') > -1:
        bt['isIE'] = True
    else:
        bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1)

    # Safari or Chrome identification
    try:
        nav = ua[string.find(ua, '('):]
        nav1 = ua[string.find(ua, ')'):]
        nav2 = nav1[string.find(nav1, '('):]
        nav3 = nav2[string.find(nav2, ')'):]
        ie = string.split(nav, "; ")[1]
        ie1 = string.split(nav1, " ")[2]
        ie2 = string.split(nav3, " ")[1]
        ie3 = string.split(nav3, " ")[2]
        if string.find(ie3, "Safari") > -1:
            bt['versSafariChrome'] = string.split(ie2, "/")[1]
    except: pass

    # IE identification
    try:
        nav = ua[string.find(ua, '('):]
        ie = string.split(nav, "; ")[1]
        if string.find(ie, "MSIE") > -1:
            bt['versIE'] = string.split(ie, " ")[1]
    except: pass

    # Firefox identification
    try:
        nav = ua[string.find(ua, '('):]
        nav1 = ua[string.find(ua, ')'):]
        if string.find(ie1, "Firefox") > -1:
            nav5 = string.split(ie1, "/")[1]
            logging.debug("FIREFOX: %s"%(nav5))
            bt['versFirefox'] = nav5[0:3]
    except: pass

    # Opera identification
    try:
        if string.find(ua, "Opera") > -1:
            nav = ua[string.find(ua, '('):]
            nav1 = nav[string.find(nav, ')'):]
            bt['versOpera'] = string.split(nav1, "/")[2]
    except: pass

    bt['isMac'] = string.find(ua, 'Macintosh') > -1
    bt['isWin'] = string.find(ua, 'Windows') > -1
    bt['isIEWin'] = bt['isIE'] and bt['isWin']
    bt['isIEMac'] = bt['isIE'] and bt['isMac']
    bt['staticHTML'] = False

    return bt


def getParentPath(path, cnt=1):
    """returns pathname shortened by cnt"""
    # make sure path doesn't end with /
    path = path.rstrip('/')
    # split by /, shorten, and reassemble
    return '/'.join(path.split('/')[0:-cnt])

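# Usage sketch (hypothetical path, not from the original source): getDocinfo() below uses
# getParentPath(url) for mode "imagepath" and getParentPath(url, 2) for mode "filepath"
# to locate the directory holding index.meta, e.g.
#   getParentPath("/mpiwg/online/permanent/library/ABC/pageimg")    -> "/mpiwg/online/permanent/library/ABC"
#   getParentPath("/mpiwg/online/permanent/library/ABC/pageimg", 2) -> "/mpiwg/online/permanent/library"
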
##

Line 112 | Line 120   class documentViewer(Folder):

        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    metadataService = None
    """MetaDataFolder instance"""

    # templates and forms
    viewer_text = PageTemplateFile('zpt/viewer_text', globals())
    viewer_images = PageTemplateFile('zpt/viewer_images', globals())
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    toc_thumbs = PageTemplateFile('zpt/toc_thumbs', globals())
    toc_text = PageTemplateFile('zpt/toc_text', globals())
    toc_figures = PageTemplateFile('zpt/toc_figures', globals())
    page_main_images = PageTemplateFile('zpt/page_main_images', globals())
    page_main_double = PageTemplateFile('zpt/page_main_double', globals())
    page_main_text = PageTemplateFile('zpt/page_main_text', globals())
    page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals())
    page_main_gis = PageTemplateFile('zpt/page_main_gis', globals())
    page_main_xml = PageTemplateFile('zpt/page_main_xml', globals())
    page_main_pureXml = PageTemplateFile('zpt/page_main_pureXml', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())
    # TODO: can this be nicer?
    docuviewer_css = ImageFile('css/docuviewer.css', globals())

    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())

    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())

    def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):

Line 152 | Line 166   class documentViewer(Folder):

            templateFolder._setObject('fulltextclient',textServer)
        except Exception, e:
            logging.error("Unable to create MpdlXmlTextServer for fulltextclient: "+str(e))

        try:
            from Products.zogiLib.zogiLib import zogiLib
            zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")

Line 160 | Line 175   class documentViewer(Folder):

        except Exception, e:
            logging.error("Unable to create zogiLib for zogilib: "+str(e))

        try:
            # assume MetaDataFolder instance is called metadata
            self.metadataService = getattr(self, 'metadata')
        except Exception, e:
            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))

        if digilibBaseUrl is not None:
            self.digilibBaseUrl = digilibBaseUrl

    # proxy text server methods to fulltextclient
    def getTextPage(self, **args):
        """get page"""
        return self.template.fulltextclient.getTextPage(**args)

    def getOrigPages(self, **args):
        """get page"""
        return self.template.fulltextclient.getOrigPages(**args)

    def getOrigPagesNorm(self, **args):
        """get page"""
        return self.template.fulltextclient.getOrigPagesNorm(**args)

    def getQuery(self, **args):
        """get query in search"""
        return self.template.fulltextclient.getQuery(**args)

    def getSearch(self, **args):
        """get search"""
        return self.template.fulltextclient.getSearch(**args)

    def getGisPlaces(self, **args):
        """get gis places"""
        return self.template.fulltextclient.getGisPlaces(**args)

    def getAllGisPlaces(self, **args):
        """get all gis places"""
        return self.template.fulltextclient.getAllGisPlaces(**args)

    def getWordInfo(self, **args):
        """get translate"""
        return self.template.fulltextclient.getWordInfo(**args)

    def getLemma(self, **args):
        """get lemma"""
        return self.template.fulltextclient.getLemma(**args)

    def getLemmaQuery(self, **args):
        """get query"""
        return self.template.fulltextclient.getLemmaQuery(**args)

    def getLex(self, **args):
        """get lex"""
        return self.template.fulltextclient.getLex(**args)

    def getToc(self, **args):
        """get toc"""
        return self.template.fulltextclient.getToc(**args)
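
    # Note (sketch): the methods above only delegate to the MpdlXmlTextServer instance
    # stored as self.template.fulltextclient (set up in __init__). For example,
    # getPageinfo() below calls self.getTextPage(mode=viewType, pn=current,
    # docinfo=docinfo, pageinfo=pageinfo), which is forwarded unchanged via **args.
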
Line 209 | Line 249   class documentViewer(Folder):

        '''
        logging.debug("HHHHHHHHHHHHHH:load the rss")
        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))

        if not hasattr(self, 'template'):
            # create template folder if it doesn't exist

Line 219 | Line 259   class documentViewer(Folder):

            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"

        docinfo = self.getDocinfo(mode=mode,url=url)
        #pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
        ''' ZDES '''
        pt = getattr(self.template, 'thumbs_main_rss')

        if viewMode=="auto": # automodus gewaehlt
            if docinfo.has_key("textURL") or docinfo.get('textURLPath',None): #texturl gesetzt und textViewer konfiguriert
                viewMode="text"
            else:
                viewMode="images"

        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)

    security.declareProtected('View','index_html')
    def index_html(self,url,mode="texttool",viewMode="auto",viewType=None,tocMode="thumbs",start=1,pn=1):
        """
        view page
        @param url: url which contains display information
        @param mode: defines how to access the document behind url
        @param viewMode: 'images': display images, 'text': display text, default is 'auto'
        @param viewType: sub-type of viewMode, e.g. 'dict' for viewMode='text'
        @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
        """

        logging.debug("documentViewer(index_html) mode=%s url=%s viewMode=%s viewType=%s start=%s pn=%s"%(mode,url,viewMode,viewType,start,pn))

        if not hasattr(self, 'template'):
            # this won't work

Line 258 | Line 300   class documentViewer(Folder):

        # get table of contents
        docinfo = self.getToc(mode=tocMode, docinfo=docinfo)

        # auto viewMode: text if there is a text else images
        if viewMode=="auto":
            if docinfo.get('textURL', None) or docinfo.get('textURLPath', None):
                viewMode = "text"
                viewType = "dict"
            else:
                viewMode = "images"

        elif viewMode == "text_dict":
            # legacy fix
            viewMode = "text"
            viewType = "dict"

        # stringify viewType
        if isinstance(viewType, list):
            logging.debug("index_html: viewType is list:%s"%viewType)
            viewType = ','.join([t for t in viewType if t])

        pageinfo = self.getPageinfo(start=start, current=pn, docinfo=docinfo, viewMode=viewMode, viewType=viewType, tocMode=tocMode)

        # get template /template/viewer_$viewMode
        pt = getattr(self.template, 'viewer_%s'%viewMode, None)
        if pt is None:
            logging.error("No template for viewMode=%s!"%viewMode)
            # TODO: error page?
            return "No template for viewMode=%s!"%viewMode

        # and execute with parameters
        return pt(docinfo=docinfo, pageinfo=pageinfo)

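    # Illustrative request (hypothetical document path): index_html is typically called as
    #   .../documentViewer?url=/mpiwg/online/permanent/library/ABC/index.meta&mode=texttool&viewMode=auto&pn=3
    # With viewMode="auto" and a configured text this renders template/viewer_text with
    # viewType="dict"; without a text it falls back to template/viewer_images.
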
    def generateMarks(self,mk):
        ret=""

Line 280 | Line 341   class documentViewer(Folder):

        return ret


    def getBrowser(self):
        """getBrowser the version of browser """
        bt = browserCheck(self)
        logging.debug("BROWSER VERSION: %s"%(bt))
        return bt

    def findDigilibUrl(self):
        """try to get the digilib URL from zogilib"""
        url = self.template.zogilib.getDLBaseUrl()
        return url

    def getScalerUrl(self, fn=None, pn=None, dw=100, dh=100, docinfo=None):
        """returns URL to digilib Scaler with params"""
        url = None
        if docinfo is not None:
            url = docinfo.get('imageURL', None)

        if url is None:
            url = "%s/servlet/Scaler?"%self.digilibBaseUrl
            if fn is None and docinfo is not None:
                fn = docinfo.get('imagePath','')

            url += "fn=%s"%fn

        if pn:
            url += "&pn=%s"%pn

        url += "&dw=%s&dh=%s"%(dw,dh)
        return url

    def getDocumentViewerURL(self):
        """returns the URL of this instance"""
        return self.absolute_url()

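    # Illustrative result (hypothetical values): with digilibBaseUrl set to
    # "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary" and no imageURL in docinfo,
    # getScalerUrl(fn="/permanent/library/ABC/pageimg", pn=3) returns
    # ".../digitallibrary/servlet/Scaler?fn=/permanent/library/ABC/pageimg&pn=3&dw=100&dh=100".
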
    def getStyle(self, idx, selected, style=""):
        """returns a string with the given style and append 'sel' if idx == selected."""
        #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
        if idx == selected:
            return style + 'sel'
        else:
            return style

    def getParams(self, param=None, val=None, params=None, duplicates=None):
        """returns dict with URL parameters.

        Takes URL parameters and additionally param=val or dict params.
        Deletes key if value is None."""
        # copy existing request params
        newParams = self.REQUEST.form.copy()
        # change single param
        if param is not None:
            if val is None:
                if newParams.has_key(param):
                    del newParams[param]
            else:
                newParams[param] = str(val)

        # change more params
        if params is not None:
            for (k, v) in params.items():
                if v is None:
                    # val=None removes param
                    if newParams.has_key(k):
                        del newParams[k]

                else:
                    newParams[k] = v

        if duplicates:
            # eliminate lists (coming from duplicate keys)
            for (k, v) in newParams.items():
                if isinstance(v, list):
                    if duplicates == 'comma':
                        # make comma-separated list of non-empty entries
                        newParams[k] = ','.join([t for t in v if t])
                    elif duplicates == 'first':
                        # take first non-empty entry
                        newParams[k] = [t for t in v if t][0]

        return newParams

    def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&', duplicates='comma'):
        """returns URL to documentviewer with parameter param set to val or from dict params"""
        urlParams = self.getParams(param=param, val=val, params=params, duplicates=duplicates)
        # quote values and assemble into query string (not escaping '/')
        ps = paramSep.join(["%s=%s"%(k, urllib.quote_plus(unicode(v),'/')) for (k, v) in urlParams.items()])
        if baseUrl is None:
            baseUrl = self.getDocumentViewerURL()

        url = "%s?%s"%(baseUrl, ps)
        return url

    def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None, duplicates='comma'):
        """link to documentviewer with parameter param set to val (parameters separated by '&amp;')"""
        return self.getLink(param=param, val=val, params=params, baseUrl=baseUrl, paramSep='&amp;', duplicates=duplicates)

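    # Usage sketch (hypothetical request state): if the current request form contains
    # {'url': '/permanent/library/ABC', 'pn': '3'}, then getLink('pn', 4) returns
    # "<viewer-url>?url=/permanent/library/ABC&pn=4" (parameter order may vary),
    # and getLink('pn', None) drops 'pn' from the query string.
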
    def getInfo_xml(self,url,mode):
        """returns info about the document as XML"""

        if not self.digilibBaseUrl:
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"

Line 343 | Line 447   class documentViewer(Folder):

        pt = getattr(self.template, 'info_xml')
        return pt(docinfo=docinfo)


    def isAccessible(self, docinfo):
        """returns if access to the resource is granted"""
        access = docinfo.get('accessType', None)
        logging.debug("documentViewer (accessOK) access type %s"%access)
        if access == 'free':
            logging.debug("documentViewer (accessOK) access is free")
            return True

        elif access is None or access in self.authgroups:
            # only local access -- only logged in users
            user = getSecurityManager().getUser()

Line 365 | Line 469   class documentViewer(Folder):

        return False

    def getDocinfo(self, mode, url):
        """returns docinfo depending on mode"""
        logging.debug("getDocinfo: mode=%s, url=%s"%(mode,url))
        # look for cached docinfo in session
        if self.REQUEST.SESSION.has_key('docinfo'):
            docinfo = self.REQUEST.SESSION['docinfo']
            # check if its still current
            if docinfo is not None and docinfo.get('mode', None) == mode and docinfo.get('url', None) == url:
                logging.debug("getDocinfo: docinfo in session. keys=%s"%docinfo.keys())
                return docinfo

        # new docinfo
        docinfo = {'mode': mode, 'url': url}
        # add self url
        docinfo['viewerUrl'] = self.getDocumentViewerURL()
        docinfo['digilibBaseUrl'] = self.digilibBaseUrl
        # get index.meta DOM
        docUrl = None
        metaDom = None
        if mode=="texttool":
            # url points to document dir or index.meta
            metaDom = self.metadataService.getDomFromPathOrUrl(url)
            docUrl = url.replace('/index.meta', '')
            if metaDom is None:
                raise IOError("Unable to find index.meta for mode=texttool!")

        elif mode=="imagepath":
            # url points to folder with images, index.meta optional
            # asssume index.meta in parent dir
            docUrl = getParentPath(url)
            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)

        elif mode=="filepath":
            # url points to image file, index.meta optional
            # asssume index.meta is two path segments up
            docUrl = getParentPath(url, 2)
            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)

        else:
            logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode)
            raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))

        docinfo['documentUrl'] = docUrl
        # process index.meta contents
        if metaDom is not None and metaDom.tag == 'resource':
            # document directory name and path
            resource = self.metadataService.getResourceData(dom=metaDom)
            if resource:
                docinfo = self.getDocinfoFromResource(docinfo, resource)

            # texttool info
            texttool = self.metadataService.getTexttoolData(dom=metaDom)
            if texttool:
                docinfo = self.getDocinfoFromTexttool(docinfo, texttool)

            # bib info
            bib = self.metadataService.getBibData(dom=metaDom)
            if bib:
                docinfo = self.getDocinfoFromBib(docinfo, bib)
            else:
                # no bib - try info.xml
                docinfo = self.getDocinfoFromPresentationInfoXml(docinfo)

            # auth info
            access = self.metadataService.getAccessData(dom=metaDom)
            if access:
                docinfo = self.getDocinfoFromAccess(docinfo, access)

            # attribution info
            attribution = self.metadataService.getAttributionData(dom=metaDom)
            if attribution:
                logging.debug("getDocinfo: attribution=%s"%repr(attribution))
                docinfo['attribution'] = attribution
                #docinfo = self.getDocinfoFromAccess(docinfo, access)

            # copyright info
            copyright = self.metadataService.getCopyrightData(dom=metaDom)
            if copyright:
                logging.debug("getDocinfo: copyright=%s"%repr(copyright))
                docinfo['copyright'] = copyright
                #docinfo = self.getDocinfoFromAccess(docinfo, access)

        # image path
        if mode != 'texttool':
            # override image path from texttool with url
            docinfo['imagePath'] = url.replace('/mpiwg/online/', '', 1)

        # number of images from digilib
        if docinfo.get('imagePath', None):
            docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + docinfo['imagePath']
            docinfo = self.getDocinfoFromDigilib(docinfo, docinfo['imagePath'])

        logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys())
        #logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
        # store in session
        self.REQUEST.SESSION['docinfo'] = docinfo
        return docinfo

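    # Sketch of the resulting docinfo dict (which keys are present depends on the document):
    # 'mode', 'url', 'viewerUrl', 'digilibBaseUrl', 'documentUrl', 'documentName',
    # 'documentPath', 'imagePath', 'imageURL', 'numPages', 'textURL' or 'textURLPath',
    # 'bib', 'bibType', 'creator', 'title', 'date', 'accessType', 'attribution', 'copyright'.
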
    def getDocinfoFromResource(self, docinfo, resource):
        """reads contents of resource element into docinfo"""
        docName = resource.get('name', None)
        docinfo['documentName'] = docName
        docPath = resource.get('archive-path', None)
        if docPath:
            # clean up document path
            if docPath[0] != '/':
                docPath = '/' + docPath

            if docName and (not docPath.endswith(docName)):
                docPath += "/" + docName

        else:
            # use docUrl as docPath
            docUrl = docinfo['documentUrl']
            if not docUrl.startswith('http:'):
                docPath = docUrl
        if docPath:
            # fix URLs starting with /mpiwg/online
            docPath = docPath.replace('/mpiwg/online', '', 1)

        docinfo['documentPath'] = docPath
        return docinfo

    def getDocinfoFromTexttool(self, docinfo, texttool):
        """reads contents of texttool element into docinfo"""
        # image dir
        imageDir = texttool.get('image', None)
        docPath = docinfo.get('documentPath', None)
        if imageDir and docPath:
            #print "image: ", imageDir, " archivepath: ", archivePath
            imageDir = os.path.join(docPath, imageDir)
            imageDir = imageDir.replace('/mpiwg/online', '', 1)
            docinfo['imagePath'] = imageDir

        # old style text URL
        textUrl = texttool.get('text', None)
        if textUrl and docPath:
            if urlparse.urlparse(textUrl)[0] == "": #keine url
                textUrl = os.path.join(docPath, textUrl)

            docinfo['textURL'] = textUrl

        # new style text-url-path
        textUrl = texttool.get('text-url-path', None)
        if textUrl:
            docinfo['textURLPath'] = textUrl

        # page flow
        docinfo['pageFlow'] = texttool.get('page-flow', 'ltr')

        # odd pages are left
        docinfo['oddPage'] = texttool.get('odd-scan-position', 'left')

        # number of title page (0: not defined)
        docinfo['titlePage'] = texttool.get('title-scan-no', 0)

        # old presentation stuff
        presentation = texttool.get('presentation', None)
        if presentation and docPath:
            if presentation.startswith('http:'):
                docinfo['presentationUrl'] = presentation
            else:
                docinfo['presentationUrl'] = os.path.join(docPath, presentation)

        return docinfo

    def getDocinfoFromBib(self, docinfo, bib):
        """reads contents of bib element into docinfo"""
        logging.debug("getDocinfoFromBib bib=%s"%repr(bib))
        # put all raw bib fields in dict "bib"
        docinfo['bib'] = bib
        bibtype = bib.get('@type', None)
        docinfo['bibType'] = bibtype
        # also store DC metadata for convenience
        dc = self.metadataService.getDCMappedData(bib)
        docinfo['creator'] = dc.get('creator',None)
        docinfo['title'] = dc.get('title',None)
        docinfo['date'] = dc.get('date',None)
        return docinfo

    def getDocinfoFromAccess(self, docinfo, acc):
        """reads contents of access element into docinfo"""
        #TODO: also read resource type
        logging.debug("getDocinfoFromAccess acc=%s"%repr(acc))
        try:
            acctype = acc['@attr']['type']
            if acctype:
                access = acctype
                if access in ['group', 'institution']:
                    access = acc['name'].lower()

                docinfo['accessType'] = access

        except:
            pass

        return docinfo

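    # Illustrative input (shape assumed from the code above): getDocinfoFromAccess expects
    # something like acc = {'@attr': {'type': 'group'}, 'name': 'MPIWG'}, which yields
    # docinfo['accessType'] = 'mpiwg'; acc = {'@attr': {'type': 'free'}} yields 'free'.
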
    def getDocinfoFromDigilib(self, docinfo, path):
        infoUrl = self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
        # fetch data
        txt = getHttpData(infoUrl)
        if not txt:
            logging.error("Unable to get dir-info from %s"%(infoUrl))
            return docinfo

        dom = ET.fromstring(txt)
        size = getText(dom.find("size"))
        logging.debug("getDocinfoFromDigilib: size=%s"%size)
        if size:
            docinfo['numPages'] = int(size)
        else:
            docinfo['numPages'] = 0

        # TODO: produce and keep list of image names and numbers
        return docinfo

    def getDocinfoFromPresentationInfoXml(self,docinfo):
        """gets DC-like bibliographical information from the presentation entry in texttools"""
        url = docinfo.get('presentationUrl', None)
        if not url:
            logging.error("getDocinfoFromPresentation: no URL!")
            return docinfo

        dom = None
        metaUrl = None
        if url.startswith("http://"):
            # real URL
            metaUrl = url
        else:
            # online path
            server = self.digilibBaseUrl+"/servlet/Texter?fn="
            metaUrl = server+url

        txt = getHttpData(metaUrl)
        if txt is None:
            logging.error("Unable to read info.xml from %s"%(url))
            return docinfo

        dom = ET.fromstring(txt)
        docinfo['creator'] = getText(dom.find(".//author"))
        docinfo['title'] = getText(dom.find(".//title"))
        docinfo['date'] = getText(dom.find(".//date"))
        return docinfo

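    # Illustrative info.xml content (hypothetical) that this parser understands:
    #   <info><author>G. Galilei</author><title>Discorsi</title><date>1638</date></info>
    # giving docinfo['creator'], docinfo['title'] and docinfo['date'].
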
    def getPageinfo(self, current=None, start=None, rows=None, cols=None, docinfo=None, viewMode=None, viewType=None, tocMode=None):
        """returns pageinfo with the given parameters"""
        logging.debug("getPageInfo(current=%s, start=%s, rows=%s, cols=%s, viewMode=%s, viewType=%s, tocMode=%s)"%(current,start,rows,cols,viewMode,viewType,tocMode))
        pageinfo = {}
        pageinfo['viewMode'] = viewMode
        pageinfo['viewType'] = viewType
        pageinfo['tocMode'] = tocMode

        current = getInt(current)
        pageinfo['current'] = current
        pageinfo['pn'] = current
        rows = int(rows or self.thumbrows)
        pageinfo['rows'] = rows
        cols = int(cols or self.thumbcols)
        pageinfo['cols'] = cols
        grpsize = cols * rows
        pageinfo['groupsize'] = grpsize
        # if start is empty use the group around current
        start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
        # int(current / grpsize) * grpsize +1))
        pageinfo['start'] = start

        np = int(docinfo.get('numPages', 0))
        if np == 0:
            # numPages unknown - maybe we can get it from text page
            if docinfo.get('textURLPath', None):
                # cache text page as well
                pageinfo['textPage'] = self.getTextPage(mode=viewType, pn=current, docinfo=docinfo, pageinfo=pageinfo)
                np = int(docinfo.get('numPages', 0))

        pageinfo['numgroups'] = int(np / grpsize)
        if np % grpsize > 0:
            pageinfo['numgroups'] += 1

        pageFlowLtr = docinfo.get('pageFlow', 'ltr') != 'rtl'
        oddScanLeft = docinfo.get('oddPage', 'left') != 'right'
        # add zeroth page for two columns
        pageZero = (cols == 2 and (pageFlowLtr != oddScanLeft))
        pageinfo['pageZero'] = pageZero
        pageinfo['pageBatch'] = self.getPageBatch(start=start, rows=rows, cols=cols, pageFlowLtr=pageFlowLtr, pageZero=pageZero, minIdx=1, maxIdx=np)

        # TODO: do we need this here?
        pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','reg')
        pageinfo['query'] = self.REQUEST.get('query',' ')
        pageinfo['queryType'] = self.REQUEST.get('queryType',' ')
        pageinfo['querySearch'] = self.REQUEST.get('querySearch', 'fulltext')
        pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','')
        pageinfo['tocPageSize'] = getInt(self.REQUEST.get('tocPageSize', 30))
        pageinfo['queryPageSize'] = getInt(self.REQUEST.get('queryPageSize', 10))
        pageinfo['tocPN'] = getInt(self.REQUEST.get('tocPN', '1'))
        pageinfo['searchPN'] = getInt(self.REQUEST.get('searchPN','1'))

        # limit tocPN
        if 'tocSize_%s'%tocMode in docinfo:
            tocSize = docinfo['tocSize_%s'%tocMode]
            tocPageSize = pageinfo['tocPageSize']
            # cached toc
            if tocSize%tocPageSize > 0:
                tocPages = tocSize/tocPageSize+1
            else:
                tocPages = tocSize/tocPageSize

            pageinfo['tocPN'] = min(tocPages, pageinfo['tocPN'])

        return pageinfo

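    # Worked example for the default start value: with current=17, rows=5, cols=2
    # (grpsize=10), start = ceil(17/10)*10 - 9 = 11, i.e. the thumbnail group 11-20
    # that contains the current page.
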
    def getPageBatch(self, start=1, rows=10, cols=2, pageFlowLtr=True, pageZero=False, minIdx=1, maxIdx=0):
        """returns dict with array of page informations for one screenfull of thumbnails"""
        batch = {}
        grpsize = rows * cols
        if maxIdx == 0:
            maxIdx = start + grpsize

        nb = int(math.ceil(maxIdx / float(grpsize)))
        # list of all batch start and end points
        batches = []
        if pageZero:
            ofs = 0
        else:
            ofs = 1

        for i in range(nb):
            s = i * grpsize + ofs
            e = min((i + 1) * grpsize + ofs - 1, maxIdx)
            batches.append({'start':s, 'end':e})

        batch['batches'] = batches

        pages = []
        if pageZero and start == 1:
            # correct beginning
            idx = 0
        else:
            idx = start

        for r in range(rows):
            row = []
            for c in range(cols):
                if idx < minIdx or idx > maxIdx:
                    page = {'idx':None}
                else:
                    page = {'idx':idx}

                idx += 1
                if pageFlowLtr:
                    row.append(page)
                else:
                    row.insert(0, page)

            pages.append(row)

        if start > 1:
            batch['prevStart'] = max(start - grpsize, 1)
        else:
            batch['prevStart'] = None

        if start + grpsize < maxIdx:
            batch['nextStart'] = start + grpsize
        else:
            batch['nextStart'] = None

        batch['pages'] = pages
        return batch

    def getBatch(self, start=1, size=10, end=0, data=None, fullData=True):
        """returns dict with information for one screenfull of data."""
        batch = {}
        if end == 0:
            end = start + size

        nb = int(math.ceil(end / float(size)))
        # list of all batch start and end points
        batches = []
        for i in range(nb):
            s = i * size + 1
            e = min((i + 1) * size, end)
            batches.append({'start':s, 'end':e})

        batch['batches'] = batches
        # list of elements in this batch
        this = []
        j = 0
        for i in range(start, min(start+size, end)):
            if data:
                if fullData:
                    d = data[i]
                else:
                    d = data[j]
                    j += 1

            else:
                d = i+1

            this.append(d)

        batch['this'] = this
        if start > 1:
            batch['prevStart'] = max(start - size, 1)
        else:
            batch['prevStart'] = None

        if start + size < end:
            batch['nextStart'] = start + size
        else:
            batch['nextStart'] = None

        return batch

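    # Illustrative call: getPageBatch(start=1, rows=1, cols=2, maxIdx=5) returns
    # {'batches': [{'start': 1, 'end': 2}, {'start': 3, 'end': 4}, {'start': 5, 'end': 5}],
    #  'pages': [[{'idx': 1}, {'idx': 2}]], 'prevStart': None, 'nextStart': 3}.
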
    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())

    def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
        """init document viewer"""
        self.title = title

Line 792 | Line 899   def changeDocumentViewer(self,title="",d

        self.thumbrows = thumbrows
        self.thumbcols = thumbcols
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]

        try:
            # assume MetaDataFolder instance is called metadata
            self.metadataService = getattr(self, 'metadata')
        except Exception, e:
            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))

        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')

Line 807 | Line 920   def manage_AddDocumentViewer(self,id,ima

    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')

## DocumentViewerTemplate class
class DocumentViewerTemplate(ZopePageTemplate):
    """Template for document viewer"""
    meta_type = "DocumentViewer Template"


def manage_addDocumentViewerTemplateForm(self):
    """Form for adding"""
    pt = PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
    return pt()

def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
                                     REQUEST=None, submit=None):
    "Add a Page Template with optional file content."

    self._setObject(id, DocumentViewerTemplate(id))
    ob = getattr(self, id)
    txt = file(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r').read()
    logging.info("txt %s:"%txt)
    ob.pt_edit(txt,"text/html")
    if title:
        ob.pt_setTitle(title)
    try:
        u = self.DestinationURL()
    except AttributeError:
        u = REQUEST['URL1']

    u = "%s/%s" % (u, urllib.quote(id))
    REQUEST.RESPONSE.redirect(u+'/manage_main')
    return ''