1 | from OFS.Folder import Folder |
---|
2 | from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate |
---|
3 | from Products.PageTemplates.PageTemplateFile import PageTemplateFile |
---|
4 | from App.ImageFile import ImageFile |
---|
5 | from AccessControl import ClassSecurityInfo |
---|
6 | from AccessControl import getSecurityManager |
---|
7 | |
---|
8 | import xml.etree.ElementTree as ET |
---|
9 | |
---|
10 | import os |
---|
11 | import urllib |
---|
12 | import logging |
---|
13 | import math |
---|
14 | import urlparse |
---|
15 | import json |
---|
16 | |
---|
17 | from Products.MetaDataProvider import MetaDataFolder |
---|
18 | |
---|
19 | from SrvTxtUtils import getInt, utf8ify, getText, getHttpData, refreshingImageFileIndexHtml, sslifyUrl |
---|
20 | |
---|
21 | |
---|
INDEXMETA_NS="http://md.mpiwg-berlin.mpg.de/ns/indexMeta#"

def removeINDEXMETA_NS(root):
    """Strips the indexMeta namespace prefix from all element tags in the tree.

    Modifies the tree in place so downstream code can match plain tag names.
    TODO: everything should be changed so that it can deal with namespaces
    properly instead of stripping them.

    @param root: ElementTree element whose subtree gets its tags rewritten
    """
    # hoist the invariant prefix out of the loop
    prefix = '{%s}' % INDEXMETA_NS
    # Element.iter() replaces the deprecated getiterator()
    for elem in root.iter():
        # comments/processing instructions have a callable .tag -- skip those
        if not hasattr(elem.tag, 'find'):
            continue
        i = elem.tag.find(prefix)
        if i >= 0:
            elem.tag = elem.tag[i + len(prefix):]
---|
35 | |
---|
def getMDText(node):
    """Returns the '@text' content from a MetaDataProvider metadata node.

    @param node: either a dict (its '@text' entry is returned), a list of
        dicts (the '@text' of the first entry *without* an '@attr' key is
        returned, or None if all entries have attributes), or any other
        value (returned unchanged).
    """
    if isinstance(node, dict):
        return node.get('@text', None)

    if isinstance(node, list):
        # more than one text entry: if there is an attribute, don't choose it
        for nodeInList in node:
            attr = nodeInList.get("@attr", None)
            if attr is None:
                # BUG FIX: was node.get(...) -- a list has no .get(), this
                # raised AttributeError instead of returning the entry's text
                return nodeInList.get('@text', None)
        return None

    return node
---|
49 | |
---|
def getParentPath(path, cnt=1):
    """Returns the pathname with the last cnt segments removed."""
    # drop any trailing slash, then cut cnt segments off the end
    segments = path.rstrip('/').split('/')
    return '/'.join(segments[:-cnt])
---|
56 | |
---|
def getPnForPf(docinfo, pf, default=0):
    """Returns the image (page) number for image file name pf, or default."""
    names = docinfo.get('imgFileNames', None)
    if names is None:
        return default

    pn = names.get(pf, None)
    if pn is not None:
        return pn

    # not found: retry with the file extension cut off
    dotpos = pf.rfind('.')
    if dotpos > 0:
        return names.get(pf[:dotpos], default)

    # no extension to cut -- give up
    return default
---|
75 | |
---|
def getPfForPn(docinfo, pn, default=None):
    """Returns the image file name for image (page) number pn, or default."""
    indexes = docinfo.get('imgFileIndexes', None)
    if indexes is None:
        return default
    return indexes.get(pn, default)
---|
83 | |
---|
84 | |
---|
85 | ## |
---|
86 | ## documentViewer class |
---|
87 | ## |
---|
class documentViewer(Folder):
    """document viewer"""
    # Zope product type name shown in the ZMI
    meta_type="Document viewer"

    security=ClassSecurityInfo()
    # add a "Configuration" tab to the ZMI management screens
    manage_options=Folder.manage_options+(
        {'label':'Configuration','action':'changeDocumentViewerForm'},
        )

    # resolved in __init__ from the acquired 'metadata' object
    metadataService = None
    """MetaDataFolder instance"""


    #
    # templates and forms
    #
    # viewMode templates -- index_html picks 'viewer_%s' % viewMode from these
    viewer_text = PageTemplateFile('zpt/viewer/viewer_text', globals())
    viewer_hocr = PageTemplateFile('zpt/viewer/viewer_hocr', globals())
    viewer_xml = PageTemplateFile('zpt/viewer/viewer_xml', globals())
    viewer_image = PageTemplateFile('zpt/viewer/viewer_image', globals())
    viewer_index = PageTemplateFile('zpt/viewer/viewer_index', globals())
    viewer_thumbs = PageTemplateFile('zpt/viewer/viewer_thumbs', globals())
    viewer_indexonly = PageTemplateFile('zpt/viewer/viewer_indexonly', globals())
    viewer_text_image = PageTemplateFile('zpt/viewer/viewer_text_image', globals())
    # available layer types (annotator not default)
    builtinLayers = {'text': ['dict','search','gis'],
                     'xml': None, 'image': None, 'index': ['extended'],'text_image': ['dict'],}
    # NOTE(review): this aliases builtinLayers (same dict object) until
    # setAvailableLayers replaces it with a copy
    availableLayers = builtinLayers;
    # layer templates, named layer_{viewMode}_{layer}
    layer_text_dict = PageTemplateFile('zpt/viewer/layer_text_dict', globals())
    layer_text_image_dict = PageTemplateFile('zpt/viewer/layer_text_image_dict', globals())
    layer_text_search = PageTemplateFile('zpt/viewer/layer_text_search', globals())
    layer_text_annotator = PageTemplateFile('zpt/viewer/layer_text_annotator', globals())
    layer_text_gis = PageTemplateFile('zpt/viewer/layer_text_gis', globals())
    layer_text_pundit = PageTemplateFile('zpt/viewer/layer_text_pundit', globals())
    layer_image_annotator = PageTemplateFile('zpt/viewer/layer_image_annotator', globals())
    layer_image_search = PageTemplateFile('zpt/viewer/layer_image_search', globals())
    layer_index_extended = PageTemplateFile('zpt/viewer/layer_index_extended', globals())
    # toc templates, named toc_{tocMode}
    toc_thumbs = PageTemplateFile('zpt/viewer/toc_thumbs', globals())
    toc_text = PageTemplateFile('zpt/viewer/toc_text', globals())
    toc_figures = PageTemplateFile('zpt/viewer/toc_figures', globals())
    toc_concordance = PageTemplateFile('zpt/viewer/toc_concordance', globals())
    toc_notes = PageTemplateFile('zpt/viewer/toc_notes', globals())
    toc_handwritten = PageTemplateFile('zpt/viewer/toc_handwritten', globals())
    toc_none = PageTemplateFile('zpt/viewer/toc_none', globals())
    # other templates
    common_template = PageTemplateFile('zpt/viewer/common_template', globals())
    info_xml = PageTemplateFile('zpt/viewer/info_xml', globals())
    docuviewer_css = ImageFile('css/docuviewer.css',globals())
    # make docuviewer_css refreshable for development
    docuviewer_css.index_html = refreshingImageFileIndexHtml
    docuviewer_ie_css = ImageFile('css/docuviewer_ie.css',globals())
    # make docuviewer_ie_css refreshable for development
    #docuviewer_ie_css.index_html = refreshingImageFileIndexHtml
    jquery_js = ImageFile('js/jquery.js',globals())
---|
145 | |
---|
146 | |
---|
    def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):
        """init document viewer

        Creates the 'template' subfolder and tries to populate it with a
        MpdlXmlTextServer ('fulltextclient') and a zogiLib ('zogilib')
        instance; failures are logged but non-fatal.

        @param id: Zope object id
        @param imageScalerUrl: digilib scaler URL handed to the zogiLib helper
        @param textServerName: server name for the MpdlXmlTextServer fulltext client
        @param title: Zope title
        @param digilibBaseUrl: base URL of the digilib installation (Scaler and
            viewer URLs are derived from it)
        @param thumbcols: number of columns in the thumbnail overview
        @param thumbrows: number of rows in the thumbnail overview
        @param authgroups: comma-separated list of authorized groups
        """
        self.id=id
        self.title=title
        self.thumbcols = thumbcols
        self.thumbrows = thumbrows
        # authgroups is list of authorized groups (delimited by ,)
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        # create template folder so we can always use template.something

        templateFolder = Folder('template')
        self['template'] = templateFolder # Zope-2.12 style
        #self._setObject('template',templateFolder) # old style
        try:
            import MpdlXmlTextServer
            textServer = MpdlXmlTextServer.MpdlXmlTextServer(id='fulltextclient',serverName=textServerName)
            templateFolder['fulltextclient'] = textServer
            #templateFolder._setObject('fulltextclient',textServer)
        except Exception, e:
            logging.error("Unable to create MpdlXmlTextServer for fulltextclient: "+str(e))

        try:
            from Products.zogiLib.zogiLib import zogiLib
            zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")
            templateFolder['zogilib'] = zogilib
            #templateFolder._setObject('zogilib',zogilib)
        except Exception, e:
            logging.error("Unable to create zogiLib for 'zogilib': "+str(e))

        try:
            # assume MetaDataFolder instance is called metadata
            self.metadataService = getattr(self, 'metadata')
        except Exception, e:
            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))

        if digilibBaseUrl is not None:
            self.digilibBaseUrl = digilibBaseUrl
            self.digilibScalerUrl = digilibBaseUrl + '/servlet/Scaler'
            self.digilibViewerUrl = digilibBaseUrl + '/jquery/digilib.html'
---|
186 | |
---|
187 | |
---|
    # proxy text server methods to fulltextclient
    # (each method simply delegates to the MpdlXmlTextServer instance
    # created as template/fulltextclient in __init__)
    def getTextPage(self, **args):
        """returns full text content of page"""
        return self.template.fulltextclient.getTextPage(**args)

    def getSearchResults(self, **args):
        """loads list of search results and stores XML in docinfo"""
        return self.template.fulltextclient.getSearchResults(**args)

    def getResultsPage(self, **args):
        """returns one page of the search results"""
        return self.template.fulltextclient.getResultsPage(**args)

    def getTextInfo(self, **args):
        """returns document info from the text server"""
        return self.template.fulltextclient.getTextInfo(**args)

    def getToc(self, **args):
        """loads table of contents and stores XML in docinfo"""
        return self.template.fulltextclient.getToc(**args)

    def getTocPage(self, **args):
        """returns one page of the table of contents"""
        return self.template.fulltextclient.getTocPage(**args)

    def getRepositoryType(self, **args):
        """get repository type"""
        return self.template.fulltextclient.getRepositoryType(**args)

    def getTextDownloadUrl(self, **args):
        """get URL to download the full text"""
        return self.template.fulltextclient.getTextDownloadUrl(**args)

    def getPlacesOnPage(self, **args):
        """get list of gis places on one page"""
        return self.template.fulltextclient.getPlacesOnPage(**args)
---|
224 | |
---|
    # Thumb list for CoolIris Plugin
    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
    security.declareProtected('View','thumbs_rss')
    def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
        '''
        Returns an RSS rendering of the thumbnail list (for the CoolIris plugin).
        @param mode: defines how to access the document behind url
        @param url: url which contains display information
        @param viewMode: image: display images, text: display text, default is auto (try text, else image)
        @param start: start page of the thumbnail list
        @param pn: current page number
        '''

        if not hasattr(self, 'template'):
            # this won't work without the template folder from __init__
            logging.error("template folder missing!")
            return "ERROR: template folder missing!"

        if not self.digilibBaseUrl:
            self.digilibBaseUrl = self.findDigilibUrl() or "http://digilib.mpiwg-berlin.mpg.de/digitallibrary"

        docinfo = self.getDocinfo(mode=mode,url=url)
        #pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
        pageinfo = self.getPageinfo(start=start,pn=pn, docinfo=docinfo)
        ''' ZDES '''
        pt = getattr(self.template, 'thumbs_main_rss')

        if viewMode=="auto": # auto mode selected
            if docinfo.has_key("textURL") or docinfo.get('textURLPath',None): # text URL is set and a text viewer is configured
                viewMode="text"
            else:
                viewMode="image"

        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
---|
258 | |
---|
259 | |
---|
    security.declareProtected('View','index_html')
    def index_html(self, url, mode="texttool", viewMode="auto", viewLayer=None, tocMode=None, start=None, pn=None, pf=None):
        """
        show page
        @param url: url which contains display information
        @param mode: defines how to access the document behind url
        @param viewMode: 'image': display images, 'text': display text, 'xml': display xml, default is 'auto', 'hocr' : hocr format
        @param viewLayer: sub-type of viewMode, e.g. layer 'dict' for viewMode='text'
        @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
        @param start: start page of the overview list
        @param pn: page number to display
        @param pf: page file name to display (alternative to pn)
        """

        logging.debug("documentViewer(index_html) mode=%s url=%s viewMode=%s viewLayer=%s start=%s pn=%s pf=%s"%(mode,url,viewMode,viewLayer,start,pn,pf))

        if not hasattr(self, 'template'):
            # this won't work without the template folder from __init__
            logging.error("template folder missing!")
            return "ERROR: template folder missing!"

        if not getattr(self, 'digilibBaseUrl', None):
            self.digilibBaseUrl = self.findDigilibUrl() or "http://digilib.mpiwg-berlin.mpg.de/digitallibrary"

        # mode=filepath should not have toc-thumbs
        if tocMode is None:
            if mode == "filepath":
                tocMode = "none"
            else:
                tocMode = "thumbs"

        # docinfo: information about document (cached in session)
        docinfo = self.getDocinfo(mode=mode,url=url,tocMode=tocMode)

        # userinfo: user settings (cached in session)
        userinfo = self.getUserinfo()

        # auto viewMode: text if there is a text else images
        if viewMode=="auto":
            if docinfo.get('textURLPath', None):
                # docinfo.get('textURL', None) not implemented yet
                viewMode = "text"
                if viewLayer is None and 'viewLayer' not in userinfo:
                    # use layer dict as default
                    viewLayer = "dict"
            else:
                viewMode = "image"

        elif viewMode == "text_dict":
            # legacy fix
            viewMode = "text"
            viewLayer = "dict"

        elif viewMode == 'images':
            # legacy fix
            viewMode = 'image'
            self.REQUEST['viewMode'] = 'image'

        # save viewLayer in userinfo
        userinfo['viewLayer'] = viewLayer

        # pageinfo: information about page (not cached)
        pageinfo = self.getPageinfo(start=start, pn=pn, pf=pf, docinfo=docinfo, userinfo=userinfo, viewMode=viewMode, viewLayer=viewLayer, tocMode=tocMode)

        # get template /template/viewer_$viewMode
        pt = getattr(self.template, 'viewer_%s'%viewMode, None)
        if pt is None:
            logging.error("No template for viewMode=%s!"%viewMode)
            # TODO: error page?
            return "No template for viewMode=%s!"%viewMode

        # and execute with parameters
        return pt(docinfo=docinfo, pageinfo=pageinfo)
---|
330 | |
---|
331 | def getAvailableLayers(self): |
---|
332 | """returns dict with list of available layers per viewMode""" |
---|
333 | return self.availableLayers |
---|
334 | |
---|
335 | def findDigilibUrl(self): |
---|
336 | """try to get the digilib URL from zogilib""" |
---|
337 | url = self.template.zogilib.getDLBaseUrl() |
---|
338 | return url |
---|
339 | |
---|
340 | def getScalerUrl(self, fn=None, pn=None, dw=100, dh=100, docinfo=None): |
---|
341 | """returns URL to digilib Scaler with params""" |
---|
342 | url = None |
---|
343 | if docinfo is not None: |
---|
344 | url = docinfo.get('imageURL', None) |
---|
345 | |
---|
346 | if url is None: |
---|
347 | url = self.digilibScalerUrl |
---|
348 | if fn is None and docinfo is not None: |
---|
349 | fn = docinfo.get('imagePath','') |
---|
350 | |
---|
351 | url += "fn=%s"%fn |
---|
352 | |
---|
353 | if pn: |
---|
354 | url += "&pn=%s"%pn |
---|
355 | |
---|
356 | url += "&dw=%s&dh=%s"%(dw,dh) |
---|
357 | return sslifyUrl(url, self, force=True) |
---|
358 | |
---|
359 | def getDocumentViewerURL(self): |
---|
360 | """returns the URL of this instance""" |
---|
361 | return self.absolute_url() |
---|
362 | |
---|
363 | def getStyle(self, idx, selected, style=""): |
---|
364 | """returns a string with the given style and append 'sel' if idx == selected.""" |
---|
365 | #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style)) |
---|
366 | if idx == selected: |
---|
367 | return style + 'sel' |
---|
368 | else: |
---|
369 | return style |
---|
370 | |
---|
371 | def getParams(self, param=None, val=None, params=None, duplicates=None): |
---|
372 | """returns dict with URL parameters. |
---|
373 | |
---|
374 | Takes URL parameters and additionally param=val or dict params. |
---|
375 | Deletes key if value is None.""" |
---|
376 | # copy existing request params |
---|
377 | newParams=self.REQUEST.form.copy() |
---|
378 | # change single param |
---|
379 | if param is not None: |
---|
380 | if val is None: |
---|
381 | if newParams.has_key(param): |
---|
382 | del newParams[param] |
---|
383 | else: |
---|
384 | newParams[param] = str(val) |
---|
385 | |
---|
386 | # change more params |
---|
387 | if params is not None: |
---|
388 | for (k, v) in params.items(): |
---|
389 | if v is None: |
---|
390 | # val=None removes param |
---|
391 | if newParams.has_key(k): |
---|
392 | del newParams[k] |
---|
393 | |
---|
394 | else: |
---|
395 | newParams[k] = v |
---|
396 | |
---|
397 | if duplicates: |
---|
398 | # eliminate lists (coming from duplicate keys) |
---|
399 | for (k,v) in newParams.items(): |
---|
400 | if isinstance(v, list): |
---|
401 | if duplicates == 'comma': |
---|
402 | # make comma-separated list of non-empty entries |
---|
403 | newParams[k] = ','.join([t for t in v if t]) |
---|
404 | elif duplicates == 'first': |
---|
405 | # take first non-empty entry |
---|
406 | newParams[k] = [t for t in v if t][0] |
---|
407 | |
---|
408 | return newParams |
---|
409 | |
---|
410 | def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&', duplicates='comma'): |
---|
411 | """returns URL to documentviewer with parameter param set to val or from dict params""" |
---|
412 | urlParams = self.getParams(param=param, val=val, params=params, duplicates=duplicates) |
---|
413 | # quote values and assemble into query string (not escaping '/') |
---|
414 | ps = paramSep.join(["%s=%s"%(k, urllib.quote_plus(utf8ify(v), '/')) for (k, v) in urlParams.items()]) |
---|
415 | if baseUrl is None: |
---|
416 | baseUrl = self.getDocumentViewerURL() |
---|
417 | |
---|
418 | url = "%s?%s"%(baseUrl, ps) |
---|
419 | return url |
---|
420 | |
---|
    def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None, duplicates='comma'):
        """link to documentviewer with parameter param set to val"""
        # NOTE(review): the method name suggests paramSep should be the
        # HTML-escaped '&amp;' -- the plain '&' here may be an artifact of
        # HTML-unescaping in the source extraction; confirm against the repo
        return self.getLink(param=param, val=val, params=params, baseUrl=baseUrl, paramSep='&', duplicates=duplicates)
---|
424 | |
---|
425 | |
---|
426 | def setAvailableLayers(self, newLayerString=None): |
---|
427 | """sets availableLayers to newLayerString or tries to autodetect available layers. |
---|
428 | assumes layer templates have the form layer_{m}_{l} for layer l in mode m. |
---|
429 | newLayerString is parsed as JSON.""" |
---|
430 | if newLayerString is not None: |
---|
431 | try: |
---|
432 | layers = json.loads(newLayerString) |
---|
433 | if 'text' in layers and 'image' in layers: |
---|
434 | self.availableLayers = layers |
---|
435 | return |
---|
436 | except: |
---|
437 | pass |
---|
438 | |
---|
439 | logging.error("invalid layers=%s! autodetecting..."%repr(newLayerString)) |
---|
440 | |
---|
441 | # start with builtin layers |
---|
442 | self.availableLayers = self.builtinLayers.copy() |
---|
443 | # add layers from templates |
---|
444 | for t in self.template: |
---|
445 | if t.startswith('layer_'): |
---|
446 | try: |
---|
447 | (x, m, l) = t.split('_', 3) |
---|
448 | if m not in self.availableLayers: |
---|
449 | # mode m doesn't exist -> new list |
---|
450 | self.availableLayers[m] = [l] |
---|
451 | |
---|
452 | else: |
---|
453 | # m exists -> append |
---|
454 | if l not in self.availableLayers[m]: |
---|
455 | self.availableLayers[m].append() |
---|
456 | |
---|
457 | except: |
---|
458 | pass |
---|
459 | |
---|
460 | def getAvailableLayersJson(self): |
---|
461 | """returns available layers as JSON string.""" |
---|
462 | return json.dumps(self.availableLayers) |
---|
463 | |
---|
464 | |
---|
465 | def getInfo_xml(self,url,mode): |
---|
466 | """returns info about the document as XML""" |
---|
467 | if not self.digilibBaseUrl: |
---|
468 | self.digilibBaseUrl = self.findDigilibUrl() or "http://digilib.mpiwg-berlin.mpg.de/digitallibrary" |
---|
469 | |
---|
470 | docinfo = self.getDocinfo(mode=mode,url=url) |
---|
471 | pt = getattr(self.template, 'info_xml') |
---|
472 | return pt(docinfo=docinfo) |
---|
473 | |
---|
474 | def getAuthenticatedUser(self, anon=None): |
---|
475 | """returns the authenticated user object or None. (ignores Zopes anonymous user)""" |
---|
476 | user = getSecurityManager().getUser() |
---|
477 | if user is not None and user.getUserName() != "Anonymous User": |
---|
478 | return user |
---|
479 | else: |
---|
480 | return anon |
---|
481 | |
---|
482 | def isAccessible(self, docinfo): |
---|
483 | """returns if access to the resource is granted""" |
---|
484 | access = docinfo.get('accessType', None) |
---|
485 | logging.debug("documentViewer (accessOK) access type %s"%access) |
---|
486 | if access == 'free': |
---|
487 | logging.debug("documentViewer (accessOK) access is free") |
---|
488 | return True |
---|
489 | |
---|
490 | elif access is None or access in self.authgroups: |
---|
491 | # only local access -- only logged in users |
---|
492 | user = self.getAuthenticatedUser() |
---|
493 | logging.debug("documentViewer (accessOK) user=%s ip=%s"%(user,self.REQUEST.getClientAddr())) |
---|
494 | return (user is not None) |
---|
495 | |
---|
496 | logging.error("documentViewer (accessOK) unknown access type %s"%access) |
---|
497 | return False |
---|
498 | |
---|
499 | def getUserinfo(self): |
---|
500 | """returns userinfo object""" |
---|
501 | logging.debug("getUserinfo") |
---|
502 | userinfo = {} |
---|
503 | # look for cached userinfo in session |
---|
504 | if self.REQUEST.SESSION.has_key('userinfo'): |
---|
505 | userinfo = self.REQUEST.SESSION['userinfo'] |
---|
506 | # check if its still current? |
---|
507 | else: |
---|
508 | # store in session |
---|
509 | self.REQUEST.SESSION['userinfo'] = userinfo |
---|
510 | |
---|
511 | return userinfo |
---|
512 | |
---|
513 | def getDocinfoJSON(self, mode, url, tocMode=None): |
---|
514 | """returns docinfo depending on mode""" |
---|
515 | import json |
---|
516 | |
---|
517 | dc = self.getDocinfo( mode, url, tocMode) |
---|
518 | |
---|
519 | return json.dumps(dc) |
---|
520 | |
---|
521 | |
---|
    def getDocinfo(self, mode, url, tocMode=None):
        """returns docinfo depending on mode.

        Builds (and caches in the session) a dict describing the document:
        URLs, index.meta contents, bibliographic/access/attribution data,
        page counts and image paths.

        @param mode: one of 'texttool', 'textpath', 'imagepath', 'hocr',
            'filepath' -- determines how url is interpreted
        @param url: document url or path
        @param tocMode: toc mode (currently unused here)
        @raise IOError: mode=texttool and no index.meta found
        @raise ValueError: unknown mode
        """
        logging.debug("getDocinfo: mode=%s, url=%s"%(mode,url))
        # look for cached docinfo in session
        if self.REQUEST.SESSION.has_key('docinfo'):
            docinfo = self.REQUEST.SESSION['docinfo']
            # check if its still current
            if docinfo is not None and docinfo.get('mode', None) == mode and docinfo.get('url', None) == url:
                logging.debug("getDocinfo: docinfo in session. keys=%s"%docinfo.keys())
                return docinfo

        # new docinfo
        docinfo = {'mode': mode, 'url': url}
        # add self url
        docinfo['viewerUrl'] = self.getDocumentViewerURL()
        docinfo['digilibBaseUrl'] = sslifyUrl(self.digilibBaseUrl, self, force=True)
        docinfo['digilibScalerUrl'] = sslifyUrl(self.digilibScalerUrl, self, force=True)
        docinfo['digilibViewerUrl'] = sslifyUrl(self.digilibViewerUrl, self, force=True)
        # get index.meta DOM
        docUrl = None
        metaDom = None
        if mode=="texttool":
            # url points to document dir or index.meta
            metaDom = self.metadataService.getDomFromPathOrUrl(url)
            removeINDEXMETA_NS(metaDom)

            if metaDom is None:
                raise IOError("Unable to find index.meta for mode=texttool!")

            docUrl = url.replace('/index.meta', '')
            if url.startswith('/mpiwg/online/'):
                docUrl = url.replace('/mpiwg/online/', '', 1)

        elif mode=="textpath":
            # url points to a textfile
            # index.meta optional
            # assume index.meta in parent dir
            docUrl = getParentPath(url)
            docinfo['viewmode'] = "text"

            try:
                metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
                removeINDEXMETA_NS(metaDom)

            except:
                metaDom = None

            #metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
            #docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)
            docinfo['textURLPath'] = url.replace('/mpiwg/online', '', 1)
            docinfo['textURL'] = url
            if docinfo.get("creator", None) is None:
                docinfo['creator'] = ""

            if docinfo.get("title", None) is None:
                docinfo['title'] = ""

            if docinfo.get("documentPath", None) is None:
                # NOTE(review): the second assignment overwrites the first,
                # so '/mpiwg/online' is never stripped -- probably both
                # replacements were meant to be chained; confirm intent
                docinfo['documentPath'] = url.replace('/mpiwg/online', '', 1)
                docinfo['documentPath'] = url.replace('/pages', '', 1)

            docinfo['numPages'] = 1

        elif mode=="imagepath":
            # url points to folder with images, index.meta optional
            # asssume index.meta in parent dir
            docUrl = getParentPath(url)
            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
            docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)

        elif mode=="hocr":
            # url points to folder with images, index.meta optional
            # asssume index.meta in parent dir
            docUrl = getParentPath(url)
            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
            docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)
            docinfo['textURLPath'] = url.replace('/mpiwg/online', '', 1)
            if docinfo.get("creator", None) is None:
                docinfo['creator'] = ""

            if docinfo.get("title", None) is None:
                docinfo['title'] = ""

            if docinfo.get("documentPath", None) is None:
                # NOTE(review): same overwrite issue as in the textpath
                # branch above -- second assignment discards the first
                docinfo['documentPath'] = url.replace('/mpiwg/online', '', 1)
                docinfo['documentPath'] = url.replace('/pages', '', 1)

        elif mode=="filepath":
            # url points to image file, index.meta optional
            docinfo['imageURL'] = "%s?fn=%s"%(self.digilibScalerUrl, url)
            docinfo['numPages'] = 1
            # asssume index.meta is two path segments up
            docUrl = getParentPath(url, 2)
            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)

        else:
            logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode)
            raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))

        docinfo['documentUrl'] = docUrl
        # process index.meta contents
        # (accept both a namespace-stripped and a namespaced resource root)
        if metaDom is not None and (metaDom.tag == 'resource' or metaDom.tag == "{%s}resource"%INDEXMETA_NS):
            # document directory name and path
            resource = self.metadataService.getResourceData(dom=metaDom, recursive=1)
            if resource:
                docinfo = self.getDocinfoFromResource(docinfo, resource)

            # texttool info
            texttool = self.metadataService.getTexttoolData(dom=metaDom, recursive=1, all=True)
            if texttool:
                docinfo = self.getDocinfoFromTexttool(docinfo, texttool)
                # document info from full text server
                if docinfo.get('textURLPath', None):
                    docinfo = self.getTextInfo(mode=None, docinfo=docinfo)
                    # include list of pages TODO: do we need this always?
                    docinfo = self.getTextInfo(mode='pages', docinfo=docinfo)

            # bib info
            bib = self.metadataService.getBibData(dom=metaDom)
            if bib:
                # save extended version as 'bibx' TODO: ugly
                bibx = self.metadataService.getBibData(dom=metaDom, all=True, recursive=1)
                if len(bibx) == 1:
                    # unwrap list if possible
                    bibx = bibx[0]

                docinfo['bibx'] = bibx
                docinfo = self.getDocinfoFromBib(docinfo, bib, bibx)
            else:
                # no bib - try info.xml
                docinfo = self.getDocinfoFromPresentationInfoXml(docinfo)

            # auth info
            access = self.metadataService.getAccessData(dom=metaDom)
            if access:
                docinfo = self.getDocinfoFromAccess(docinfo, access)

            # attribution info
            attribution = self.metadataService.getAttributionData(dom=metaDom)
            if attribution:
                logging.debug("getDocinfo: attribution=%s"%repr(attribution))
                docinfo['attribution'] = attribution

            # copyright info
            copyright = self.metadataService.getCopyrightData(dom=metaDom)
            if copyright:
                logging.debug("getDocinfo: copyright=%s"%repr(copyright))
                docinfo['copyright'] = copyright

            # DRI (permanent ID)
            dri = self.metadataService.getDRI(dom=metaDom, type='mpiwg')
            if dri:
                docinfo['DRI'] = dri

            # (presentation) context
            ctx = self.metadataService.getContextData(dom=metaDom, all=True)
            if ctx:
                logging.debug("getcontext: ctx=%s"%repr(ctx))
                docinfo['presentationContext'] = ctx

        # image path
        if mode != 'texttool':
            # override image path from texttool with url parameter TODO: how about mode=auto?
            docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)

        # check numPages
        if docinfo.get('numPages', 0) == 0:
            # number of images from digilib
            if docinfo.get('imagePath', None):
                imgpath = docinfo['imagePath'].replace('/mpiwg/online', '', 1)
                logging.debug("imgpath=%s"%imgpath)
                docinfo['imageURL'] = sslifyUrl("%s?fn=%s"%(self.digilibScalerUrl, imgpath), self, force=True)
                docinfo = self.getDocinfoFromDigilib(docinfo, imgpath)
            else:
                # imagePath still missing? try "./pageimg"
                imgPath = os.path.join(docUrl, 'pageimg')
                docinfo = self.getDocinfoFromDigilib(docinfo, imgPath)
                if docinfo.get('numPages', 0) > 0:
                    # there are pages
                    docinfo['imagePath'] = imgPath
                    docinfo['imageURL'] = sslifyUrl("%s?fn=%s"%(self.digilibScalerUrl, docinfo['imagePath']), self, force=True)

        # check numPages
        if docinfo.get('numPages', 0) == 0:
            if docinfo.get('numTextPages', 0) > 0:
                # replace with numTextPages (text-only?)
                docinfo['numPages'] = docinfo['numTextPages']

        # min and max page no
        docinfo['minPageNo'] = docinfo.get('minPageNo', 1)
        docinfo['maxPageNo'] = docinfo.get('maxPageNo', docinfo['numPages'])

        # part-of information
        partOfPath = docinfo.get('partOfPath', None)
        if partOfPath is not None:
            partOfDom = self.metadataService.getDomFromPathOrUrl(partOfPath)
            if partOfDom is not None:
                docinfo['partOfLabel'] = self.metadataService.getBibFormattedLabel(dom=partOfDom)
                docinfo['partOfUrl'] = "%s?url=%s"%(self.getDocumentViewerURL(), partOfPath)
                logging.debug("partOfLabel=%s partOfUrl=%s"%(docinfo['partOfLabel'],docinfo['partOfUrl']))

        # normalize path
        if 'imagePath' in docinfo and not docinfo['imagePath'].startswith('/'):
            docinfo['imagePath'] = '/' + docinfo['imagePath']

        logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys())
        # store in session
        self.REQUEST.SESSION['docinfo'] = docinfo
        return docinfo
---|
732 | |
---|
733 | |
---|
734 | def getDocinfoFromResource(self, docinfo, resource): |
---|
735 | """reads contents of resource element into docinfo""" |
---|
736 | logging.debug("getDocinfoFromResource: resource=%s"%(repr(resource))) |
---|
737 | docName = getMDText(resource.get('name', None)) |
---|
738 | docinfo['documentName'] = docName |
---|
739 | docPath = getMDText(resource.get('archive-path', None)) |
---|
740 | if docPath: |
---|
741 | # clean up document path |
---|
742 | if docPath[0] != '/': |
---|
743 | docPath = '/' + docPath |
---|
744 | |
---|
745 | if docName and (not docPath.endswith(docName)): |
---|
746 | docPath += "/" + docName |
---|
747 | |
---|
748 | else: |
---|
749 | # use docUrl as docPath |
---|
750 | docUrl = docinfo['documentURL'] |
---|
751 | if not docUrl.startswith('http:'): |
---|
752 | docPath = docUrl |
---|
753 | |
---|
754 | if docPath: |
---|
755 | # fix URLs starting with /mpiwg/online |
---|
756 | docPath = docPath.replace('/mpiwg/online', '', 1) |
---|
757 | |
---|
758 | docinfo['documentPath'] = docPath |
---|
759 | |
---|
760 | # is this part-of? |
---|
761 | partOf = resource.get('is-part-of', None) |
---|
762 | if partOf is not None: |
---|
763 | partOf = getMDText(partOf.get('archive-path', None)) |
---|
764 | if partOf is not None: |
---|
765 | docinfo['partOfPath'] = partOf.strip() |
---|
766 | |
---|
767 | return docinfo |
---|
768 | |
---|
    def getDocinfoFromTexttool(self, docinfo, texttool):
        """reads contents of texttool element into docinfo

        Fills image path, page number range, text URLs (old and new style),
        page flow / layout hints and the old-style presentation URL from the
        texttool metadata element. Also guarantees fallback DC fields
        (creator, title, date) so templates never see missing keys.

        :param docinfo: document info dict (modified in place)
        :param texttool: texttool metadata element (dict, or list of dicts
            of which only the first is used)
        :returns: the updated docinfo dict
        """
        logging.debug("texttool=%s"%repr(texttool))
        # unpack list if necessary (repeated metadata elements come as a list)
        if isinstance(texttool, list):
            texttool = texttool[0]

        # image dir
        imageDir = getMDText(texttool.get('image', None))
        docPath = getMDText(docinfo.get('documentPath', None))
        if imageDir:
            if imageDir.startswith('/'):
                # absolute path - strip the legacy /mpiwg/online prefix
                imageDir = imageDir.replace('/mpiwg/online', '', 1)
                docinfo['imagePath'] = imageDir

            elif docPath:
                # relative path - resolve against the document path
                imageDir = os.path.join(docPath, imageDir)
                imageDir = imageDir.replace('/mpiwg/online', '', 1)
                docinfo['imagePath'] = imageDir

        # start and end page (for subdocuments of other documents)
        imgStartNo = getMDText(texttool.get('image-start-no', None))
        minPageNo = getInt(imgStartNo, 1)
        docinfo['minPageNo'] = minPageNo

        imgEndNo = getMDText(texttool.get('image-end-no', None))
        if imgEndNo:
            docinfo['maxPageNo'] = getInt(imgEndNo)

        # old style text URL
        textUrl = getMDText(texttool.get('text', None))

        if textUrl and docPath:
            if urlparse.urlparse(textUrl)[0] == "": # no URL scheme - treat as a path relative to the document
                textUrl = os.path.join(docPath, textUrl)

            docinfo['textURL'] = textUrl

        # new style text-url-path (can be more than one with "repository" attribute)
        textUrlNode = texttool.get('text-url-path', None)
        if not isinstance(textUrlNode, list):
            textUrlNode = [textUrlNode]

        for tun in textUrlNode:
            textUrl = getMDText(tun)
            if textUrl:
                textUrlAtts = tun.get('@attr')
                if (textUrlAtts and 'repository' in textUrlAtts):
                    textRepo = textUrlAtts['repository']
                    # use only the entry whose repository matches ours, then stop
                    if self.getRepositoryType() == textRepo:
                        docinfo['textURLPath'] = textUrl
                        docinfo['textURLRepository'] = textRepo
                        break

                else:
                    # no repo attribute - use always (may be overwritten by a later match)
                    docinfo['textURLPath'] = textUrl

        # page flow (reading direction, 'ltr' or 'rtl')
        docinfo['pageFlow'] = getMDText(texttool.get('page-flow', 'ltr'))

        # side on which odd-numbered scans appear ('left' or 'right')
        docinfo['oddPage'] = getMDText(texttool.get('odd-scan-position', 'left'))

        # number of title page (defaults to the first page of the document)
        docinfo['titlePage'] = getMDText(texttool.get('title-scan-no', minPageNo))

        # old presentation stuff (info.xml with DC-like data)
        presentation = getMDText(texttool.get('presentation', None))
        if presentation and docPath:
            if presentation.startswith('http:'):
                docinfo['presentationUrl'] = presentation
            else:
                docinfo['presentationUrl'] = os.path.join(docPath, presentation)

        # make sure we have at least fake DC data
        if 'creator' not in docinfo:
            docinfo['creator'] = '[no author found]'

        if 'title' not in docinfo:
            docinfo['title'] = '[no title found]'

        if 'date' not in docinfo:
            docinfo['date'] = '[no date found]'

        return docinfo
---|
858 | |
---|
859 | def getDocinfoFromBib(self, docinfo, bib, bibx=None): |
---|
860 | """reads contents of bib element into docinfo""" |
---|
861 | logging.debug("getDocinfoFromBib bib=%s"%repr(bib)) |
---|
862 | # put all raw bib fields in dict "bib" |
---|
863 | docinfo['bib'] = bib |
---|
864 | bibtype = bib.get('@type', None) |
---|
865 | docinfo['bibType'] = bibtype |
---|
866 | # also store DC metadata for convenience |
---|
867 | dc = self.metadataService.getDCMappedData(bib) |
---|
868 | docinfo['creator'] = dc.get('creator','') |
---|
869 | docinfo['title'] = dc.get('title','') |
---|
870 | docinfo['date'] = dc.get('date','') |
---|
871 | return docinfo |
---|
872 | |
---|
873 | def getDocinfoFromAccess(self, docinfo, acc): |
---|
874 | """reads contents of access element into docinfo""" |
---|
875 | #TODO: also read resource type |
---|
876 | logging.debug("getDocinfoFromAccess acc=%s"%repr(acc)) |
---|
877 | try: |
---|
878 | acctype = acc['@attr']['type'] |
---|
879 | if acctype: |
---|
880 | access=acctype |
---|
881 | if access in ['group', 'institution']: |
---|
882 | access = acc['name'].lower() |
---|
883 | |
---|
884 | docinfo['accessType'] = access |
---|
885 | |
---|
886 | except: |
---|
887 | pass |
---|
888 | |
---|
889 | return docinfo |
---|
890 | |
---|
891 | def getDocinfoFromDigilib(self, docinfo, path): |
---|
892 | infoUrl=self.digilibBaseUrl+"/api/dirInfo-xml.jsp?fn="+path |
---|
893 | # fetch data |
---|
894 | txt = getHttpData(infoUrl) |
---|
895 | if not txt: |
---|
896 | logging.error("Unable to get dir-info from %s"%(infoUrl)) |
---|
897 | return docinfo |
---|
898 | |
---|
899 | dom = ET.fromstring(txt) |
---|
900 | dir = dom |
---|
901 | # save size |
---|
902 | size = dir.findtext('size') |
---|
903 | logging.debug("getDocinfoFromDigilib: size=%s"%size) |
---|
904 | if size: |
---|
905 | docinfo['numPages'] = int(size) |
---|
906 | else: |
---|
907 | docinfo['numPages'] = 0 |
---|
908 | return docinfo |
---|
909 | |
---|
910 | # save list of image names and numbers |
---|
911 | imgNames = {} |
---|
912 | imgIndexes = {} |
---|
913 | for f in dir: |
---|
914 | fn = f.findtext('name') |
---|
915 | pn = getInt(f.findtext('index')) |
---|
916 | imgNames[fn] = pn |
---|
917 | imgIndexes[pn] = fn |
---|
918 | |
---|
919 | docinfo['imgFileNames'] = imgNames |
---|
920 | docinfo['imgFileIndexes'] = imgIndexes |
---|
921 | return docinfo |
---|
922 | |
---|
923 | |
---|
924 | def getDocinfoFromPresentationInfoXml(self,docinfo): |
---|
925 | """gets DC-like bibliographical information from the presentation entry in texttools""" |
---|
926 | url = docinfo.get('presentationUrl', None) |
---|
927 | if not url: |
---|
928 | logging.error("getDocinfoFromPresentation: no URL!") |
---|
929 | return docinfo |
---|
930 | |
---|
931 | dom = None |
---|
932 | metaUrl = None |
---|
933 | if url.startswith("http://"): |
---|
934 | # real URL |
---|
935 | metaUrl = url |
---|
936 | else: |
---|
937 | # online path |
---|
938 | server=self.digilibBaseUrl+"/servlet/Texter?fn=" |
---|
939 | metaUrl=server+url |
---|
940 | |
---|
941 | txt=getHttpData(metaUrl) |
---|
942 | if txt is None: |
---|
943 | logging.error("Unable to read info.xml from %s"%(url)) |
---|
944 | return docinfo |
---|
945 | |
---|
946 | dom = ET.fromstring(txt) |
---|
947 | docinfo['creator']=getText(dom.find(".//author")) |
---|
948 | docinfo['title']=getText(dom.find(".//title")) |
---|
949 | docinfo['date']=getText(dom.find(".//date")) |
---|
950 | return docinfo |
---|
951 | |
---|
952 | |
---|
    def getPageinfo(self, pn=None, pf=None, start=None, rows=None, cols=None, docinfo=None, userinfo=None, viewMode=None, viewLayer=None, tocMode=None):
        """returns pageinfo with the given parameters

        Assembles the per-request page state dict: current page number (pn)
        and file name (pf), thumbnail grid geometry, batch information,
        TOC, search and highlighting parameters. Reads additional
        parameters from self.REQUEST.

        :param pn: page number (pf takes precedence if both are given)
        :param pf: page file name
        :param start: first page of the current thumbnail batch
        :param rows, cols: thumbnail grid size (defaults from instance config)
        :param docinfo: document info dict (numPages may be updated in place)
        :param userinfo: unused here  # NOTE(review): accepted but never read - confirm callers
        :param viewMode, viewLayer, tocMode: display options
        :returns: new pageinfo dict
        """
        logging.debug("getPageInfo(pn=%s, pf=%s, start=%s, rows=%s, cols=%s, viewMode=%s, viewLayer=%s, tocMode=%s)"%(pn,pf,start,rows,cols,viewMode,viewLayer,tocMode))
        pageinfo = {}
        pageinfo['viewMode'] = viewMode
        # split comma-separated viewLayer string if necessary
        if isinstance(viewLayer,basestring):
            viewLayer = viewLayer.split(',')

        if isinstance(viewLayer, list):
            logging.debug("getPageinfo: viewLayer is list:%s"%viewLayer)
            # save de-duplicated list in viewLayers (seen.add returns None, so
            # "not seen.add(l)" is always true and only performs the insertion)
            seen = set()
            viewLayers = [l for l in viewLayer if l and l not in seen and not seen.add(l)]
            pageinfo['viewLayers'] = viewLayers
            # stringify viewLayer
            viewLayer = ','.join(viewLayers)
        else:
            #create single-element list
            pageinfo['viewLayers'] = [viewLayer]

        pageinfo['viewLayer'] = viewLayer
        pageinfo['tocMode'] = tocMode

        minPageNo = docinfo.get('minPageNo', 1)

        # pf takes precedence over pn
        if pf:
            pageinfo['pf'] = pf
            pn = getPnForPf(docinfo, pf)
            # replace pf in request params (used for creating new URLs)
            self.REQUEST.form.pop('pf', None)
            self.REQUEST.form['pn'] = pn
        else:
            pn = getInt(pn, minPageNo)
            pf = getPfForPn(docinfo, pn)
            pageinfo['pf'] = pf

        pageinfo['pn'] = pn
        rows = int(rows or self.thumbrows)
        pageinfo['rows'] = rows
        cols = int(cols or self.thumbcols)
        pageinfo['cols'] = cols
        grpsize = cols * rows
        pageinfo['groupsize'] = grpsize
        # if start is empty use the first page of the group containing pn
        grouppn = math.ceil(float(pn)/float(grpsize))*grpsize-(grpsize-1)
        # but not smaller than minPageNo
        start = getInt(start, max(grouppn, minPageNo))
        pageinfo['start'] = start
        # get number of pages
        numPages = int(docinfo.get('numPages', 0))
        if numPages == 0:
            # try numTextPages (text-only documents)
            numPages = docinfo.get('numTextPages', 0)
            if numPages != 0:
                docinfo['numPages'] = numPages

        maxPageNo = docinfo.get('maxPageNo', numPages)
        logging.debug("minPageNo=%s maxPageNo=%s start=%s numPages=%s"%(minPageNo,maxPageNo,start,numPages))
        np = maxPageNo

        # table of contents paging
        pageinfo['tocPageSize'] = getInt(self.REQUEST.get('tocPageSize', 30))
        pageinfo['numgroups'] = int(np / grpsize)
        if np % grpsize > 0:
            pageinfo['numgroups'] += 1

        pageFlowLtr = docinfo.get('pageFlow', 'ltr') != 'rtl'
        oddScanLeft = docinfo.get('oddPage', 'left') != 'right'
        # add invisible zeroth page for two-column layout when the first
        # scan would otherwise land on the wrong side
        pageZero = (cols == 2 and (pageFlowLtr != oddScanLeft))
        pageinfo['pageZero'] = pageZero
        pageinfo['pageBatch'] = self.getPageBatch(start=start, rows=rows, cols=cols, pageFlowLtr=pageFlowLtr, pageZero=pageZero, minIdx=minPageNo, maxIdx=np)
        # more page parameters
        #pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','reg')
        #because it is buggy this is currently disabled and set to orig.
        pageinfo['characterNormalization'] = 'orig'
        if docinfo.get('pageNumbers'):
            # get original (printed) page numbers for the current scan
            pageNumber = docinfo['pageNumbers'].get(pn, None)
            if pageNumber is not None:
                pageinfo['pageNumberOrig'] = pageNumber['no']
                pageinfo['pageNumberOrigNorm'] = pageNumber['non']

        # search results (only computed in text view mode)
        query = self.REQUEST.get('query',None)
        pageinfo['query'] = query
        if query and viewMode == 'text':
            pageinfo['resultPageSize'] = getInt(self.REQUEST.get('resultPageSize', 10))
            queryType = self.REQUEST.get('queryType', 'fulltextMorph')
            pageinfo['queryType'] = queryType
            pageinfo['resultStart'] = getInt(self.REQUEST.get('resultStart', '1'))
            self.getSearchResults(mode=queryType, query=query, pageinfo=pageinfo, docinfo=docinfo)

        # highlighting of search hits
        highlightQuery = self.REQUEST.get('highlightQuery', None)
        if highlightQuery:
            pageinfo['highlightQuery'] = highlightQuery
            pageinfo['highlightElement'] = self.REQUEST.get('highlightElement', '')
            pageinfo['highlightElementPos'] = self.REQUEST.get('highlightElementPos', '')

        return pageinfo
---|
1056 | |
---|
1057 | |
---|
1058 | def getPageBatch(self, start=1, rows=10, cols=2, pageFlowLtr=True, pageZero=False, minIdx=1, maxIdx=0): |
---|
1059 | """Return dict with array of page information for one screenfull of thumbnails. |
---|
1060 | |
---|
1061 | :param start: index of current page |
---|
1062 | :param rows: number of rows in one batch |
---|
1063 | :param cols: number of columns in one batch |
---|
1064 | :param pageFlowLtr: do indexes increase from left to right |
---|
1065 | :param pageZero: is there a zeroth non-visible page |
---|
1066 | :param minIdx: minimum index to use |
---|
1067 | :param maxIdx: maximum index to use |
---|
1068 | :returns: dict with |
---|
1069 | first: first page index |
---|
1070 | last: last page index |
---|
1071 | batches: list of all possible batches(dict: 'start': index, 'end': index) |
---|
1072 | pages: list for current batch of rows(list of cols(list of pages(dict: 'idx': index))) |
---|
1073 | nextStart: first index of next batch |
---|
1074 | prevStart: first index of previous batch |
---|
1075 | """ |
---|
1076 | logging.debug("getPageBatch start=%s minIdx=%s maxIdx=%s"%(start,minIdx,maxIdx)) |
---|
1077 | batch = {} |
---|
1078 | grpsize = rows * cols |
---|
1079 | if maxIdx == 0: |
---|
1080 | maxIdx = start + grpsize |
---|
1081 | |
---|
1082 | np = maxIdx - minIdx + 1 |
---|
1083 | if pageZero: |
---|
1084 | # correct number of pages for batching |
---|
1085 | np += 1 |
---|
1086 | |
---|
1087 | nb = int(math.ceil(np / float(grpsize))) |
---|
1088 | |
---|
1089 | # list of all batch start and end points |
---|
1090 | batches = [] |
---|
1091 | if pageZero: |
---|
1092 | ofs = minIdx - 1 |
---|
1093 | else: |
---|
1094 | ofs = minIdx |
---|
1095 | |
---|
1096 | for i in range(nb): |
---|
1097 | s = i * grpsize + ofs |
---|
1098 | e = min((i + 1) * grpsize + ofs - 1, maxIdx) |
---|
1099 | batches.append({'start':s, 'end':e}) |
---|
1100 | |
---|
1101 | batch['batches'] = batches |
---|
1102 | |
---|
1103 | # list of pages for current screen |
---|
1104 | pages = [] |
---|
1105 | if pageZero and start == minIdx: |
---|
1106 | # correct beginning |
---|
1107 | idx = minIdx - 1 |
---|
1108 | else: |
---|
1109 | idx = start |
---|
1110 | |
---|
1111 | for r in range(rows): |
---|
1112 | row = [] |
---|
1113 | for c in range(cols): |
---|
1114 | if idx < minIdx or idx > maxIdx: |
---|
1115 | page = {'idx':None} |
---|
1116 | else: |
---|
1117 | page = {'idx':idx} |
---|
1118 | |
---|
1119 | idx += 1 |
---|
1120 | if pageFlowLtr: |
---|
1121 | row.append(page) |
---|
1122 | else: |
---|
1123 | row.insert(0, page) |
---|
1124 | |
---|
1125 | pages.append(row) |
---|
1126 | |
---|
1127 | if start > minIdx: |
---|
1128 | batch['prevStart'] = max(start - grpsize, minIdx) |
---|
1129 | else: |
---|
1130 | batch['prevStart'] = None |
---|
1131 | |
---|
1132 | if start + grpsize <= maxIdx: |
---|
1133 | if pageZero and start == minIdx: |
---|
1134 | # correct nextStart for pageZero |
---|
1135 | batch['nextStart'] = grpsize |
---|
1136 | else: |
---|
1137 | batch['nextStart'] = start + grpsize |
---|
1138 | else: |
---|
1139 | batch['nextStart'] = None |
---|
1140 | |
---|
1141 | batch['pages'] = pages |
---|
1142 | batch['first'] = minIdx |
---|
1143 | batch['last'] = maxIdx |
---|
1144 | logging.debug("batch: %s"%repr(batch)) |
---|
1145 | return batch |
---|
1146 | |
---|
1147 | |
---|
1148 | def getBatch(self, start=1, size=10, end=0, data=None, fullData=True): |
---|
1149 | """returns dict with information for one screenfull of data.""" |
---|
1150 | batch = {} |
---|
1151 | if end == 0: |
---|
1152 | end = start + size |
---|
1153 | |
---|
1154 | nb = int(math.ceil(end / float(size))) |
---|
1155 | # list of all batch start and end points |
---|
1156 | batches = [] |
---|
1157 | for i in range(nb): |
---|
1158 | s = i * size + 1 |
---|
1159 | e = min((i + 1) * size, end) |
---|
1160 | batches.append({'start':s, 'end':e}) |
---|
1161 | |
---|
1162 | batch['batches'] = batches |
---|
1163 | # list of elements in this batch |
---|
1164 | this = [] |
---|
1165 | j = 0 |
---|
1166 | for i in range(start, min(start+size, end+1)): |
---|
1167 | if data: |
---|
1168 | if fullData: |
---|
1169 | d = data.get(i, None) |
---|
1170 | else: |
---|
1171 | d = data.get(j, None) |
---|
1172 | j += 1 |
---|
1173 | |
---|
1174 | else: |
---|
1175 | d = i+1 |
---|
1176 | |
---|
1177 | this.append(d) |
---|
1178 | |
---|
1179 | batch['this'] = this |
---|
1180 | if start > 1: |
---|
1181 | batch['prevStart'] = max(start - size, 1) |
---|
1182 | else: |
---|
1183 | batch['prevStart'] = None |
---|
1184 | |
---|
1185 | if start + size < end: |
---|
1186 | batch['nextStart'] = start + size |
---|
1187 | else: |
---|
1188 | batch['nextStart'] = None |
---|
1189 | |
---|
1190 | batch['first'] = start |
---|
1191 | batch['last'] = end |
---|
1192 | return batch |
---|
1193 | |
---|
1194 | |
---|
1195 | def getAnnotatorGroupsForUser(self, user, annotationServerUrl="http://tuxserve03.mpiwg-berlin.mpg.de/AnnotationManager"): |
---|
1196 | """returns list of groups {name:*, id:*} on the annotation server for the user""" |
---|
1197 | groups = [] |
---|
1198 | # add matching http(s) from our URL |
---|
1199 | annotationServerUrl = sslifyUrl(annotationServerUrl, self) |
---|
1200 | |
---|
1201 | groupsUrl = "%s/annotator/groups?user=%s"%(annotationServerUrl,user) |
---|
1202 | data = getHttpData(url=groupsUrl, noExceptions=True) |
---|
1203 | if data: |
---|
1204 | res = json.loads(data) |
---|
1205 | rows = res.get('rows', None) |
---|
1206 | if rows is None: |
---|
1207 | return groups |
---|
1208 | for r in rows: |
---|
1209 | groups.append({'id': r.get('id', None), 'name': r.get('name', None), 'uri': r.get('uri', None)}) |
---|
1210 | |
---|
1211 | return groups |
---|
1212 | |
---|
    def sslifyUrl(self, url, **args):
        """returns URL with http or https

        Thin wrapper exposing the module-level sslifyUrl helper (from
        SrvTxtUtils) to templates/scripts. Note: inside the method body
        the name 'sslifyUrl' resolves to the global function, not to
        this method, so there is no recursion here.
        """
        return sslifyUrl(url, **args)
---|
1216 | |
---|
    # ZMI configuration form for this viewer; restricted to managers
    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
---|
1219 | |
---|
    def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',availableLayers=None,RESPONSE=None):
        """init document viewer

        ZMI handler for the configuration form: stores the digilib base
        URL (and derives the Scaler/viewer URLs from it), thumbnail grid
        size, authorized groups and available view layers on the instance.

        :param title: title of this viewer instance
        :param digilibBaseUrl: base URL of the digilib server
        :param thumbrows: default number of thumbnail rows
        :param thumbcols: default number of thumbnail columns
        :param authgroups: comma-separated list of authorized group names
        :param availableLayers: available view layers
        :param RESPONSE: Zope response; redirects back to manage_main if given
        """
        self.title=title
        self.digilibBaseUrl = digilibBaseUrl
        # derived digilib endpoints
        self.digilibScalerUrl = digilibBaseUrl + '/servlet/Scaler'
        self.digilibViewerUrl = digilibBaseUrl + '/jquery/digilib.html'
        self.thumbrows = thumbrows
        self.thumbcols = thumbcols
        # normalize group names to a lowercase list
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        try:
            # assume MetaDataFolder instance is called metadata
            self.metadataService = getattr(self, 'metadata')
        except Exception, e:
            logging.error("Unable to find MetaDataFolder 'metadata': "+str(e))

        self.setAvailableLayers(availableLayers)

        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')
---|
1239 | |
---|
1240 | |
---|
def manage_AddDocumentViewerForm(self):
    """Render the ZMI form for adding a documentViewer instance."""
    form = PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
    return form()
---|
1245 | |
---|
def manage_AddDocumentViewer(self,id,imageScalerUrl="",textServerName="",title="",RESPONSE=None):
    """Create a documentViewer instance and add it to the container.

    :param id: Zope id of the new object
    :param imageScalerUrl: URL of the digilib Scaler servlet
    :param textServerName: name of the text server
    :param title: title of the viewer
    :param RESPONSE: Zope response; redirects back to manage_main if given
    """
    viewer = documentViewer(id,imageScalerUrl=imageScalerUrl,title=title,textServerName=textServerName)
    self._setObject(id,viewer)

    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
---|