564
|
1 from OFS.SimpleItem import SimpleItem
|
|
2 from Products.PageTemplates.PageTemplateFile import PageTemplateFile
|
|
3
|
|
4 import xml.etree.ElementTree as ET
|
|
5
|
|
6 import re
|
|
7 import logging
|
|
8 import urllib
|
|
9 import urlparse
|
|
10 import base64
|
|
11
|
|
12 from SrvTxtUtils import getInt, getText, getHttpData
|
|
13
|
|
def serialize(node):
    """Return a string containing an XML snippet of node.

    The XML declaration that ElementTree prepends when an encoding is
    given is snipped off, so the result can be embedded in other markup.
    """
    s = ET.tostring(node, 'UTF-8')
    # ET.tostring returns bytes under Python 3; normalize to str so the
    # string operations below work on both versions (on Python 2 str IS
    # bytes, so this is a no-op there and behavior is unchanged).
    if not isinstance(s, str):
        s = s.decode('utf-8')
    # snip off XML declaration ("<?xml ...?>" plus the trailing newline)
    if s.startswith('<?xml'):
        i = s.find('?>')
        return s[i+3:]

    return s
|
|
23
|
|
24
|
|
class MpiwgXmlTextServer(SimpleItem):
    """TextServer implementation for MPIWG-XML server"""
    # Zope meta type under which instances appear in the ZMI
    meta_type="MPIWG-XML TextServer"

    # prepend a "Config" tab to the standard SimpleItem management screens
    manage_options=(
        {'label':'Config','action':'manage_changeMpiwgXmlTextServerForm'},
        )+SimpleItem.manage_options

    # ZPT form rendered by the "Config" management tab
    manage_changeMpiwgXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpiwgXmlTextServer", globals())
|
|
34
|
|
35 def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpiwg-mpdl-cms-web/", timeout=40, serverName=None, repositoryType='production'):
|
|
36 """constructor"""
|
|
37 self.id=id
|
|
38 self.title=title
|
|
39 self.timeout = timeout
|
|
40 self.repositoryType = repositoryType
|
|
41 if serverName is None:
|
|
42 self.serverUrl = serverUrl
|
|
43 else:
|
|
44 self.serverUrl = "http://%s/mpiwg-mpdl-cms-web/"%serverName
|
|
45
|
|
46 def getHttpData(self, url, data=None):
|
|
47 """returns result from url+data HTTP request"""
|
|
48 return getHttpData(url,data,timeout=self.timeout)
|
|
49
|
|
50 def getServerData(self, method, data=None):
|
|
51 """returns result from text server for method+data"""
|
|
52 url = self.serverUrl+method
|
|
53 return getHttpData(url,data,timeout=self.timeout)
|
|
54
|
|
55
|
|
56 def getRepositoryType(self):
|
|
57 """returns the repository type, e.g. 'production'"""
|
|
58 return self.repositoryType
|
|
59
|
|
60 def getTextDownloadUrl(self, type='xml', docinfo=None):
|
|
61 """returns a URL to download the current text"""
|
|
62 docpath = docinfo.get('textURLPath', None)
|
|
63 if not docpath:
|
|
64 return None
|
|
65
|
|
66 docpath = docpath.replace('.xml','.'+type)
|
|
67 url = '%sdoc/GetDocument?id=%s'%(self.serverUrl.replace('interface/',''), docpath)
|
|
68 return url
|
|
69
|
|
70
|
|
71 def getPlacesOnPage(self, docinfo=None, pn=None):
|
|
72 """Returns list of GIS places of page pn"""
|
|
73 #FIXME!
|
|
74 docpath = docinfo.get('textURLPath',None)
|
|
75 if not docpath:
|
|
76 return None
|
|
77
|
|
78 places=[]
|
|
79 text=self.getServerData("xpath.xql", "document=%s&xpath=//place&pn=%s"%(docpath,pn))
|
|
80 dom = ET.fromstring(text)
|
|
81 result = dom.findall(".//resultPage/place")
|
|
82 for l in result:
|
|
83 id = l.get("id")
|
|
84 name = l.text
|
|
85 place = {'id': id, 'name': name}
|
|
86 places.append(place)
|
|
87
|
|
88 return places
|
|
89
|
|
90
|
565
|
91 def getTextInfo(self, mode=None, docinfo=None):
|
564
|
92 """reads document info, including page concordance, from text server"""
|
|
93 logging.debug("getTextInfo mode=%s"%mode)
|
565
|
94
|
|
95 field = ''
|
|
96 if mode in ['pages', 'toc', 'figures']:
|
|
97 # translate mode to field param
|
|
98 field = '&field=%s'%mode
|
|
99 else:
|
|
100 mode = None
|
|
101
|
564
|
102 # check cached info
|
|
103 if mode:
|
|
104 # cached toc-request?
|
|
105 if 'full_%s'%mode in docinfo:
|
|
106 return docinfo
|
|
107
|
|
108 else:
|
565
|
109 # cached but no toc-request?
|
564
|
110 if 'numTextPages' in docinfo:
|
|
111 return docinfo
|
|
112
|
|
113 docpath = docinfo.get('textURLPath', None)
|
|
114 if docpath is None:
|
|
115 logging.error("getTextInfo: no textURLPath!")
|
|
116 return docinfo
|
|
117
|
|
118 # fetch docinfo
|
565
|
119 pagexml = self.getServerData("query/GetDocInfo","docId=%s%s"%(docpath,field))
|
564
|
120 dom = ET.fromstring(pagexml)
|
565
|
121 # all info in tag <doc>
|
|
122 doc = dom
|
564
|
123 if doc is None:
|
|
124 logging.error("getTextInfo: unable to find document-tag!")
|
|
125 else:
|
565
|
126 if mode is None:
|
|
127 # get general info from system-tag
|
|
128 cp = doc.find('system/countPages')
|
|
129 if cp is not None:
|
|
130 docinfo['numTextPages'] = getInt(cp.text)
|
|
131
|
|
132 else:
|
|
133 # result is in list-tag
|
|
134 l = doc.find('list')
|
|
135 if l is not None:
|
|
136 lt = l.get('type')
|
|
137 # pageNumbers
|
|
138 if lt == 'pages':
|
|
139 # contains tags with page numbers
|
|
140 # <item n="14" o="2" o-norm="2" file="0014"/>
|
|
141 # n=scan number, o=original page no, on=normalized original page no
|
|
142 # pageNumbers is a dict indexed by scan number
|
|
143 pages = {}
|
|
144 for i in l:
|
|
145 page = {}
|
|
146 pn = getInt(i.get('n'))
|
|
147 page['pn'] = pn
|
|
148 no = getInt(i.get('o'))
|
|
149 page['no'] = no
|
|
150 non = getInt(i.get('o-norm'))
|
|
151 page['non'] = non
|
|
152
|
|
153 if pn > 0:
|
|
154 pages[pn] = page
|
|
155
|
|
156 docinfo['pageNumbers'] = pages
|
|
157 logging.debug("got pageNumbers=%s"%repr(pages))
|
|
158
|
|
159 # toc
|
|
160 elif name == 'toc':
|
|
161 # contains tags with table of contents/figures
|
|
162 # <toc-entry><page>13</page><level>3</level><content>Chapter I</content><level-string>1.</level-string><real-level>1</real-level></toc-entry>
|
|
163 tocs = []
|
|
164 for te in tag:
|
|
165 toc = {}
|
|
166 for t in te:
|
|
167 if t.tag == 'page':
|
|
168 toc['pn'] = getInt(t.text)
|
|
169 elif t.tag == 'level':
|
|
170 toc['level'] = t.text
|
|
171 elif t.tag == 'content':
|
|
172 toc['content'] = t.text
|
|
173 elif t.tag == 'level-string':
|
|
174 toc['level-string'] = t.text
|
|
175 elif t.tag == 'real-level':
|
|
176 toc['real-level'] = t.text
|
|
177
|
|
178 tocs.append(toc)
|
564
|
179
|
565
|
180 # save as full_toc/full_figures
|
|
181 docinfo['full_%s'%mode] = tocs
|
564
|
182
|
|
183 return docinfo
|
|
184
|
|
185
|
|
186 def processPageInfo(self, dom, docinfo, pageinfo):
|
|
187 """processes page info divs from dom and stores in docinfo and pageinfo"""
|
|
188 # assume first second level div is pageMeta
|
|
189 alldivs = dom.find("div")
|
|
190
|
|
191 if alldivs is None or alldivs.get('class', '') != 'pageMeta':
|
|
192 logging.error("processPageInfo: pageMeta div not found!")
|
|
193 return
|
|
194
|
|
195 for div in alldivs:
|
|
196 dc = div.get('class')
|
|
197
|
|
198 # pageNumberOrig
|
|
199 if dc == 'pageNumberOrig':
|
|
200 pageinfo['pageNumberOrig'] = div.text
|
|
201
|
|
202 # pageNumberOrigNorm
|
|
203 elif dc == 'pageNumberOrigNorm':
|
|
204 pageinfo['pageNumberOrigNorm'] = div.text
|
|
205
|
|
206 # pageHeaderTitle
|
|
207 elif dc == 'pageHeaderTitle':
|
|
208 pageinfo['pageHeaderTitle'] = div.text
|
|
209
|
|
210 #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
|
|
211 return
|
|
212
|
|
213
|
|
    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
        """Return a single page of fulltext as a serialized HTML/XML fragment.

        mode is a comma-separated combination of layers: 'text' (plain),
        'dict' (tokenized with dictionary links), 'xml', 'gis', plus the
        add-on flags 'search' (hit highlighting) and 'pundit' (annotation
        attributes).  The page is fetched from the text server, its links
        are post-processed, and the text div is returned serialized;
        returns None when there is no text or parsing fails.
        """

        logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
        # check for cached text -- but ideally this shouldn't be called twice
        if pageinfo.has_key('textPage'):
            logging.debug("getTextPage: using cached text")
            return pageinfo['textPage']

        docpath = docinfo.get('textURLPath', None)
        if not docpath:
            return None

        # just checking
        if pageinfo['current'] != pn:
            logging.warning("getTextPage: current!=pn!")

        # stuff for constructing full urls
        selfurl = docinfo['viewerUrl']
        textParams = {'docId': docpath,
                      'page': pn}

        # pass the requested character normalization through to the server
        if 'characterNormalization' in pageinfo:
            cn = pageinfo['characterNormalization']
            # TODO: change values in form
            if cn == 'regPlusNorm':
                cn = 'norm'

            textParams['normalization'] = cn

        if not mode:
            # default is dict
            mode = 'text'

        modes = mode.split(',')
        # check for multiple layers
        if len(modes) > 1:
            logging.debug("getTextPage: more than one mode=%s"%mode)

        # search mode
        if 'search' in modes:
            # add highlighting
            highlightQuery = pageinfo.get('highlightQuery', None)
            if highlightQuery:
                textParams['highlightQuery'] = highlightQuery
                textParams['highlightElem'] = pageinfo.get('highlightElement', '')
                textParams['highlightElemPos'] = pageinfo.get('highlightElementPos', '')

            # ignore mode in the following
            modes.remove('search')

        # pundit mode
        punditMode = False
        if 'pundit' in modes:
            punditMode = True
            # ignore mode in the following
            modes.remove('pundit')

        # other modes don't combine
        if 'dict' in modes:
            textmode = 'dict'
            textParams['mode'] = 'tokenized'
            textParams['outputFormat'] = 'html'
        elif 'xml' in modes:
            # raw XML display: untokenized and always in original normalization
            textmode = 'xml'
            textParams['mode'] = 'untokenized'
            textParams['outputFormat'] = 'xmlDisplay'
            textParams['normalization'] = 'orig'
        elif 'gis' in modes:
            #FIXME!
            textmode = 'gis'
        else:
            # text is default mode
            textmode = 'text'
            textParams['mode'] = 'untokenized'
            textParams['outputFormat'] = 'html'

        # fetch the page
        pagexml = self.getServerData("query/GetPage",urllib.urlencode(textParams))
        try:
            dom = ET.fromstring(pagexml)
        except Exception, e:
            logging.error("Error parsing page: %s"%e)
            return None

        pagediv = None
        body = dom.find('.//body')
        if body is None:
            logging.error("getTextPage: no body!")
            return None

        # the text is in div@class=text
        pagediv = body.find(".//div[@class='text']")
        logging.debug("pagediv: %s"%repr(pagediv))

        # plain text or text-with-links mode
        if textmode == "text" or textmode == "dict":
            if pagediv is not None:
                #self._processPbTag(pagediv, pageinfo)
                self._processFigures(pagediv, docinfo)
                #self._fixEmptyDivs(pagediv)
                # get full url assuming documentViewer is parent
                selfurl = self.getLink()
                # check all a-tags
                links = pagediv.findall('.//a')
                for l in links:
                    href = l.get('href')
                    if href:
                        # is link with href
                        linkurl = urlparse.urlparse(href)
                        if linkurl.path.endswith('GetDictionaryEntries'):
                            #TODO: replace wordInfo page
                            # is dictionary link - change href (keeping parameters)
                            #l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/template/viewer_wordinfo'%viewerurl))
                            # add target to open new page
                            l.set('target', '_blank')

                        elif href.startswith('#note-'):
                            # note link FIXME!
                            # make the fragment link absolute to our own page
                            l.set('href', href.replace('#note-',"%s#note-"%selfurl))

                if punditMode:
                    self._addPunditAttributes(pagediv, pageinfo, docinfo)

                return serialize(pagediv)

        # xml mode
        elif textmode == "xml":
            if pagediv is not None:
                return serialize(pagediv)

        # pureXml mode WTF?
        # NOTE(review): textmode is never set to "pureXml" above, so this
        # branch appears unreachable -- confirm before removing.
        elif textmode == "pureXml":
            if pagediv is not None:
                return serialize(pagediv)

        # gis mode FIXME!
        elif textmode == "gis":
            if pagediv is not None:
                # fix empty div tags
                self._fixEmptyDivs(pagediv)
                # check all a-tags
                links = pagediv.findall(".//a")
                # add our URL as backlink
                selfurl = self.getLink()
                doc = base64.b64encode(selfurl)
                for l in links:
                    href = l.get('href')
                    if href:
                        # rewrite mappit links to carry our URL as doc param
                        if href.startswith('http://mappit.mpiwg-berlin.mpg.de'):
                            l.set('href', re.sub(r'doc=[\w+/=]+', 'doc=%s'%doc, href))
                            l.set('target', '_blank')

                return serialize(pagediv)

        return None
|
565
|
370
|
566
|
371 def _processPbTag(self, pagediv, pageinfo):
|
565
|
372 """extracts information from pb-tag and removes it from pagediv"""
|
|
373 pbdiv = pagediv.find(".//span[@class='pb']")
|
|
374 if pbdiv is None:
|
|
375 logging.warning("getTextPage: no pb-span!")
|
|
376 return pagediv
|
|
377
|
|
378 # extract running head
|
|
379 rh = pbdiv.find(".//span[@class='rhead']")
|
|
380 if rh is not None:
|
|
381 pageinfo['pageHeaderTitle'] = getText(rh)
|
|
382
|
|
383 # remove pb-div from parent
|
|
384 ppdiv = pagediv.find(".//span[@class='pb']/..")
|
|
385 ppdiv.remove(pbdiv)
|
|
386 return pagediv
|
564
|
387
|
565
|
388 def _addPunditAttributes(self, pagediv, pageinfo, docinfo):
|
564
|
389 """add about attributes for pundit annotation tool"""
|
|
390 textid = docinfo.get('DRI', "fn=%s"%docinfo.get('documentPath', '???'))
|
|
391 pn = pageinfo.get('pn', '1')
|
|
392 # TODO: use pn as well?
|
|
393 # check all div-tags
|
|
394 divs = pagediv.findall(".//div")
|
|
395 for d in divs:
|
|
396 id = d.get('id')
|
|
397 if id:
|
566
|
398 # TODO: check path (cf RFC2396)
|
564
|
399 d.set('about', "http://echo.mpiwg-berlin.mpg.de/%s/pn=%s/#%s"%(textid,pn,id))
|
|
400 cls = d.get('class','')
|
|
401 cls += ' pundit-content'
|
|
402 d.set('class', cls.strip())
|
|
403
|
|
404 return pagediv
|
|
405
|
566
|
406 def _processFigures(self, pagediv, docinfo):
|
|
407 """processes figure-tags"""
|
|
408 divs = pagediv.findall(".//span[@class='figure']")
|
|
409 scalerUrl = docinfo['digilibScalerUrl']
|
|
410 viewerUrl = docinfo['digilibViewerUrl']
|
|
411 for d in divs:
|
|
412 try:
|
|
413 a = d.find('a')
|
|
414 img = a.find('img')
|
|
415 imgsrc = img.get('src')
|
|
416 imgurl = urlparse.urlparse(imgsrc)
|
|
417 imgq = imgurl.query
|
|
418 imgparams = urlparse.parse_qs(imgq)
|
|
419 fn = imgparams.get('fn', None)
|
|
420 if fn is not None:
|
|
421 # parse_qs puts parameters in lists
|
|
422 fn = fn[0]
|
|
423 # TODO: check valid path
|
|
424 # fix img@src
|
|
425 newsrc = '%s?fn=%s&dw=200&dh=200'%(scalerUrl,fn)
|
|
426 img.set('src', newsrc)
|
|
427 # fix a@href
|
|
428 newlink = '%s?fn=%s'%(viewerUrl,fn)
|
|
429 a.set('href', newlink)
|
|
430 a.set('target', '_blank')
|
|
431
|
|
432 except:
|
|
433 logging.warn("processFigures: strange figure!")
|
|
434
|
|
435
|
565
|
436 def _fixEmptyDivs(self, pagediv):
|
|
437 """fixes empty div-tags by inserting a space"""
|
|
438 divs = pagediv.findall('.//div')
|
|
439 for d in divs:
|
|
440 if len(d) == 0 and not d.text:
|
|
441 # make empty divs non-empty
|
|
442 d.text = ' '
|
|
443
|
|
444 return pagediv
|
|
445
|
|
446
|
564
|
447 def getSearchResults(self, mode, query=None, pageinfo=None, docinfo=None):
|
|
448 """loads list of search results and stores XML in docinfo"""
|
|
449
|
|
450 logging.debug("getSearchResults mode=%s query=%s"%(mode, query))
|
|
451 if mode == "none":
|
|
452 return docinfo
|
|
453
|
|
454 cachedQuery = docinfo.get('cachedQuery', None)
|
|
455 if cachedQuery is not None:
|
|
456 # cached search result
|
|
457 if cachedQuery == '%s_%s'%(mode,query):
|
|
458 # same query
|
|
459 return docinfo
|
|
460
|
|
461 else:
|
|
462 # different query
|
|
463 del docinfo['resultSize']
|
|
464 del docinfo['resultXML']
|
|
465
|
|
466 # cache query
|
|
467 docinfo['cachedQuery'] = '%s_%s'%(mode,query)
|
|
468
|
|
469 # fetch full results
|
|
470 docpath = docinfo['textURLPath']
|
|
471 params = {'document': docpath,
|
|
472 'mode': 'text',
|
|
473 'queryType': mode,
|
|
474 'query': query,
|
|
475 'queryResultPageSize': 1000,
|
|
476 'queryResultPN': 1,
|
|
477 'characterNormalization': pageinfo.get('characterNormalization', 'reg')}
|
|
478 pagexml = self.getServerData("doc-query.xql",urllib.urlencode(params))
|
|
479 #pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
|
|
480 dom = ET.fromstring(pagexml)
|
|
481 # page content is in <div class="queryResultPage">
|
|
482 pagediv = None
|
|
483 # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
|
|
484 alldivs = dom.findall("div")
|
|
485 for div in alldivs:
|
|
486 dc = div.get('class')
|
|
487 # page content div
|
|
488 if dc == 'queryResultPage':
|
|
489 pagediv = div
|
|
490
|
|
491 elif dc == 'queryResultHits':
|
|
492 docinfo['resultSize'] = getInt(div.text)
|
|
493
|
|
494 if pagediv is not None:
|
|
495 # store XML in docinfo
|
|
496 docinfo['resultXML'] = ET.tostring(pagediv, 'UTF-8')
|
|
497
|
|
498 return docinfo
|
|
499
|
|
500
|
|
501 def getResultsPage(self, mode="text", query=None, pn=None, start=None, size=None, pageinfo=None, docinfo=None):
|
|
502 """returns single page from the table of contents"""
|
|
503 logging.debug("getResultsPage mode=%s, pn=%s"%(mode,pn))
|
|
504 # get (cached) result
|
|
505 self.getSearchResults(mode=mode, query=query, pageinfo=pageinfo, docinfo=docinfo)
|
|
506
|
|
507 resultxml = docinfo.get('resultXML', None)
|
|
508 if not resultxml:
|
|
509 logging.error("getResultPage: unable to find resultXML")
|
|
510 return "Error: no result!"
|
|
511
|
|
512 if size is None:
|
|
513 size = pageinfo.get('resultPageSize', 10)
|
|
514
|
|
515 if start is None:
|
|
516 start = (pn - 1) * size
|
|
517
|
|
518 fullresult = ET.fromstring(resultxml)
|
|
519
|
|
520 if fullresult is not None:
|
|
521 # paginate
|
|
522 first = start-1
|
|
523 len = size
|
|
524 del fullresult[:first]
|
|
525 del fullresult[len:]
|
|
526 tocdivs = fullresult
|
|
527
|
|
528 # check all a-tags
|
|
529 links = tocdivs.findall(".//a")
|
|
530 for l in links:
|
|
531 href = l.get('href')
|
|
532 if href:
|
|
533 # assume all links go to pages
|
|
534 linkUrl = urlparse.urlparse(href)
|
|
535 linkParams = urlparse.parse_qs(linkUrl.query)
|
|
536 # take some parameters
|
|
537 params = {'pn': linkParams['pn'],
|
|
538 'highlightQuery': linkParams.get('highlightQuery',''),
|
|
539 'highlightElement': linkParams.get('highlightElement',''),
|
|
540 'highlightElementPos': linkParams.get('highlightElementPos','')
|
|
541 }
|
|
542 url = self.getLink(params=params)
|
|
543 l.set('href', url)
|
|
544
|
|
545 return serialize(tocdivs)
|
|
546
|
|
547 return "ERROR: no results!"
|
|
548
|
|
549
|
|
550 def getToc(self, mode='text', docinfo=None):
|
|
551 """returns list of table of contents from docinfo"""
|
|
552 logging.debug("getToc mode=%s"%mode)
|
|
553 if mode == 'text':
|
|
554 queryType = 'toc'
|
|
555 else:
|
|
556 queryType = mode
|
|
557
|
|
558 if not 'full_%s'%queryType in docinfo:
|
|
559 # get new toc
|
|
560 docinfo = self.getTextInfo(queryType, docinfo)
|
|
561
|
|
562 return docinfo.get('full_%s'%queryType, [])
|
|
563
|
|
564 def getTocPage(self, mode='text', pn=None, start=None, size=None, pageinfo=None, docinfo=None):
|
|
565 """returns single page from the table of contents"""
|
|
566 logging.debug("getTocPage mode=%s, pn=%s start=%s size=%s"%(mode,repr(pn),repr(start),repr(size)))
|
|
567 fulltoc = self.getToc(mode=mode, docinfo=docinfo)
|
|
568 if len(fulltoc) < 1:
|
|
569 logging.error("getTocPage: unable to find toc!")
|
|
570 return "Error: no table of contents!"
|
|
571
|
|
572 if size is None:
|
|
573 size = pageinfo.get('tocPageSize', 30)
|
|
574
|
|
575 if start is None:
|
|
576 start = (pn - 1) * size
|
|
577
|
|
578 # paginate
|
|
579 first = (start - 1)
|
|
580 last = first + size
|
|
581 tocs = fulltoc[first:last]
|
|
582 tp = '<div>'
|
|
583 for toc in tocs:
|
|
584 pageurl = self.getLink('pn', toc['pn'])
|
|
585 tp += '<div class="tocline">'
|
|
586 tp += '<div class="toc name">[%s %s]</div>'%(toc['level-string'], toc['content'])
|
|
587 tp += '<div class="toc float right page"><a href="%s">Page: %s</a></div>'%(pageurl, toc['pn'])
|
|
588 tp += '</div>\n'
|
|
589
|
|
590 tp += '</div>\n'
|
|
591
|
|
592 return tp
|
|
593
|
|
594
|
|
595 def manage_changeMpiwgXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,repositoryType=None,RESPONSE=None):
|
|
596 """change settings"""
|
|
597 self.title=title
|
|
598 self.timeout = timeout
|
|
599 self.serverUrl = serverUrl
|
|
600 if repositoryType:
|
|
601 self.repositoryType = repositoryType
|
|
602 if RESPONSE is not None:
|
|
603 RESPONSE.redirect('manage_main')
|
|
604
|
|
605 # management methods
|
|
def manage_addMpiwgXmlTextServerForm(self):
    """Form for adding"""
    # bind the add-form template to this context before rendering
    template = PageTemplateFile("zpt/manage_addMpiwgXmlTextServer", globals()).__of__(self)
    return template()
|
|
610
|
|
def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
#def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
    """add zogiimage"""
    # create the text server object and register it in the destination folder
    newObj = MpiwgXmlTextServer(id=id, title=title, serverUrl=serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
|
|
618
|
|
619 |