564
|
1 from OFS.SimpleItem import SimpleItem
|
|
2 from Products.PageTemplates.PageTemplateFile import PageTemplateFile
|
|
3
|
|
4 import xml.etree.ElementTree as ET
|
|
5
|
|
6 import re
|
|
7 import logging
|
|
8 import urllib
|
|
9 import urlparse
|
|
10 import base64
|
|
11
|
576
|
12 from datetime import datetime
|
|
13
|
564
|
14 from SrvTxtUtils import getInt, getText, getHttpData
|
|
15
|
610
|
# mapping of fields in the output of /mpiwg-mpdl-cms-web/query/GetDocInfo to documentViewer docinfo
# keys are element names in the server's XML response, values are docinfo keys;
# all values are counts (parsed with getInt in getTextInfo)
textinfoFieldMap = {
    'countPages' : 'numTextPages',
    'countFigures' : 'numFigureEntries',
    'countNotesHandwritten' : 'numHandwritten',
    'countNotes' : 'numNotes',
    'countPlaces' : 'numPlaces',
    'countTocEntries' : 'numTocEntries'
    }
|
|
25
|
564
|
def serialize(node):
    """Return a string (bytes) containing an XML snippet of node.

    ET.tostring() with an encoding argument returns an encoded byte string
    that starts with an XML declaration; this strips the declaration (and
    any whitespace following it) so the result can be embedded in a page.

    Fixes: the previous version compared bytes against str literals (fails
    under Python 3) and blindly skipped exactly one character after '?>'.
    """
    s = ET.tostring(node, 'UTF-8')
    # snip off XML declaration; use bytes literals so this works on the
    # byte string that tostring() returns on both Python 2 and 3
    if s.startswith(b'<?xml'):
        i = s.find(b'?>')
        if i >= 0:
            # skip past '?>' and any newline the serializer added after it
            return s[i+2:].lstrip(b'\r\n')

    return s
|
|
35
|
|
36
|
|
class MpiwgXmlTextServer(SimpleItem):
    """TextServer implementation for MPIWG-XML server"""
    # Zope meta_type under which this object appears in the add list
    meta_type="MPIWG-XML TextServer"

    # add our config tab to the standard Zope management screens
    manage_options=(
        {'label':'Config','action':'manage_changeMpiwgXmlTextServerForm'},
        )+SimpleItem.manage_options

    # page template rendering the configuration form
    manage_changeMpiwgXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpiwgXmlTextServer", globals())

    def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpiwg-mpdl-cms-web/", timeout=40, serverName=None, repositoryType='production'):
        """constructor

        id: Zope object id
        title: object title
        serverUrl: full base URL of the text server (used when serverName is None)
        timeout: HTTP timeout in seconds for text-server requests
        serverName: host name; when given it overrides serverUrl, using the
            standard /mpiwg-mpdl-cms-web/ path
        repositoryType: repository stage, e.g. 'production'
        """
        self.id=id
        self.title=title
        self.timeout = timeout
        self.repositoryType = repositoryType
        # serverName takes precedence over serverUrl
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpiwg-mpdl-cms-web/"%serverName
|
|
57
|
|
58 def getHttpData(self, url, data=None):
|
|
59 """returns result from url+data HTTP request"""
|
|
60 return getHttpData(url,data,timeout=self.timeout)
|
|
61
|
|
62 def getServerData(self, method, data=None):
|
|
63 """returns result from text server for method+data"""
|
|
64 url = self.serverUrl+method
|
|
65 return getHttpData(url,data,timeout=self.timeout)
|
|
66
|
|
67
|
|
68 def getRepositoryType(self):
|
|
69 """returns the repository type, e.g. 'production'"""
|
572
|
70 return getattr(self, 'repositoryType', None)
|
564
|
71
|
|
72 def getTextDownloadUrl(self, type='xml', docinfo=None):
|
|
73 """returns a URL to download the current text"""
|
|
74 docpath = docinfo.get('textURLPath', None)
|
|
75 if not docpath:
|
|
76 return None
|
|
77
|
|
78 docpath = docpath.replace('.xml','.'+type)
|
|
79 url = '%sdoc/GetDocument?id=%s'%(self.serverUrl.replace('interface/',''), docpath)
|
|
80 return url
|
|
81
|
|
82
|
|
83 def getPlacesOnPage(self, docinfo=None, pn=None):
|
|
84 """Returns list of GIS places of page pn"""
|
610
|
85 logging.debug("getPlacesOnPage(pn=%s"%pn)
|
|
86 if not 'places' in docinfo:
|
|
87 self.getTextInfo('places', docinfo)
|
|
88
|
|
89 allplaces = docinfo.get('places', None)
|
|
90 if len(allplaces) == 0:
|
|
91 return []
|
|
92
|
|
93 # search for places on this page TODO: is there a better way?
|
|
94 places = [p for p in allplaces if p['pn'] == pn]
|
|
95 return places
|
|
96 """OLD:
|
564
|
97 docpath = docinfo.get('textURLPath',None)
|
|
98 if not docpath:
|
|
99 return None
|
|
100
|
|
101 places=[]
|
|
102 text=self.getServerData("xpath.xql", "document=%s&xpath=//place&pn=%s"%(docpath,pn))
|
|
103 dom = ET.fromstring(text)
|
|
104 result = dom.findall(".//resultPage/place")
|
|
105 for l in result:
|
|
106 id = l.get("id")
|
|
107 name = l.text
|
|
108 place = {'id': id, 'name': name}
|
|
109 places.append(place)
|
|
110
|
610
|
111 return places"""
|
564
|
112
|
|
113
|
565
|
    def getTextInfo(self, mode=None, docinfo=None):
        """reads document info, including page concordance, from text server

        mode: one of 'pages', 'toc', 'figures', 'notes', 'handwritten',
            'places' to request a specific list; any other value fetches
            only the general counts. Results are cached inside docinfo.
        docinfo: document info dict, updated in place and returned.
        """
        logging.debug("getTextInfo mode=%s"%mode)
        
        field = ''
        if mode in ['pages', 'toc', 'figures', 'notes', 'handwritten', 'places']:
            # translate mode to field param
            if mode == 'handwritten':
                field = '&field=notesHandwritten'
            else:
                field = '&field=%s'%mode
        else:
            mode = None
        
        # check cached info
        if mode:
            # cached toc-request?
            if 'full_%s'%mode in docinfo:
                return docinfo
            
        else:
            # cached but no toc-request?
            if 'numTextPages' in docinfo:
                return docinfo
            
        docpath = docinfo.get('textURLPath', None)
        if docpath is None:
            logging.error("getTextInfo: no textURLPath!")
            return docinfo
                
        # fetch docinfo
        pagexml = self.getServerData("query/GetDocInfo","docId=%s%s"%(docpath,field))
        dom = ET.fromstring(pagexml)
        # all info in tag <doc>
        doc = dom
        if doc is None:
            logging.error("getTextInfo: unable to find document-tag!")
        else:
            if mode is None:
                # get general info from system-tag
                sys = doc.find('system')
                if sys is not None:
                    for (k,v) in textinfoFieldMap.items():
                        # copy into docinfo (even if empty)
                        docinfo[v] = getInt(getText(sys.find(k)))
                    
            else:
                # result is in list-tag
                l = doc.find('list')
                if l is not None:
                    # look for general info
                    for (k,v) in textinfoFieldMap.items():
                        # copy into docinfo (only if not empty)
                        s = doc.find(k)
                        if s is not None:
                            docinfo[v] = getInt(getText(s))
                    
                    # the list's type attribute decides how its items are parsed
                    lt = l.get('type')
                    #
                    # pageNumbers
                    #
                    if lt == 'pages':
                        # contains tags with page numbers
                        # <item n="14" o="2" o-norm="2" file="0014"/>
                        # n=scan number, o=original page no, on=normalized original page no
                        # pageNumbers is a dict indexed by scan number
                        pages = {}
                        for i in l:
                            page = {}
                            pn = getInt(i.get('n'))
                            page['pn'] = pn
                            no = i.get('o')
                            page['no'] = no
                            non = i.get('o-norm')
                            page['non'] = non
                            
                            # skip items without a valid scan number
                            if pn > 0:
                                pages[pn] = page
                        
                        docinfo['pageNumbers'] = pages
                                    
                    #
                    # toc
                    #
                    elif lt in ['toc', 'figures', 'notes', 'notesHandwritten']:
                        # contains tags with table of contents/figures
                        # <item n="2.1." lv="2">CAP.I. <ref o="119">132</ref></item>
                        tocs = []
                        for te in l:
                            if te.tag == 'item':
                                toc = {}
                                toc['level-string'] = te.get('n')
                                toc['level'] = te.get('lv')
                                toc['content'] = te.text.strip()
                                # the ref child carries the page numbers
                                ref = te.find('ref')
                                toc['pn'] = getInt(ref.text)
                                toc['no'] = ref.get('o')
                                toc['non'] = ref.get('o-norm')
                                tocs.append(toc)
                        
                        # save as full_toc/full_figures
                        docinfo['full_%s'%mode] = tocs
        
                    #
                    # places
                    #
                    #
                    # toc
                    #
                    elif lt in ['places']:
                        # contains tags with place-ids
                        # <item id="N40004F-01"><ref>4</ref></item>
                        places = []
                        for p in l:
                            if p.tag == 'item':
                                place = {}
                                place['id'] = p.get('id')
                                # the ref child holds the page number
                                ref = p.find('ref')
                                place['pn'] = getInt(ref.text)
                                places.append(place)
                        
                        docinfo['places'] = places

        return docinfo
|
|
238
|
|
239
|
|
240 def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
|
|
241 """returns single page from fulltext"""
|
|
242
|
|
243 logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
|
576
|
244 startTime = datetime.now()
|
564
|
245 # check for cached text -- but ideally this shouldn't be called twice
|
|
246 if pageinfo.has_key('textPage'):
|
|
247 logging.debug("getTextPage: using cached text")
|
|
248 return pageinfo['textPage']
|
|
249
|
|
250 docpath = docinfo.get('textURLPath', None)
|
|
251 if not docpath:
|
|
252 return None
|
|
253
|
|
254 # stuff for constructing full urls
|
|
255 selfurl = docinfo['viewerUrl']
|
|
256 textParams = {'docId': docpath,
|
|
257 'page': pn}
|
565
|
258
|
575
|
259 normMode = pageinfo.get('characterNormalization', 'reg')
|
|
260 # TODO: change values in form
|
|
261 if normMode == 'regPlusNorm':
|
|
262 normMode = 'norm'
|
576
|
263
|
|
264 # TODO: this should not be necessary when the backend is fixed
|
579
|
265 #textParams['normalization'] = normMode
|
576
|
266
|
564
|
267 if not mode:
|
|
268 # default is dict
|
|
269 mode = 'text'
|
|
270
|
|
271 modes = mode.split(',')
|
|
272 # check for multiple layers
|
|
273 if len(modes) > 1:
|
|
274 logging.debug("getTextPage: more than one mode=%s"%mode)
|
|
275
|
610
|
276 # mode defaults
|
|
277 gisMode = False
|
|
278 punditMode = False
|
|
279
|
564
|
280 # search mode
|
|
281 if 'search' in modes:
|
|
282 # add highlighting
|
|
283 highlightQuery = pageinfo.get('highlightQuery', None)
|
|
284 if highlightQuery:
|
|
285 textParams['highlightQuery'] = highlightQuery
|
|
286 textParams['highlightElem'] = pageinfo.get('highlightElement', '')
|
|
287 textParams['highlightElemPos'] = pageinfo.get('highlightElementPos', '')
|
|
288
|
|
289 # ignore mode in the following
|
|
290 modes.remove('search')
|
|
291
|
|
292 # pundit mode
|
|
293 if 'pundit' in modes:
|
|
294 punditMode = True
|
|
295 # ignore mode in the following
|
|
296 modes.remove('pundit')
|
|
297
|
|
298 # other modes don't combine
|
|
299 if 'dict' in modes:
|
|
300 textmode = 'dict'
|
|
301 textParams['outputFormat'] = 'html'
|
|
302 elif 'xml' in modes:
|
|
303 textmode = 'xml'
|
|
304 textParams['outputFormat'] = 'xmlDisplay'
|
576
|
305 normMode = 'orig'
|
564
|
306 elif 'gis' in modes:
|
610
|
307 gisMode = True
|
|
308 # gis mode uses plain text
|
|
309 textmode = 'plain'
|
|
310 textParams['outputFormat'] = 'html'
|
564
|
311 else:
|
|
312 # text is default mode
|
575
|
313 textmode = 'plain'
|
564
|
314 textParams['outputFormat'] = 'html'
|
|
315
|
565
|
316 try:
|
570
|
317 # fetch the page
|
|
318 pagexml = self.getServerData("query/GetPage",urllib.urlencode(textParams))
|
565
|
319 dom = ET.fromstring(pagexml)
|
|
320 except Exception, e:
|
570
|
321 logging.error("Error reading page: %s"%e)
|
565
|
322 return None
|
|
323
|
566
|
324 # plain text or text-with-links mode
|
610
|
325 if textmode == 'plain' or textmode == 'dict':
|
574
|
326 # the text is in div@class=text
|
|
327 pagediv = dom.find(".//div[@class='text']")
|
|
328 logging.debug("pagediv: %s"%repr(pagediv))
|
564
|
329 if pagediv is not None:
|
575
|
330 # add textmode and normMode classes
|
579
|
331 #pagediv.set('class', 'text %s %s'%(textmode, normMode))
|
576
|
332 self._processWTags(textmode, normMode, pagediv)
|
567
|
333 #self._processPbTag(pagediv, pageinfo)
|
566
|
334 self._processFigures(pagediv, docinfo)
|
|
335 #self._fixEmptyDivs(pagediv)
|
565
|
336 # get full url assuming documentViewer is parent
|
|
337 selfurl = self.getLink()
|
564
|
338 # check all a-tags
|
|
339 links = pagediv.findall('.//a')
|
|
340 for l in links:
|
|
341 href = l.get('href')
|
|
342 if href:
|
|
343 # is link with href
|
|
344 linkurl = urlparse.urlparse(href)
|
|
345 if linkurl.path.endswith('GetDictionaryEntries'):
|
|
346 #TODO: replace wordInfo page
|
|
347 # add target to open new page
|
|
348 l.set('target', '_blank')
|
566
|
349
|
|
350 if punditMode:
|
|
351 self._addPunditAttributes(pagediv, pageinfo, docinfo)
|
577
|
352
|
610
|
353 if gisMode:
|
|
354 self._addGisTags(pagediv, pageinfo, docinfo)
|
|
355
|
576
|
356 s = serialize(pagediv)
|
|
357 logging.debug("getTextPage done in %s"%(datetime.now()-startTime))
|
|
358 return s
|
564
|
359
|
|
360 # xml mode
|
|
361 elif textmode == "xml":
|
574
|
362 # the text is in body
|
|
363 pagediv = dom.find(".//body")
|
|
364 logging.debug("pagediv: %s"%repr(pagediv))
|
564
|
365 if pagediv is not None:
|
|
366 return serialize(pagediv)
|
|
367
|
579
|
368 logging.error("getTextPage: error in text mode %s or in text!"%(textmode))
|
564
|
369 return None
|
565
|
370
|
575
|
    def _processWTags(self, textMode, normMode, pagediv):
        """selects the necessary information from w-spans and removes the rest from pagediv

        Each word is a <span class="w"> containing both a dictionary-linked
        variant (child with class 'dictionary') and plain variants
        ('nodictionary orig/reg/norm'). Depending on textMode ('dict' or
        plain) and normMode ('orig'/'reg'/'norm'), exactly one variant is
        kept; all others are deleted. Setting .tag = None makes etree drop
        the element wrapper but keep its text on serialization.
        NOTE(review): assumes the server always emits all variant spans --
        a missing child would make remove(None) raise; confirm with backend.
        """
        logging.debug("processWTags(textMode=%s,norm=%s,pagediv"%(repr(textMode),repr(normMode)))
        startTime = datetime.now()
        wtags = pagediv.findall(".//span[@class='w']")
        for wtag in wtags:
            if textMode == 'dict':
                # delete non-a-tags
                wtag.remove(wtag.find("span[@class='nodictionary orig']"))
                wtag.remove(wtag.find("span[@class='nodictionary reg']"))
                wtag.remove(wtag.find("span[@class='nodictionary norm']"))
                # delete non-matching children of a-tag and suppress remaining tag name
                atag = wtag.find("*[@class='dictionary']")
                if normMode == 'orig':
                    atag.remove(atag.find("span[@class='reg']"))
                    atag.remove(atag.find("span[@class='norm']"))
                    atag.find("span[@class='orig']").tag = None
                elif normMode == 'reg':
                    atag.remove(atag.find("span[@class='orig']"))
                    atag.remove(atag.find("span[@class='norm']"))
                    atag.find("span[@class='reg']").tag = None
                elif normMode == 'norm':
                    atag.remove(atag.find("span[@class='orig']"))
                    atag.remove(atag.find("span[@class='reg']"))
                    atag.find("span[@class='norm']").tag = None
                
            else:
                # delete a-tag
                wtag.remove(wtag.find("*[@class='dictionary']"))
                # delete non-matching children and suppress remaining tag name
                if normMode == 'orig':
                    wtag.remove(wtag.find("span[@class='nodictionary reg']"))
                    wtag.remove(wtag.find("span[@class='nodictionary norm']"))
                    wtag.find("span[@class='nodictionary orig']").tag = None
                elif normMode == 'reg':
                    wtag.remove(wtag.find("span[@class='nodictionary orig']"))
                    wtag.remove(wtag.find("span[@class='nodictionary norm']"))
                    wtag.find("span[@class='nodictionary reg']").tag = None
                elif normMode == 'norm':
                    wtag.remove(wtag.find("span[@class='nodictionary orig']"))
                    wtag.remove(wtag.find("span[@class='nodictionary reg']"))
                    wtag.find("span[@class='nodictionary norm']").tag = None
            
            # suppress w-tag name
            wtag.tag = None
        
        logging.debug("processWTags in %s"%(datetime.now()-startTime))
        return pagediv
|
|
419
|
566
|
420 def _processPbTag(self, pagediv, pageinfo):
|
565
|
421 """extracts information from pb-tag and removes it from pagediv"""
|
|
422 pbdiv = pagediv.find(".//span[@class='pb']")
|
|
423 if pbdiv is None:
|
|
424 logging.warning("getTextPage: no pb-span!")
|
|
425 return pagediv
|
|
426
|
|
427 # extract running head
|
|
428 rh = pbdiv.find(".//span[@class='rhead']")
|
|
429 if rh is not None:
|
|
430 pageinfo['pageHeaderTitle'] = getText(rh)
|
|
431
|
|
432 # remove pb-div from parent
|
|
433 ppdiv = pagediv.find(".//span[@class='pb']/..")
|
|
434 ppdiv.remove(pbdiv)
|
|
435 return pagediv
|
564
|
436
|
565
|
437 def _addPunditAttributes(self, pagediv, pageinfo, docinfo):
|
610
|
438 """add about-attributes to divs for pundit annotation tool"""
|
564
|
439 textid = docinfo.get('DRI', "fn=%s"%docinfo.get('documentPath', '???'))
|
|
440 pn = pageinfo.get('pn', '1')
|
|
441 # check all div-tags
|
|
442 divs = pagediv.findall(".//div")
|
|
443 for d in divs:
|
|
444 id = d.get('id')
|
|
445 if id:
|
566
|
446 # TODO: check path (cf RFC2396)
|
564
|
447 d.set('about', "http://echo.mpiwg-berlin.mpg.de/%s/pn=%s/#%s"%(textid,pn,id))
|
|
448 cls = d.get('class','')
|
|
449 cls += ' pundit-content'
|
|
450 d.set('class', cls.strip())
|
|
451
|
|
452 return pagediv
|
|
453
|
610
|
454 def _addGisTags(self, pagediv, pageinfo, docinfo):
|
|
455 """add links for gis places"""
|
|
456 # use last part of documentPath as db-id
|
|
457 docpath = docinfo.get('documentPath', '')
|
|
458 textid = docpath.split('/')[-1]
|
|
459 # add our URL as backlink
|
|
460 selfurl = self.getLink()
|
|
461 doc = base64.b64encode(selfurl)
|
|
462 # check all span@class=place
|
|
463 spans = pagediv.findall(".//span[@class='place']")
|
|
464 for s in spans:
|
|
465 id = s.get('id')
|
|
466 if id:
|
|
467 # make links like http://mappit.mpiwg-berlin.mpg.de/db/RESTdb/db/mpdl/songy_tiang_zh_1637?id=N400061-02&doc=aHR...&format=gis
|
|
468 s.tag = 'a'
|
|
469 # TODO: make links configurable
|
|
470 url = "http://mappit.mpiwg-berlin.mpg.de/db/RESTdb/db/mpdl/%s?id=%s&doc=%s&format=gis"%(textid,id,doc)
|
|
471 s.set('href', url)
|
|
472 s.set('target', '_blank')
|
|
473
|
|
474 return pagediv
|
|
475
|
566
|
476 def _processFigures(self, pagediv, docinfo):
|
|
477 """processes figure-tags"""
|
576
|
478 # unfortunately etree can not select class.startswith('figure')
|
|
479 divs = pagediv.findall(".//span[@class]")
|
566
|
480 scalerUrl = docinfo['digilibScalerUrl']
|
|
481 viewerUrl = docinfo['digilibViewerUrl']
|
|
482 for d in divs:
|
576
|
483 if not d.get('class').startswith('figure'):
|
|
484 continue
|
|
485
|
566
|
486 try:
|
|
487 a = d.find('a')
|
|
488 img = a.find('img')
|
|
489 imgsrc = img.get('src')
|
|
490 imgurl = urlparse.urlparse(imgsrc)
|
|
491 imgq = imgurl.query
|
|
492 imgparams = urlparse.parse_qs(imgq)
|
|
493 fn = imgparams.get('fn', None)
|
|
494 if fn is not None:
|
|
495 # parse_qs puts parameters in lists
|
|
496 fn = fn[0]
|
|
497 # TODO: check valid path
|
|
498 # fix img@src
|
|
499 newsrc = '%s?fn=%s&dw=200&dh=200'%(scalerUrl,fn)
|
|
500 img.set('src', newsrc)
|
|
501 # fix a@href
|
|
502 newlink = '%s?fn=%s'%(viewerUrl,fn)
|
|
503 a.set('href', newlink)
|
|
504 a.set('target', '_blank')
|
|
505
|
|
506 except:
|
|
507 logging.warn("processFigures: strange figure!")
|
|
508
|
583
|
509
|
|
510 def _cleanSearchResult(self, pagediv):
|
|
511 """fixes search result html (change pbs and figures)"""
|
|
512 # replace figure-tag with figureNumText
|
|
513 for fig in pagediv.findall(".//span[@class='figure']"):
|
|
514 txt = fig.findtext(".//span[@class='figureNumText']")
|
|
515 tail = fig.tail
|
|
516 fig.clear()
|
|
517 fig.set('class', 'figure')
|
|
518 fig.text = txt
|
|
519 fig.tail = tail
|
|
520
|
|
521 # replace lb-tag with "//"
|
|
522 for lb in pagediv.findall(".//br[@class='lb']"):
|
|
523 lb.tag = 'span'
|
|
524 lb.text = '//'
|
|
525
|
|
526 # replace pb-tag with "///"
|
|
527 for pb in pagediv.findall(".//span[@class='pb']"):
|
|
528 tail = pb.tail
|
|
529 pb.clear()
|
|
530 pb.set('class', 'pb')
|
|
531 pb.text = '///'
|
|
532 pb.tail = tail
|
|
533
|
|
534 return pagediv
|
|
535
|
|
536 def _cleanSearchResult2(self, pagediv):
|
|
537 """fixes search result html (change pbs and figures)"""
|
|
538 # unfortunately etree can not select class.startswith('figure')
|
|
539 divs = pagediv.findall(".//span[@class]")
|
|
540 for d in divs:
|
|
541 cls = d.get('class')
|
|
542 if cls.startswith('figure'):
|
|
543 # replace figure-tag with figureNumText
|
|
544 txt = d.findtext(".//span[@class='figureNumText']")
|
|
545 d.clear()
|
|
546 d.set('class', 'figure')
|
|
547 d.text = txt
|
|
548
|
|
549 elif cls.startswith('pb'):
|
|
550 # replace pb-tag with "//"
|
|
551 d.clear()
|
|
552 d.set('class', 'pb')
|
|
553 d.text = '//'
|
|
554
|
|
555 return pagediv
|
|
556
|
|
557
|
566
|
558
|
565
|
559 def _fixEmptyDivs(self, pagediv):
|
|
560 """fixes empty div-tags by inserting a space"""
|
|
561 divs = pagediv.findall('.//div')
|
|
562 for d in divs:
|
|
563 if len(d) == 0 and not d.text:
|
|
564 # make empty divs non-empty
|
|
565 d.text = ' '
|
|
566
|
|
567 return pagediv
|
|
568
|
|
569
|
564
|
570 def getSearchResults(self, mode, query=None, pageinfo=None, docinfo=None):
|
|
571 """loads list of search results and stores XML in docinfo"""
|
583
|
572 normMode = pageinfo.get('characterNormalization', 'reg')
|
|
573 logging.debug("getSearchResults mode=%s query=%s norm=%s"%(mode, query, normMode))
|
564
|
574 if mode == "none":
|
|
575 return docinfo
|
|
576
|
568
|
577 #TODO: put mode into query
|
|
578
|
564
|
579 cachedQuery = docinfo.get('cachedQuery', None)
|
|
580 if cachedQuery is not None:
|
|
581 # cached search result
|
583
|
582 if cachedQuery == '%s_%s_%s'%(mode,query,normMode):
|
564
|
583 # same query
|
|
584 return docinfo
|
|
585
|
|
586 else:
|
|
587 # different query
|
|
588 del docinfo['resultSize']
|
568
|
589 del docinfo['results']
|
564
|
590
|
|
591 # cache query
|
583
|
592 docinfo['cachedQuery'] = '%s_%s_%s'%(mode,query,normMode)
|
564
|
593
|
|
594 # fetch full results
|
|
595 docpath = docinfo['textURLPath']
|
568
|
596 params = {'docId': docpath,
|
564
|
597 'query': query,
|
568
|
598 'pageSize': 1000,
|
|
599 'page': 1,
|
|
600 'outputFormat': 'html'}
|
|
601 pagexml = self.getServerData("query/QueryDocument",urllib.urlencode(params))
|
|
602 results = []
|
|
603 try:
|
|
604 dom = ET.fromstring(pagexml)
|
583
|
605 # clean html output
|
|
606 self._processWTags('plain', normMode, dom)
|
|
607 self._cleanSearchResult(dom)
|
568
|
608 # page content is currently in multiple <td align=left>
|
576
|
609 alldivs = dom.findall(".//tr[@class='hit']")
|
568
|
610 for div in alldivs:
|
576
|
611 # change tr to div
|
|
612 div.tag = 'div'
|
|
613 # change td to span
|
|
614 for d in div.findall('td'):
|
|
615 d.tag = 'span'
|
|
616
|
568
|
617 # TODO: can we put etree in the session?
|
|
618 results.append(div)
|
|
619
|
|
620 except Exception, e:
|
|
621 logging.error("GetSearchResults: Error parsing search result: %s"%e)
|
564
|
622
|
568
|
623 # store results in docinfo
|
|
624 docinfo['resultSize'] = len(results)
|
|
625 docinfo['results'] = results
|
564
|
626
|
|
627 return docinfo
|
|
628
|
|
629
|
|
630 def getResultsPage(self, mode="text", query=None, pn=None, start=None, size=None, pageinfo=None, docinfo=None):
|
583
|
631 """returns single page from the list of search results"""
|
564
|
632 logging.debug("getResultsPage mode=%s, pn=%s"%(mode,pn))
|
|
633 # get (cached) result
|
|
634 self.getSearchResults(mode=mode, query=query, pageinfo=pageinfo, docinfo=docinfo)
|
|
635
|
568
|
636 resultxml = docinfo.get('results', None)
|
564
|
637 if not resultxml:
|
568
|
638 logging.error("getResultPage: unable to find results")
|
564
|
639 return "Error: no result!"
|
|
640
|
|
641 if size is None:
|
|
642 size = pageinfo.get('resultPageSize', 10)
|
|
643
|
|
644 if start is None:
|
|
645 start = (pn - 1) * size
|
|
646
|
576
|
647 if resultxml is not None:
|
564
|
648 # paginate
|
|
649 first = start-1
|
576
|
650 last = first+size
|
|
651 tocdivs = resultxml[first:last]
|
564
|
652
|
576
|
653 toc = ET.Element('div', attrib={'class':'queryResultPage'})
|
|
654 for div in tocdivs:
|
|
655 # check all a-tags
|
|
656 links = div.findall(".//a")
|
|
657 for l in links:
|
|
658 href = l.get('href')
|
|
659 if href:
|
|
660 # assume all links go to pages
|
|
661 linkUrl = urlparse.urlparse(href)
|
|
662 linkParams = urlparse.parse_qs(linkUrl.query)
|
|
663 # take some parameters (make sure it works even if the link was already parsed)
|
|
664 params = {'pn': linkParams.get('page',linkParams.get('pn', None)),
|
|
665 'highlightQuery': linkParams.get('highlightQuery',None),
|
|
666 'highlightElement': linkParams.get('highlightElem',linkParams.get('highlightElement',None)),
|
|
667 'highlightElementPos': linkParams.get('highlightElemPos',linkParams.get('highlightElementPos',None))
|
|
668 }
|
|
669 if not params['pn']:
|
|
670 logging.warn("getResultsPage: link has no page: %s"%href)
|
|
671
|
|
672 url = self.getLink(params=params)
|
|
673 l.set('href', url)
|
564
|
674
|
576
|
675 toc.append(div)
|
|
676
|
|
677 return serialize(toc)
|
564
|
678
|
|
679 return "ERROR: no results!"
|
|
680
|
|
681
|
|
682 def getToc(self, mode='text', docinfo=None):
|
|
683 """returns list of table of contents from docinfo"""
|
|
684 logging.debug("getToc mode=%s"%mode)
|
|
685 if mode == 'text':
|
|
686 queryType = 'toc'
|
|
687 else:
|
|
688 queryType = mode
|
|
689
|
|
690 if not 'full_%s'%queryType in docinfo:
|
|
691 # get new toc
|
|
692 docinfo = self.getTextInfo(queryType, docinfo)
|
|
693
|
|
694 return docinfo.get('full_%s'%queryType, [])
|
|
695
|
568
|
696
|
564
|
697 def getTocPage(self, mode='text', pn=None, start=None, size=None, pageinfo=None, docinfo=None):
|
|
698 """returns single page from the table of contents"""
|
|
699 logging.debug("getTocPage mode=%s, pn=%s start=%s size=%s"%(mode,repr(pn),repr(start),repr(size)))
|
|
700 fulltoc = self.getToc(mode=mode, docinfo=docinfo)
|
|
701 if len(fulltoc) < 1:
|
|
702 logging.error("getTocPage: unable to find toc!")
|
|
703 return "Error: no table of contents!"
|
|
704
|
|
705 if size is None:
|
|
706 size = pageinfo.get('tocPageSize', 30)
|
|
707
|
|
708 if start is None:
|
|
709 start = (pn - 1) * size
|
|
710
|
|
711 # paginate
|
|
712 first = (start - 1)
|
|
713 last = first + size
|
|
714 tocs = fulltoc[first:last]
|
|
715 tp = '<div>'
|
609
|
716 label = {'figures': 'Figure', 'notes': 'Note', 'handwritten': 'Handwritten note'}.get(mode, 'Item')
|
564
|
717 for toc in tocs:
|
|
718 pageurl = self.getLink('pn', toc['pn'])
|
|
719 tp += '<div class="tocline">'
|
568
|
720 content = toc['content']
|
609
|
721 lvs = toc['level-string']
|
568
|
722 if content:
|
609
|
723 tp += '<div class="toc name">[%s] %s</div>'%(lvs, toc['content'])
|
|
724 elif lvs:
|
|
725 tp += '<div class="toc name">[%s %s]</div>'%(label, lvs)
|
568
|
726 else:
|
609
|
727 tp += '<div class="toc name">[%s]</div>'%(label)
|
568
|
728
|
|
729 if toc.get('no', None):
|
|
730 tp += '<div class="toc page"><a href="%s">Page: %s (%s)</a></div>'%(pageurl, toc['pn'], toc['no'])
|
|
731 else:
|
|
732 tp += '<div class="toc page"><a href="%s">Page: %s</a></div>'%(pageurl, toc['pn'])
|
|
733
|
564
|
734 tp += '</div>\n'
|
|
735
|
|
736 tp += '</div>\n'
|
|
737
|
|
738 return tp
|
|
739
|
|
740
|
|
741 def manage_changeMpiwgXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,repositoryType=None,RESPONSE=None):
|
|
742 """change settings"""
|
|
743 self.title=title
|
|
744 self.timeout = timeout
|
|
745 self.serverUrl = serverUrl
|
|
746 if repositoryType:
|
|
747 self.repositoryType = repositoryType
|
|
748 if RESPONSE is not None:
|
|
749 RESPONSE.redirect('manage_main')
|
|
750
|
|
751 # management methods
|
|
def manage_addMpiwgXmlTextServerForm(self):
    """Form for adding"""
    # render the add-form page template in our acquisition context
    form = PageTemplateFile("zpt/manage_addMpiwgXmlTextServer", globals()).__of__(self)
    return form()
|
|
756
|
|
def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    """add MpiwgXmlTextServer"""
    # create the instance and register it in the destination folder
    server = MpiwgXmlTextServer(id=id,title=title,serverUrl=serverUrl,timeout=timeout)
    self.Destination()._setObject(id, server)
    # return to the Zope management screen
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
|
|
763
|
610
|
764
|