comparison MpiwgXmlTextServer.py @ 564:31f562fa7214

first version of MpiwgXmlTextServer.
author casties
date Mon, 08 Oct 2012 20:36:00 +0200
parents
children 1b483194901c
563:8f2dab2d2d2a 564:31f562fa7214
1 from OFS.SimpleItem import SimpleItem
2 from Products.PageTemplates.PageTemplateFile import PageTemplateFile
3
4 import xml.etree.ElementTree as ET
5
6 import re
7 import logging
8 import urllib
9 import urlparse
10 import base64
11
12 from SrvTxtUtils import getInt, getText, getHttpData
13
14 def serialize(node):
15 """returns a string containing an XML snippet of node"""
16 s = ET.tostring(node, 'UTF-8')
17 # snip off XML declaration
18 if s.startswith('<?xml'):
19 i = s.find('?>')
20 return s[i+3:]
21
22 return s
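# for illustration: serialize(ET.fromstring('<p>text</p>')) returns '<p>text</p>',
# i.e. the element markup without the "<?xml ...?>" declaration that ET.tostring() prepends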
23
24
25 class MpiwgXmlTextServer(SimpleItem):
26 """TextServer implementation for MPIWG-XML server"""
27 meta_type="MPIWG-XML TextServer"
28
29 manage_options=(
30 {'label':'Config','action':'manage_changeMpiwgXmlTextServerForm'},
31 )+SimpleItem.manage_options
32
33 manage_changeMpiwgXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpiwgXmlTextServer", globals())
34
35 def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpiwg-mpdl-cms-web/", timeout=40, serverName=None, repositoryType='production'):
36 """constructor"""
37 self.id=id
38 self.title=title
39 self.timeout = timeout
40 self.repositoryType = repositoryType
41 if serverName is None:
42 self.serverUrl = serverUrl
43 else:
44 self.serverUrl = "http://%s/mpiwg-mpdl-cms-web/"%serverName
45
46 def getHttpData(self, url, data=None):
47 """returns result from url+data HTTP request"""
48 return getHttpData(url,data,timeout=self.timeout)
49
50 def getServerData(self, method, data=None):
51 """returns result from text server for method+data"""
52 url = self.serverUrl+method
53 return getHttpData(url,data,timeout=self.timeout)
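# for illustration: getServerData("query/GetDocInfo", "docId=<textURLPath>&field=toc")
# requests <serverUrl>query/GetDocInfo with the given data (getHttpData applies the timeout)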
54
55
56 def getRepositoryType(self):
57 """returns the repository type, e.g. 'production'"""
58 return self.repositoryType
59
60 def getTextDownloadUrl(self, type='xml', docinfo=None):
61 """returns a URL to download the current text"""
62 docpath = docinfo.get('textURLPath', None)
63 if not docpath:
64 return None
65
66 docpath = docpath.replace('.xml','.'+type)
67 url = '%sdoc/GetDocument?id=%s'%(self.serverUrl.replace('interface/',''), docpath)
68 return url
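# for illustration: with type='pdf' this returns
# <serverUrl without 'interface/'>doc/GetDocument?id=<textURLPath with .xml replaced by .pdf>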
69
70
71 def getPlacesOnPage(self, docinfo=None, pn=None):
72 """Returns list of GIS places of page pn"""
73 #FIXME!
74 docpath = docinfo.get('textURLPath',None)
75 if not docpath:
76 return None
77
78 places=[]
79 text=self.getServerData("xpath.xql", "document=%s&xpath=//place&pn=%s"%(docpath,pn))
80 dom = ET.fromstring(text)
81 result = dom.findall(".//resultPage/place")
82 for l in result:
83 id = l.get("id")
84 name = l.text
85 place = {'id': id, 'name': name}
86 places.append(place)
87
88 return places
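# the xpath.xql response is expected to contain <resultPage><place id="...">name</place>... elements,
# which are collected above into a list of {'id': ..., 'name': ...} dicts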
89
90
91 def getTextInfo(self, mode='', docinfo=None):
92 """reads document info, including page concordance, from text server"""
93 logging.debug("getTextInfo mode=%s"%mode)
94 if mode not in ['toc', 'figures', '']:
95 mode = ''
96 # check cached info
97 if mode:
98 # cached toc-request?
99 if 'full_%s'%mode in docinfo:
100 return docinfo
101
102 else:
103 # no toc-request
104 if 'numTextPages' in docinfo:
105 return docinfo
106
107 docpath = docinfo.get('textURLPath', None)
108 if docpath is None:
109 logging.error("getTextInfo: no textURLPath!")
110 return docinfo
111
112 # fetch docinfo
113 pagexml = self.getServerData("query/GetDocInfo","docId=%s&field=%s"%(docpath,mode))
114 dom = ET.fromstring(pagexml)
115 # all info is in the <doc> tag
116 doc = dom.find("doc")
117 if doc is None:
118 logging.error("getTextInfo: unable to find doc tag!")
119 else:
120 # result is in list-tag
121 l = doc.find('list')
122 if l is not None:
123 lt = l.get('type')
124 # pageNumbers
125 if lt == 'pages':
126 # contains tags with page numbers
127 # <item n="14" o="2" o-norm="2" file="0014"/>
128 # n=scan number, o=original page number, o-norm=normalized original page number
129 # pageNumbers is a dict indexed by scan number
130 pages = {}
131 for i in l:
132 page = {}
133 pn = getInt(i.get('n'))
134 page['pn'] = pn
135 no = getInt(i.get('o'))
136 page['no'] = no
137 non = getInt(i.get('o-norm'))
138 page['non'] = non
139
140 if pn > 0:
141 pages[pn] = page
142
143 docinfo['numTextPages'] = len(pages)
144 docinfo['pageNumbers'] = pages
145 logging.debug("got pageNumbers=%s"%repr(pages))
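# e.g. the <item n="14" o="2" o-norm="2"/> above becomes pages[14] = {'pn': 14, 'no': 2, 'non': 2}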
146
147 # toc
148 elif lt in ('toc', 'figures'):
149 # contains tags with table of contents/figures
150 # <toc-entry><page>13</page><level>3</level><content>Chapter I</content><level-string>1.</level-string><real-level>1</real-level></toc-entry>
151 tocs = []
152 for te in l:
153 toc = {}
154 for t in te:
155 if t.tag == 'page':
156 toc['pn'] = getInt(t.text)
157 elif t.tag == 'level':
158 toc['level'] = t.text
159 elif t.tag == 'content':
160 toc['content'] = t.text
161 elif t.tag == 'level-string':
162 toc['level-string'] = t.text
163 elif t.tag == 'real-level':
164 toc['real-level'] = t.text
165
166 tocs.append(toc)
167
168 # save as full_toc/full_figures
169 docinfo['full_%s'%mode] = tocs
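# a full_toc/full_figures entry thus looks like
# {'pn': 13, 'level': '3', 'content': 'Chapter I', 'level-string': '1.', 'real-level': '1'}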
170
171 return docinfo
172
173
174 def processPageInfo(self, dom, docinfo, pageinfo):
175 """processes page info divs from dom and stores in docinfo and pageinfo"""
176 # assume the first div child is the pageMeta div
177 alldivs = dom.find("div")
178
179 if alldivs is None or alldivs.get('class', '') != 'pageMeta':
180 logging.error("processPageInfo: pageMeta div not found!")
181 return
182
183 for div in alldivs:
184 dc = div.get('class')
185
186 # pageNumberOrig
187 if dc == 'pageNumberOrig':
188 pageinfo['pageNumberOrig'] = div.text
189
190 # pageNumberOrigNorm
191 elif dc == 'pageNumberOrigNorm':
192 pageinfo['pageNumberOrigNorm'] = div.text
193
194 # pageHeaderTitle
195 elif dc == 'pageHeaderTitle':
196 pageinfo['pageHeaderTitle'] = div.text
197
198 #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
199 return
200
201
202 def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
203 """returns single page from fulltext"""
204
205 logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
206 # check for cached text -- but ideally this shouldn't be called twice
207 if 'textPage' in pageinfo:
208 logging.debug("getTextPage: using cached text")
209 return pageinfo['textPage']
210
211 docpath = docinfo.get('textURLPath', None)
212 if not docpath:
213 return None
214
215 # just checking
216 if pageinfo['current'] != pn:
217 logging.warning("getTextPage: current!=pn!")
218
219 # parameters for the GetPage request and for building full URLs
220 selfurl = docinfo['viewerUrl']
221 textParams = {'docId': docpath,
222 'page': pn}
223 if 'characterNormalization' in pageinfo:
224 textParams['normalization'] = pageinfo['characterNormalization']
225
226 if not mode:
227 # default mode is 'text'
228 mode = 'text'
229
230 modes = mode.split(',')
231 # check for multiple layers
232 if len(modes) > 1:
233 logging.debug("getTextPage: more than one mode=%s"%mode)
234
235 # search mode
236 if 'search' in modes:
237 # add highlighting
238 highlightQuery = pageinfo.get('highlightQuery', None)
239 if highlightQuery:
240 textParams['highlightQuery'] = highlightQuery
241 textParams['highlightElem'] = pageinfo.get('highlightElement', '')
242 textParams['highlightElemPos'] = pageinfo.get('highlightElementPos', '')
243
244 # ignore mode in the following
245 modes.remove('search')
246
247 # pundit mode
248 punditMode = False
249 if 'pundit' in modes:
250 punditMode = True
251 # ignore mode in the following
252 modes.remove('pundit')
253
254 # other modes don't combine
255 if 'dict' in modes:
256 textmode = 'dict'
257 textParams['mode'] = 'tokenized'
258 textParams['outputFormat'] = 'html'
259 elif 'xml' in modes:
260 textmode = 'xml'
261 textParams['mode'] = 'untokenized'
262 textParams['outputFormat'] = 'xmlDisplay'
263 textParams['normalization'] = 'orig'
264 elif 'gis' in modes:
265 #FIXME!
266 textmode = 'gis'
267 else:
268 # text is default mode
269 textmode = 'text'
270 textParams['mode'] = 'untokenized'
271 textParams['outputFormat'] = 'html'
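# the request sent below is query/GetPage with e.g.
# docId=<textURLPath>&page=<pn>&mode=untokenized&outputFormat=html
# plus the optional normalization and highlight* parameters set above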
272
273 # fetch the page
274 pagexml = self.getServerData("query/GetPage",urllib.urlencode(textParams))
275 dom = ET.fromstring(pagexml)
276 # extract additional info
277 #self.processPageInfo(dom, docinfo, pageinfo)
278 # page content is in <div class="pageContent">
279 pagediv = None
280 body = dom.find('.//body')
281 if body is None:
282 logging.error("getTextPage: no body!")
283 return None
284
285 # the page content is wrapped in nested spans; the first div inside is the page div
286 textspan = body.find('span/span')
287 divs = textspan.findall('div')
288 logging.debug("textdivs: %s"%repr(divs))
289 pagediv = divs[0]
290 logging.debug("pagediv: %s"%serialize(pagediv))
291
292 # plain text mode
293 if textmode == "text":
294 # get full url assuming documentViewer is parent
295 selfurl = self.getLink()
296 if pagediv is not None:
297 if punditMode:
298 pagediv = self.addPunditAttributes(pagediv, pageinfo, docinfo)
299
300 # fix empty div tags
301 divs = pagediv.findall('.//div')
302 for d in divs:
303 if len(d) == 0 and not d.text:
304 # make empty divs non-empty
305 d.text = ' '
306
307 # check all a-tags
308 links = pagediv.findall('.//a')
309 for l in links:
310 href = l.get('href')
311 if href and href.startswith('#note-'):
312 href = href.replace('#note-',"%s#note-"%selfurl)
313 l.set('href', href)
314
315 return serialize(pagediv)
316
317 # text-with-links mode
318 elif textmode == "dict":
319 if pagediv is not None:
320 viewerurl = docinfo['viewerUrl']
321 selfurl = self.getLink()
322 if punditMode:
323 pagediv = self.addPunditAttributes(pagediv, pageinfo, docinfo)
324
325 # fix empty div tags
326 divs = pagediv.findall('.//div')
327 for d in divs:
328 if len(d) == 0 and not d.text:
329 # make empty divs non-empty
330 d.text = ' '
331
332 # check all a-tags
333 links = pagediv.findall(".//a")
334 for l in links:
335 href = l.get('href')
336
337 if href:
338 # is link with href
339 linkurl = urlparse.urlparse(href)
340 #logging.debug("getTextPage: linkurl=%s"%repr(linkurl))
341 if linkurl.path.endswith('GetDictionaryEntries'):
342 #TODO: replace wordInfo page
343 # is dictionary link - change href (keeping parameters)
344 #l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/template/viewer_wordinfo'%viewerurl))
345 # add target to open new page
346 l.set('target', '_blank')
347
348 if href.startswith('#note-'):
349 # note link
350 l.set('href', href.replace('#note-',"%s#note-"%selfurl))
351
352 return serialize(pagediv)
353
354 # xml mode
355 elif textmode == "xml":
356 if pagediv is not None:
357 return serialize(pagediv)
358
359 # pureXml mode (note: textmode is never set to 'pureXml' above)
360 elif textmode == "pureXml":
361 if pagediv is not None:
362 return serialize(pagediv)
363
364 # gis mode
365 elif textmode == "gis":
366 if pagediv is not None:
367 # fix empty div tags
368 divs = pagediv.findall('.//div')
369 for d in divs:
370 if len(d) == 0 and not d.text:
371 # make empty divs non-empty
372 d.text = ' '
373
374 # check all a-tags
375 links = pagediv.findall(".//a")
376 # add our URL as backlink
377 selfurl = self.getLink()
378 doc = base64.b64encode(selfurl)
379 for l in links:
380 href = l.get('href')
381 if href:
382 if href.startswith('http://mappit.mpiwg-berlin.mpg.de'):
383 l.set('href', re.sub(r'doc=[\w+/=]+', 'doc=%s'%doc, href))
384 l.set('target', '_blank')
385
386 return serialize(pagediv)
387
388 return None
389
390 def addPunditAttributes(self, pagediv, pageinfo, docinfo):
391 """add about attributes for pundit annotation tool"""
392 textid = docinfo.get('DRI', "fn=%s"%docinfo.get('documentPath', '???'))
393 pn = pageinfo.get('pn', '1')
394 # TODO: use pn as well?
395 # check all div-tags
396 divs = pagediv.findall(".//div")
397 for d in divs:
398 id = d.get('id')
399 if id:
400 d.set('about', "http://echo.mpiwg-berlin.mpg.de/%s/pn=%s/#%s"%(textid,pn,id))
401 cls = d.get('class','')
402 cls += ' pundit-content'
403 d.set('class', cls.strip())
404
405 return pagediv
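# divs with an id now carry about="http://echo.mpiwg-berlin.mpg.de/<textid>/pn=<pn>/#<id>"
# and the additional class 'pundit-content' for the Pundit annotation tool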
406
407 def getSearchResults(self, mode, query=None, pageinfo=None, docinfo=None):
408 """loads list of search results and stores XML in docinfo"""
409
410 logging.debug("getSearchResults mode=%s query=%s"%(mode, query))
411 if mode == "none":
412 return docinfo
413
414 cachedQuery = docinfo.get('cachedQuery', None)
415 if cachedQuery is not None:
416 # cached search result
417 if cachedQuery == '%s_%s'%(mode,query):
418 # same query
419 return docinfo
420
421 else:
422 # different query
423 docinfo.pop('resultSize', None)
424 docinfo.pop('resultXML', None)
425
426 # cache query
427 docinfo['cachedQuery'] = '%s_%s'%(mode,query)
428
429 # fetch full results
430 docpath = docinfo['textURLPath']
431 params = {'document': docpath,
432 'mode': 'text',
433 'queryType': mode,
434 'query': query,
435 'queryResultPageSize': 1000,
436 'queryResultPN': 1,
437 'characterNormalization': pageinfo.get('characterNormalization', 'reg')}
438 pagexml = self.getServerData("doc-query.xql",urllib.urlencode(params))
439 #pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
440 dom = ET.fromstring(pagexml)
441 # page content is in <div class="queryResultPage">
442 pagediv = None
443 # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
444 alldivs = dom.findall("div")
445 for div in alldivs:
446 dc = div.get('class')
447 # page content div
448 if dc == 'queryResultPage':
449 pagediv = div
450
451 elif dc == 'queryResultHits':
452 docinfo['resultSize'] = getInt(div.text)
453
454 if pagediv is not None:
455 # store XML in docinfo
456 docinfo['resultXML'] = ET.tostring(pagediv, 'UTF-8')
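# resultXML caches the serialized queryResultPage div so that getResultsPage()
# can paginate the hits without querying the server again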
457
458 return docinfo
459
460
461 def getResultsPage(self, mode="text", query=None, pn=None, start=None, size=None, pageinfo=None, docinfo=None):
462 """returns single page of the search results"""
463 logging.debug("getResultsPage mode=%s, pn=%s"%(mode,pn))
464 # get (cached) result
465 self.getSearchResults(mode=mode, query=query, pageinfo=pageinfo, docinfo=docinfo)
466
467 resultxml = docinfo.get('resultXML', None)
468 if not resultxml:
469 logging.error("getResultsPage: unable to find resultXML")
470 return "Error: no result!"
471
472 if size is None:
473 size = pageinfo.get('resultPageSize', 10)
474
475 if start is None:
476 start = (pn - 1) * size + 1
477
478 fullresult = ET.fromstring(resultxml)
479
480 if fullresult is not None:
481 # paginate
482 first = start-1
483 count = size
484 del fullresult[:first]
485 del fullresult[count:]
486 tocdivs = fullresult
487
488 # check all a-tags
489 links = tocdivs.findall(".//a")
490 for l in links:
491 href = l.get('href')
492 if href:
493 # assume all links go to pages
494 linkUrl = urlparse.urlparse(href)
495 linkParams = urlparse.parse_qs(linkUrl.query)
496 # take some parameters
497 params = {'pn': linkParams['pn'],
498 'highlightQuery': linkParams.get('highlightQuery',''),
499 'highlightElement': linkParams.get('highlightElement',''),
500 'highlightElementPos': linkParams.get('highlightElementPos','')
501 }
502 url = self.getLink(params=params)
503 l.set('href', url)
504
505 return serialize(tocdivs)
506
507 return "ERROR: no results!"
508
509
510 def getToc(self, mode='text', docinfo=None):
511 """returns the list of table-of-contents entries from docinfo"""
512 logging.debug("getToc mode=%s"%mode)
513 if mode == 'text':
514 queryType = 'toc'
515 else:
516 queryType = mode
517
518 if not 'full_%s'%queryType in docinfo:
519 # get new toc
520 docinfo = self.getTextInfo(queryType, docinfo)
521
522 return docinfo.get('full_%s'%queryType, [])
523
524 def getTocPage(self, mode='text', pn=None, start=None, size=None, pageinfo=None, docinfo=None):
525 """returns single page from the table of contents"""
526 logging.debug("getTocPage mode=%s, pn=%s start=%s size=%s"%(mode,repr(pn),repr(start),repr(size)))
527 fulltoc = self.getToc(mode=mode, docinfo=docinfo)
528 if len(fulltoc) < 1:
529 logging.error("getTocPage: unable to find toc!")
530 return "Error: no table of contents!"
531
532 if size is None:
533 size = pageinfo.get('tocPageSize', 30)
534
535 if start is None:
536 start = (pn - 1) * size + 1
537
538 # paginate
539 first = (start - 1)
540 last = first + size
541 tocs = fulltoc[first:last]
542 tp = '<div>'
543 for toc in tocs:
544 pageurl = self.getLink('pn', toc['pn'])
545 tp += '<div class="tocline">'
546 tp += '<div class="toc name">[%s %s]</div>'%(toc['level-string'], toc['content'])
547 tp += '<div class="toc float right page"><a href="%s">Page: %s</a></div>'%(pageurl, toc['pn'])
548 tp += '</div>\n'
549
550 tp += '</div>\n'
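# generated markup (sketch): <div> <div class="tocline"> <div class="toc name">[<level-string> <content>]</div>
# <div class="toc float right page"><a href="<pageurl>">Page: <pn></a></div> </div> ... </div>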
551
552 return tp
553
554
555 def manage_changeMpiwgXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,repositoryType=None,RESPONSE=None):
556 """change settings"""
557 self.title=title
558 self.timeout = timeout
559 self.serverUrl = serverUrl
560 if repositoryType:
561 self.repositoryType = repositoryType
562 if RESPONSE is not None:
563 RESPONSE.redirect('manage_main')
564
565 # management methods
566 def manage_addMpiwgXmlTextServerForm(self):
567 """Form for adding"""
568 pt = PageTemplateFile("zpt/manage_addMpiwgXmlTextServer", globals()).__of__(self)
569 return pt()
570
571 def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
572 #def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
573 """add MpiwgXmlTextServer"""
574 newObj = MpiwgXmlTextServer(id=id,title=title,serverUrl=serverUrl,timeout=timeout)
575 self.Destination()._setObject(id, newObj)
576 if RESPONSE is not None:
577 RESPONSE.redirect('manage_main')
578
579