1: from OFS.SimpleItem import SimpleItem
2: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
3:
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5: from Ft.Xml.Domlette import NonvalidatingReader
6: import Ft.Xml.Domlette
7: import cStringIO
8:
9: import xml.etree.ElementTree as ET
10:
11: import re
12: import logging
13: import urllib
14:
15: from SrvTxtUtils import getInt, getText, getHttpData
16:
def serialize(node):
    """Return an XML snippet string for node, without the XML declaration.

    ET.tostring() called with an encoding argument prepends an XML
    declaration ("<?xml ... ?>"); strip it (and the newline that usually
    follows it) so the result can be embedded in other markup.

    Uses bytes literals so the code behaves the same on Python 2 (str)
    and Python 3 (bytes output of tostring).
    """
    s = ET.tostring(node, 'UTF-8')
    if s.startswith(b'<?xml'):
        end = s.find(b'?>')
        # BUG FIX: the old code sliced at end+3, silently assuming exactly
        # one character (a newline) after '?>' -- and sliced at s[2:] when
        # '?>' was not found at all.  Slice directly after '?>' and strip
        # any following newline instead.
        if end >= 0:
            return s[end + 2:].lstrip(b'\n')

    return s
26:
27:
def getTextFromNode(node):
    """Return the concatenated character data of node's direct text children.

    Works on 4Suite DOM nodes (childNodes / nodeType / TEXT_NODE interface).
    Returns the empty string when node is None or has no text children.
    """
    if node is None:
        return ""

    # 4Suite DOM: gather the data of every direct TEXT_NODE child
    chunks = [child.data for child in node.childNodes
              if child.nodeType == node.TEXT_NODE]
    return "".join(chunks)
41:
def serializeNode(node, encoding="utf-8"):
    """Return node serialized as an XML string.

    Uses the 4Suite Domlette printer; encoding defaults to utf-8.
    """
    # 4Suite: print the node into an in-memory buffer
    buffer = cStringIO.StringIO()
    Ft.Xml.Domlette.Print(node, stream=buffer, encoding=encoding)
    result = buffer.getvalue()
    buffer.close()

    return result
53:
54:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope meta_type under which instances appear in the ZMI
    meta_type="MPDL-XML TextServer"

    # add a "Config" tab to the standard SimpleItem management screens
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
       )+SimpleItem.manage_options

    # ZPT form backing the "Config" management tab
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())

    def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
        """constructor

        id: Zope object id
        title: display title
        serverUrl: full base URL of the text server interface
        serverName: host name only; if given, takes precedence over serverUrl
        timeout: HTTP timeout in seconds for requests to the text server
        """
        self.id=id
        self.title=title
        self.timeout = timeout
        # serverName (host only) overrides the full serverUrl
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpdl/interface/"%serverName
74:
75: def getHttpData(self, url, data=None):
76: """returns result from url+data HTTP request"""
77: return getHttpData(url,data,timeout=self.timeout)
78:
79: def getServerData(self, method, data=None):
80: """returns result from text server for method+data"""
81: url = self.serverUrl+method
82: return getHttpData(url,data,timeout=self.timeout)
83:
    # WTF: what does this really do? can it be integrated in getPage?
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs a doc-query.xql search on the text server and returns the
        result page as a serialized XML snippet with all links rewritten
        to point back at this viewer.  Returns "no text here" when no
        usable result page is found.
        """
        logging.debug("getSearch()")
        # query parameters, taken from docinfo/pageinfo
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the text server
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # rewrite "?document=..." references into viewer "?url=..." references
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        # dead code kept for reference (string literal used as a comment)
        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # fulltext-like queries: rewrite page-fragment links in the result page
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            # carry the full viewer state over into the rewritten link
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphological fulltext: additionally turn lemma links into popups
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma links open a popup window on the viewer
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                # NOTE(review): the result of this xpath is never used
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # index queries: rewrite mode and turn dictionary links into popups
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
176:
177: def getGisPlaces(self, docinfo=None, pageinfo=None):
178: """ Show all Gis Places of whole Page"""
179: xpath='//place'
180: docpath = docinfo.get('textURLPath',None)
181: if not docpath:
182: return None
183:
184: pn = pageinfo['current']
185: hrefList=[]
186: myList= ""
187: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
188: dom = ET.fromstring(text)
189: result = dom.findall(".//result/resultPage/place")
190: for l in result:
191: href = l.get("id")
192: hrefList.append(href)
193: # WTF: what does this do?
194: myList = ",".join(hrefList)
195: #logging.debug("getGisPlaces :%s"%(myList))
196: return myList
197:
198: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
199: """Show all Gis Places of whole Book """
200: xpath ='//echo:place'
201: hrefList=[]
202: myList=""
203: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
204: dom = ET.fromstring(text)
205: result = dom.findall(".//result/resultPage/place")
206:
207: for l in result:
208: href = l.get("id")
209: hrefList.append(href)
210: # WTF: what does this do?
211: myList = ",".join(hrefList)
212: #logging.debug("getALLGisPlaces :%s"%(myList))
213: return myList
214:
215: def processPageInfo(self, dom, docinfo, pageinfo):
216: """processes page info divs from dom and stores in docinfo and pageinfo"""
217: # assume first second level div is pageMeta
218: alldivs = dom.find("div")
219:
220: if alldivs is None or alldivs.get('class', '') != 'pageMeta':
221: logging.error("processPageInfo: pageMeta div not found!")
222: return
223:
224: for div in alldivs:
225: dc = div.get('class')
226:
227: # pageNumberOrig
228: if dc == 'pageNumberOrig':
229: pageinfo['pageNumberOrig'] = div.text
230:
231: # pageNumberOrigNorm
232: elif dc == 'pageNumberOrigNorm':
233: pageinfo['pageNumberOrigNorm'] = div.text
234:
235: # pageHeaderTitle
236: elif dc == 'pageHeaderTitle':
237: pageinfo['pageHeaderTitle'] = div.text
238:
239: # numFigureEntries
240: elif dc == 'countFigureEntries':
241: docinfo['numFigureEntries'] = getInt(div.text)
242:
243: # numTocEntries
244: elif dc == 'countTocEntries':
245: # WTF: s1 = int(s)/30+1
246: docinfo['numTocEntries'] = getInt(div.text)
247:
248: # numPlaces
249: elif dc == 'countPlaces':
250: docinfo['numPlaces'] = getInt(div.text)
251:
252: # numTextPages
253: elif dc == 'countPages':
254: np = getInt(div.text)
255: if np > 0:
256: docinfo['numTextPages'] = np
257: if docinfo.get('numPages', 0) == 0:
258: # seems to be text-only - update page count
259: docinfo['numPages'] = np
260: pageinfo['end'] = min(pageinfo['end'], np)
261: pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
262: if np % pageinfo['groupsize'] > 0:
263: pageinfo['numgroups'] += 1
264:
265: #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
266: return
267:
268:
    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches page pn from the text server (page-fragment.xql), stores
        page metadata via processPageInfo(), and returns the serialized
        pageContent div, post-processed according to mode:
        "text"      - plain text, note links rewritten to viewer URLs
        "dict"      - text with dictionary/lemma links rewritten as popups
        "xml"/"pureXml" - page content returned as-is
        "gis"       - chinagis links rewritten to this document's database
        Returns "no text here" if no pageContent div is found.
        """
        logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
        # check for cached text -- but this shouldn't be called twice
        # NOTE: has_key is Python-2-only
        if pageinfo.has_key('textPage'):
            logging.debug("getTextPage: using cached text")
            return pageinfo['textPage']

        docpath = docinfo['textURLPath']
        # just checking
        if pageinfo['current'] != pn:
            logging.warning("getTextPage: current!=pn!")

        # stuff for constructing full urls
        url = docinfo['url']
        urlmode = docinfo['mode']
        sn = pageinfo.get('sn', None)
        highlightQuery = pageinfo.get('highlightQuery', None)
        tocMode = pageinfo.get('tocMode', None)
        tocPN = pageinfo.get('tocPN',None)
        characterNormalization = pageinfo.get('characterNormalization', None)
        selfurl = docinfo['viewerUrl']

        if mode == "dict" or mode == "text_dict":
            # dict is called textPollux in the backend
            textmode = "textPollux"
        elif not mode:
            # default is text
            mode = "text"
            textmode = "text"
        else:
            textmode = mode

        # request parameters for the backend
        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        # fetch the page
        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = ET.fromstring(pagexml)
        # extract additional info
        self.processPageInfo(dom, docinfo, pageinfo)
        # page content is in <div class="pageContent">
        pagediv = None
        # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
        # so we look at the second level divs
        alldivs = dom.findall("div")
        for div in alldivs:
            dc = div.get('class')
            # page content div
            if dc == 'pageContent':
                pagediv = div
                break

        # plain text mode
        if mode == "text":
            if pagediv is not None:
                # rewrite note anchors into full viewer URLs
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href and href.startswith('#note-'):
                        href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))
                        l.set('href', href)

                return serialize(pagediv)

        # text-with-links mode
        elif mode == "dict":
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')

                    if href:
                        # is link with href
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl))
                            # add target
                            l.set('target', '_blank')

                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            # lemma link: open in a popup window on the viewer
                            selfurl = self.absolute_url()
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl))
                            l.set('target', '_blank')
                            l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.set('ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            # note anchor: rewrite into a full viewer URL
                            l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)))

                return serialize(pagediv)

        # xml mode
        elif mode == "xml":
            if pagediv is not None:
                return serialize(pagediv)

        # pureXml mode
        elif mode == "pureXml":
            if pagediv is not None:
                return serialize(pagediv)

        # gis mode
        elif mode == "gis":
            name = docinfo['name']
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href:
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # point chinagis REST links at this document's database
                            l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name))
                            l.set('target', '_blank')

                return serialize(pagediv)

        return "no text here"
391:
392: # WTF: is this needed?
393: def getOrigPages(self, docinfo=None, pageinfo=None):
394: logging.debug("CALLED: getOrigPages!")
395: if not pageinfo.has_key('pageNumberOrig'):
396: logging.warning("getOrigPages: not in pageinfo!")
397: return None
398:
399: return pageinfo['pageNumberOrig']
400:
401: # WTF: is this needed?
402: def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
403: logging.debug("CALLED: getOrigPagesNorm!")
404: if not pageinfo.has_key('pageNumberOrigNorm'):
405: logging.warning("getOrigPagesNorm: not in pageinfo!")
406: return None
407:
408: return pageinfo['pageNumberOrigNorm']
409:
410: # TODO: should be getWordInfo
411: def getTranslate(self, word=None, language=None):
412: """translate into another languages"""
413: data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
414: return data
415:
416: # WTF: what does this do?
417: def getLemma(self, lemma=None, language=None):
418: """simular words lemma """
419: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
420: return data
421:
422: # WTF: what does this do?
423: def getLemmaQuery(self, query=None, language=None):
424: """simular words lemma """
425: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
426: return data
427:
428: # WTF: what does this do?
429: def getLex(self, query=None, language=None):
430: #simular words lemma
431: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
432: return data
433:
434: # WTF: what does this do?
435: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
436: #number of
437: docpath = docinfo['textURLPath']
438: pagesize = pageinfo['queryPageSize']
439: pn = pageinfo['searchPN']
440: query =pageinfo['query']
441: queryType =pageinfo['queryType']
442: tocSearch = 0
443: tocDiv = None
444:
445: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
446: pagedom = Parse(pagexml)
447: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
448: tocSearch = int(getTextFromNode(numdivs[0]))
449: tc=int((tocSearch/10)+1)
450: return tc
451:
452: def getToc(self, mode="text", docinfo=None):
453: """loads table of contents and stores XML in docinfo"""
454: logging.debug("getToc mode=%s"%mode)
455: if mode == "none":
456: return docinfo
457:
458: if 'tocSize_%s'%mode in docinfo:
459: # cached toc
460: return docinfo
461:
462: docpath = docinfo['textURLPath']
463: # we need to set a result set size
464: pagesize = 1000
465: pn = 1
466: if mode == "text":
467: queryType = "toc"
468: else:
469: queryType = mode
470: # number of entries in toc
471: tocSize = 0
472: tocDiv = None
473: # fetch full toc
474: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
475: dom = ET.fromstring(pagexml)
476: # page content is in <div class="queryResultPage">
477: pagediv = None
478: # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
479: alldivs = dom.findall("div")
480: for div in alldivs:
481: dc = div.get('class')
482: # page content div
483: if dc == 'queryResultPage':
484: pagediv = div
485:
486: elif dc == 'queryResultHits':
487: docinfo['tocSize_%s'%mode] = getInt(div.text)
488:
489: if pagediv:
490: # store XML in docinfo
491: docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
492:
493: return docinfo
494:
495: def getTocPage(self, mode="text", pn=0, pageinfo=None, docinfo=None):
496: """returns single page from the table of contents"""
497: logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
498: if mode == "text":
499: queryType = "toc"
500: else:
501: queryType = mode
502:
503: # check for cached TOC
504: if not docinfo.has_key('tocXML_%s'%mode):
505: self.getToc(mode=mode, docinfo=docinfo)
506:
507: tocxml = docinfo.get('tocXML_%s'%mode, None)
508: if not tocxml:
509: logging.error("getTocPage: unable to find tocXML")
510: return "No ToC"
511:
512: pagesize = pageinfo['tocPageSize']
513: tocPN = pageinfo['tocPN']
514: if not pn:
515: pn = tocPN
516:
517: fulltoc = ET.fromstring(tocxml)
518:
519: if fulltoc:
520: # paginate
521: start = (pn - 1) * pagesize * 2
522: len = pagesize * 2
523: del fulltoc[:start]
524: del fulltoc[len:]
525: tocdivs = fulltoc
526:
527: # check all a-tags
528: links = tocdivs.findall(".//a")
529: for l in links:
530: href = l.get('href')
531: if href:
532: # take pn from href
533: m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
534: if m is not None:
535: # and create new url (assuming parent is documentViewer)
536: url = self.getLink('pn', m.group(1))
537: l.set('href', url)
538: else:
539: logging.warning("getTocPage: Problem with link=%s"%href)
540:
541: return serialize(tocdivs)
542:
543:
544: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
545: """change settings"""
546: self.title=title
547: self.timeout = timeout
548: self.serverUrl = serverUrl
549: if RESPONSE is not None:
550: RESPONSE.redirect('manage_main')
551:
# management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    template = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
    return template()
557:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
#def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
    """add zogiimage"""
    # BUG FIX: timeout was passed positionally and landed in the serverName
    # parameter of MpdlXmlTextServer.__init__ (signature:
    # (id, title, serverUrl, serverName=None, timeout=40)), which produced
    # serverUrl = "http://40/mpdl/interface/"; pass it by keyword instead
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
565:
566:
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>