1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5:
6: import md5
7: import sys
8: import logging
9: import urllib
10: import documentViewer
11: from documentViewer import getTextFromNode, serializeNode
12:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope meta_type under which this object appears in the ZMI
    meta_type="MPDL-XML TextServer"

    # ZMI management tabs: prepend a "Config" tab to the inherited ones
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # page template backing the "Config" management tab
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
22:
23: def __init__(self,id,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
24: """constructor"""
25: self.id=id
26: self.title=title
27: self.timeout = timeout
28: if serverName is None:
29: self.serverUrl = serverUrl
30: else:
31: self.serverUrl = "http://%s/mpdl/interface/"%serverName
32:
33: def getHttpData(self, url, data=None):
34: """returns result from url+data HTTP request"""
35: return documentViewer.getHttpData(url,data,timeout=self.timeout)
36:
37: def getServerData(self, method, data=None):
38: """returns result from text server for method+data"""
39: url = self.serverUrl+method
40: return documentViewer.getHttpData(url,data,timeout=self.timeout)
41:
    def getSearch(self, pageinfo=None, docinfo=None, lemma=None):
        """get search list

        Runs a fulltext/index query on the eXist text server and returns the
        serialized result page with all links rewritten to point back at this
        Zope object (instead of the raw page-fragment.xql service).
        Returns "no text here" when the query type matches no branch or the
        result page is empty.
        """
        # collect request parameters from docinfo/pageinfo
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the server; query strings are URL-quoted
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&optionToggle=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization,optionToggle ,urllib.quote(highlightQuery)))
        # rewrite document references in the raw XML to this viewer's url
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # --- fulltext / xpath / xquery / lemma queries: rewrite result links ---
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            # point the hit link back at this object, carrying the view state
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,optionToggle,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # --- morphological fulltext query: also rewrite lemma popup links ---
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,optionToggle,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma links open a popup window on this object's template
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma_New'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            # NOTE(review): this second setAttributeNS replaces the onClick
                            # set just above (only 'popupWin.focus();' survives) — looks
                            # like the two handlers were meant to be combined; verify.
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # --- fulltext index queries: rewrite lexicon/lemma links to popups ---
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        # every link gets the current view state appended
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,optionToggle,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            # NOTE(review): overwrites the onClick set above — see note in
                            # the fulltextMorph branch.
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            # NOTE(review): overwrites the onClick set above — see note in
                            # the fulltextMorph branch.
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
132:
133:
134: def getGisPlaces(self, docinfo=None, pageinfo=None):
135: """ Show all Gis Places of whole Page"""
136: xpath='//place'
137: docpath = docinfo.get('textURLPath',None)
138: if not docpath:
139: return None
140:
141: url = docinfo['url']
142: selfurl = self.absolute_url()
143: pn = pageinfo['current']
144: hrefList=[]
145: myList= ""
146: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
147: dom = Parse(text)
148: result = dom.xpath("//result/resultPage/place")
149: for l in result:
150: hrefNode= l.getAttributeNodeNS(None, u"id")
151: href= hrefNode.nodeValue
152: hrefList.append(href)
153: myList = ",".join(hrefList)
154: logging.debug("getGisPlaces :%s"%(myList))
155: return myList
156:
157: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
158: """Show all Gis Places of whole Book """
159: xpath ='//echo:place'
160: docpath =docinfo['textURLPath']
161: url = docinfo['url']
162: selfurl =self.absolute_url()
163: pn =pageinfo['current']
164: hrefList=[]
165: myList=""
166: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
167: dom =Parse(text)
168: result = dom.xpath("//result/resultPage/place")
169:
170: for l in result:
171: hrefNode = l.getAttributeNodeNS(None, u"id")
172: href= hrefNode.nodeValue
173: hrefList.append(href)
174: myList = ",".join(hrefList)
175: logging.debug("getALLGisPlaces :%s"%(myList))
176: return myList
177:
178:
    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches one page of fulltext from the eXist server, updates page/hit
        counters in docinfo, then renders the page according to mode:
        "text", "xml", "gis", "pureXml" or "text_dict" (text with dictionary
        links). Returns the serialized page div, or "no text here" if no
        mode branch matched.
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        pn =pageinfo['current']
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # "text_dict" is rendered by the server's "textPollux" mode
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery is not None:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)

        # NOTE(review): the repeated "if pagedivs == dom.xpath(...)" checks
        # below re-run the same xpath and compare the node lists; presumably
        # always true and intended only as a guard — verify.
        #original Pages
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                #logging.debug("ORIGINAL PAGES: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                #logging.debug("ORIGINAL PAGES: %s"%(docinfo['pageNumberOrigNorm']))


        #figureEntries
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                        #logging.debug("FIGURE ENTRIES: %s"%(s))
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # 30 entries per page -> number of result pages
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                        #logging.debug("FIGURE ENTRIES: %s"%(s1))
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                    #logging.debug("PLACES HERE: %s"%(s))
                except:
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                        #logging.debug("TEXT ENTRIES: %s"%(s))
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # 30 entries per page -> number of result pages
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                        #logging.debug("TEXT ENTRIES: %s"%(s1))
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    #logging.debug("PAGE NUMBER: %s"%(s))

                    # clamp the visible page range and derive group count
                    np = docinfo['numPages']
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            #mode = viewMode
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            # rewrite note anchors to carry the full view state
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # point ChinaGIS REST links at this document's db
                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('lt/lex.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                            # NOTE(review): the second onClick replaces the first
                            # (only 'popupWin.focus();' survives) — verify intent.
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('lt/lemma.xql'):
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            # NOTE(review): same onClick overwrite as above.
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        return "no text here"
376:
377: def getTranslate(self, query=None, language=None):
378: """translate into another languages"""
379: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
380: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
381: return data
382:
383: def getLemma(self, lemma=None, language=None):
384: """simular words lemma """
385: data = self.getServerData("lt/lemma.xql","document=&language="+str(language)+"&lemma="+urllib.quote(lemma))
386: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(lemma)))
387: return data
388:
389: def getLemmaNew(self, query=None, language=None):
390: """simular words lemma """
391: data = self.getServerData("lt/lemma.xql","document=&language="+str(language)+"&lemma="+urllib.quote(query))
392: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(query)))
393: return data
394:
395: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1, optionToggle=None):
396: #number of
397: docpath = docinfo['textURLPath']
398: pagesize = pageinfo['queryPageSize']
399: pn = pageinfo['searchPN']
400: query =pageinfo['query']
401: queryType =pageinfo['queryType']
402: tocSearch = 0
403: tocDiv = None
404:
405: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
406: pagedom = Parse(pagexml)
407: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
408: tocSearch = int(getTextFromNode(numdivs[0]))
409: #logging.debug("documentViewer (gettoc) tocSearch: %s"%(tocSearch))
410: tc=int((tocSearch/10)+1)
411: #logging.debug("documentViewer (gettoc) tc: %s"%(tc))
412: return tc
413:
414: def getToc(self, mode="text", docinfo=None):
415: """loads table of contents and stores in docinfo"""
416: #logging.debug("documentViewer (gettoc) mode: %s"%(mode))
417: if mode == "none":
418: return docinfo
419: if 'tocSize_%s'%mode in docinfo:
420: # cached toc
421: return docinfo
422:
423: docpath = docinfo['textURLPath']
424: # we need to set a result set size
425: pagesize = 1000
426: pn = 1
427: if mode == "text":
428: queryType = "toc"
429: else:
430: queryType = mode
431: # number of entries in toc
432: tocSize = 0
433: tocDiv = None
434:
435: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
436:
437: # post-processing downloaded xml
438: pagedom = Parse(pagexml)
439: # get number of entries
440: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
441: if len(numdivs) > 0:
442: tocSize = int(getTextFromNode(numdivs[0]))
443: docinfo['tocSize_%s'%mode] = tocSize
444: return docinfo
445:
    def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
        """returns single page from the table of contents

        Fetches one page of the TOC (or figures/places list) from the text
        server and rewrites its page-fragment links to point back at this
        viewer with the current view state.
        """
        # TODO: this should use the cached TOC
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        pagesize = pageinfo['tocPageSize']
        pn = pageinfo['tocPN']
        url = docinfo['url']
        selfurl = self.absolute_url()
        viewMode= pageinfo['viewMode']
        characterNormalization = pageinfo ['characterNormalization']
        optionToggle =pageinfo ['optionToggle']
        tocMode = pageinfo['tocMode']
        tocPN = pageinfo['tocPN']

        # NOTE(review): characterNormalization/optionToggle are read above but
        # the request below hard-codes regPlusNorm and optionToggle=1 — verify.
        data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm&optionToggle=1"%(docpath,queryType, pagesize, pn))
        # rewrite server links to this object, carrying the view state
        page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s&optionToggle=1'%(selfurl,url, viewMode, tocMode, tocPN))
        text = page.replace('mode=image','mode=texttool')
        #logging.debug("documentViewer (characterNormalization) characterNormalization: %s"%(characterNormalization))
        #logging.debug("TEXT %s"%(text))
        return text
471:
472: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
473: """change settings"""
474: self.title=title
475: self.timeout = timeout
476: self.serverUrl = serverUrl
477: if RESPONSE is not None:
478: RESPONSE.redirect('manage_main')
479:
480: # management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    # render the add form bound to the current container
    form = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
    return form()
485:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    """add zogiimage"""
    # Pass timeout by keyword: the 4th positional parameter of
    # MpdlXmlTextServer.__init__ is serverName, so the old positional call
    # MpdlXmlTextServer(id,title,serverUrl,timeout) put the timeout into
    # serverName and mangled serverUrl into "http://40/mpdl/interface/".
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>