1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5:
6: import md5
7: import sys
8: import logging
9: import urllib
10: import documentViewer
11: from documentViewer import getTextFromNode, serializeNode
12:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server.

    Fetches fulltext pages, search results, tables of contents and GIS
    place lists by issuing HTTP requests to the XQuery scripts (*.xql)
    of an MPDL eXist text server and post-processing the returned XML.
    """
    # Zope product meta type under which instances appear in the ZMI
    meta_type="MPDL-XML TextServer"

    # add a "Config" tab in front of the standard SimpleItem management tabs
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # ZPT form rendered by the "Config" tab
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
22:
23: def __init__(self,id,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
24: """constructor"""
25: self.id=id
26: self.title=title
27: self.timeout = timeout
28: if serverName is None:
29: self.serverUrl = serverUrl
30: else:
31: self.serverUrl = "http://%s/mpdl/interface/"%serverName
32:
33: def getHttpData(self, url, data=None):
34: """returns result from url+data HTTP request"""
35: return documentViewer.getHttpData(url,data,timeout=self.timeout)
36:
37: def getServerData(self, method, data=None):
38: """returns result from text server for method+data"""
39: url = self.serverUrl+method
40: return documentViewer.getHttpData(url,data,timeout=self.timeout)
41:
    def getSearch(self, pageinfo=None, docinfo=None, lemma=None):
        """Return an HTML fragment with one page of fulltext search results.

        Runs doc-query.xql on the text server with the query parameters
        taken from pageinfo, then rewrites the links of the result page so
        they point back at this Zope object instead of the raw text server.
        Returns "no text here" for unknown query types.
        NOTE(review): the lemma parameter is never used in this method.
        """
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the search on the text server; query strings are URL-quoted
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&optionToggle=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization,optionToggle ,urllib.quote(highlightQuery)))
        # make document references point at this viewer's URL before parsing
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        # NOTE(review): the following triple-quoted block is dead code (an
        # unused string expression), kept verbatim; consider deleting it.
        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # plain fulltext / xpath / xquery results: rewrite page-fragment links
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            # redirect the hit link through this viewer, carrying
                            # the full search state in the query string
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,optionToggle,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphological fulltext search: additionally rewrite lemma links
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,optionToggle,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma links open the lemma template in a popup window
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma_New'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            # NOTE(review): the second setAttributeNS overwrites the
                            # first onClick handler, so only popupWin.focus() remains
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                # NOTE(review): this xpath result is never used
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # fulltext index results: rewrite every link, plus vocabulary/lemma popups
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        # every link gets the viewer's display state appended
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&optionToggle=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,optionToggle,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            # dictionary links open the vocabulary template in a popup
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                return serializeNode(pagenode)
        # unknown query type or empty result page
        return "no text here"
132:
133:
134: def getGisPlaces(self, docinfo=None, pageinfo=None):
135: """ Show all Gis Places of whole Page"""
136: xpath='//place'
137: docpath = docinfo.get('textURLPath',None)
138: if not docpath:
139: return None
140:
141: url = docinfo['url']
142: selfurl = self.absolute_url()
143: pn = pageinfo['current']
144: hrefList=[]
145: myList= ""
146: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
147: dom = Parse(text)
148: result = dom.xpath("//result/resultPage/place")
149: for l in result:
150: hrefNode= l.getAttributeNodeNS(None, u"id")
151: href= hrefNode.nodeValue
152: hrefList.append(href)
153: myList = ",".join(hrefList)
154: logging.debug("getGisPlaces :%s"%(myList))
155: return myList
156:
157: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
158: """Show all Gis Places of whole Book """
159: xpath ='//echo:place'
160: docpath =docinfo['textURLPath']
161: url = docinfo['url']
162: selfurl =self.absolute_url()
163: pn =pageinfo['current']
164: hrefList=[]
165: myList=""
166: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
167: dom =Parse(text)
168: result = dom.xpath("//result/resultPage/place")
169:
170: for l in result:
171: hrefNode = l.getAttributeNodeNS(None, u"id")
172: href= hrefNode.nodeValue
173: hrefList.append(href)
174: myList = ",".join(hrefList)
175: logging.debug("getALLGisPlaces :%s"%(myList))
176: return myList
177:
178:
    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """Return a single fulltext page as an HTML fragment.

        Fetches page pageinfo['current'] from page-fragment.xql, stores the
        page/entry counts reported by the server in docinfo, and then
        renders the page according to mode ("text", "xml", "gis", "pureXml"
        or "text_dict"), rewriting links as needed.
        Returns "no text here" for unknown modes or empty results.
        NOTE(review): the pn parameter is immediately overwritten from
        pageinfo['current'] and kept only for interface compatibility.
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        pn =pageinfo['current']
        #viewMode= pageinfo['viewMode']

        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # "text_dict" is served by the server's "textPollux" mode
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery is not None:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)

        # NOTE(review): throughout this method, "pagedivs == dom.xpath(...)"
        # re-evaluates the same XPath and compares the node lists; since the
        # same nodes are returned, the condition appears to always hold.

        #original Pages
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGES: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGES: %s"%(docinfo['pageNumberOrigNorm']))

        #figureEntries
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                        logging.debug("FIGURE ENTRIES: %s"%(s))
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # convert a hit count into a page count at 30 entries per
                    # page (Python 2 integer division, then one extra page)
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                        logging.debug("FIGURE ENTRIES: %s"%(s1))
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                    logging.debug("PLACES HERE: %s"%(s))
                except:
                    # non-numeric count from the server
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                        logging.debug("TEXT ENTRIES: %s"%(s))
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # 30 toc entries per page, same page-count conversion as above
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                        logging.debug("TEXT ENTRIES: %s"%(s1))
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    logging.debug("PAGE NUMBER: %s"%(s))

                    # derive paging info: clamp the group end to the real page
                    # count and compute the number of page groups
                    np = docinfo['numPages']
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            #mode = viewMode
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            # footnote anchors must round-trip through the viewer URL
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # rewrite ChinaGIS REST links to this document's db
                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('lt/lex.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                            # NOTE(review): the second onClick overwrites the first,
                            # so only popupWin.focus() survives
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('lt/lemma.xql'):
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onClick', 'popupWin.focus();')
                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        return "no text here"
378:
379: def getTranslate(self, query=None, language=None):
380: """translate into another languages"""
381: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
382: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
383: return data
384:
385: def getLemma(self, lemma=None, language=None):
386: """simular words lemma """
387: data = self.getServerData("lt/lemma.xql","document=&language="+str(language)+"&lemma="+urllib.quote(lemma))
388: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(lemma)))
389: return data
390:
391: def getLemmaNew(self, query=None, language=None):
392: """simular words lemma """
393: data = self.getServerData("lt/lemma.xql","document=&language="+str(language)+"&lemma="+urllib.quote(query))
394: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(query)))
395: return data
396:
397: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1, optionToggle=None):
398: #number of
399: docpath = docinfo['textURLPath']
400: pagesize = pageinfo['queryPageSize']
401: pn = pageinfo['searchPN']
402: query =pageinfo['query']
403: queryType =pageinfo['queryType']
404: tocSearch = 0
405: tocDiv = None
406:
407: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
408: pagedom = Parse(pagexml)
409: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
410: tocSearch = int(getTextFromNode(numdivs[0]))
411: logging.debug("documentViewer (gettoc) tocSearch: %s"%(tocSearch))
412: tc=int((tocSearch/10)+1)
413: logging.debug("documentViewer (gettoc) tc: %s"%(tc))
414: return tc
415:
416: def getToc(self, mode="text", docinfo=None):
417: """loads table of contents and stores in docinfo"""
418: logging.debug("documentViewer (gettoc) mode: %s"%(mode))
419: if mode == "none":
420: return docinfo
421: if 'tocSize_%s'%mode in docinfo:
422: # cached toc
423: return docinfo
424:
425: docpath = docinfo['textURLPath']
426: # we need to set a result set size
427: pagesize = 1000
428: pn = 1
429: if mode == "text":
430: queryType = "toc"
431: else:
432: queryType = mode
433: # number of entries in toc
434: tocSize = 0
435: tocDiv = None
436:
437: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
438:
439: # post-processing downloaded xml
440: pagedom = Parse(pagexml)
441: # get number of entries
442: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
443: if len(numdivs) > 0:
444: tocSize = int(getTextFromNode(numdivs[0]))
445: docinfo['tocSize_%s'%mode] = tocSize
446: return docinfo
447:
448: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
449: """returns single page from the table of contents"""
450: # TODO: this should use the cached TOC
451: if mode == "text":
452: queryType = "toc"
453: else:
454: queryType = mode
455: docpath = docinfo['textURLPath']
456: path = docinfo['textURLPath']
457: pagesize = pageinfo['tocPageSize']
458: pn = pageinfo['tocPN']
459: url = docinfo['url']
460: selfurl = self.absolute_url()
461: viewMode= pageinfo['viewMode']
462: characterNormalization = pageinfo ['characterNormalization']
463: optionToggle =pageinfo ['optionToggle']
464: tocMode = pageinfo['tocMode']
465: tocPN = pageinfo['tocPN']
466:
467: data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm&optionToggle=1"%(docpath,queryType, pagesize, pn))
468: page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s&optionToggle=1'%(selfurl,url, viewMode, tocMode, tocPN))
469: text = page.replace('mode=image','mode=texttool')
470: #logging.debug("documentViewer (characterNormalization) characterNormalization: %s"%(characterNormalization))
471: logging.debug("TEXT %s"%(text))
472: return text
473:
474: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
475: """change settings"""
476: self.title=title
477: self.timeout = timeout
478: self.serverUrl = serverUrl
479: if RESPONSE is not None:
480: RESPONSE.redirect('manage_main')
481:
482: # management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    # render the add-form template in the acquisition context of this object
    template = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
    return template()
487:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    """add a new MpdlXmlTextServer instance"""
    # BUG FIX: timeout was passed as the 4th positional argument and ended up
    # in the constructor's serverName parameter, silently replacing serverUrl
    # with "http://<timeout>/mpdl/interface/"; pass it by keyword instead.
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>