Annotation of documentViewer/MpdlXmlTextServer.py, revision 1.245
1.2 casties 1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
1.238 abukhman 5: from Ft.Xml.Domlette import NonvalidatingReader
1.2 casties 6:
1.224 abukhman 7: import md5
1.2 casties 8: import sys
9: import logging
1.5 casties 10: import urllib
1.2 casties 11: import documentViewer
12: from documentViewer import getTextFromNode, serializeNode
13:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope ZMI type name under which instances of this server appear
    meta_type="MPDL-XML TextServer"

    # ZMI management tabs: put a "Config" tab in front of the standard
    # SimpleItem options
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # form template for the "Config" tab (submits to
    # manage_changeMpdlXmlTextServer below)
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
23:
1.241 abukhman 24: def __init__(self,id,title="",serverUrl="http://mpdl-test.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
1.234 abukhman 25: #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):
1.231 abukhman 26:
1.2 casties 27: """constructor"""
28: self.id=id
29: self.title=title
30: self.timeout = timeout
1.3 casties 31: if serverName is None:
32: self.serverUrl = serverUrl
33: else:
34: self.serverUrl = "http://%s/mpdl/interface/"%serverName
1.2 casties 35:
    def getHttpData(self, url, data=None):
        """returns result from url+data HTTP request"""
        # delegate to the documentViewer module helper, applying this
        # server's configured timeout
        return documentViewer.getHttpData(url,data,timeout=self.timeout)
39:
40: def getServerData(self, method, data=None):
41: """returns result from text server for method+data"""
42: url = self.serverUrl+method
43: return documentViewer.getHttpData(url,data,timeout=self.timeout)
44:
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs the current fulltext query on the text server and returns one
        page of results as serialized HTML, with all result links rewritten
        to point back at this viewer object.  Returns the string
        "no text here" when no result page was found.
        """
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        #sn = pageinfo['sn']
        s = pageinfo['s']
        highlightElementPos =pageinfo ['highlightElementPos']
        highlightElement = pageinfo ['highlightElement']

        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # ask the text server for one page of query results
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
        #data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # turn server-side document= parameters into viewer url= links
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # plain fulltext / xpath / xquery result page: rewrite page links
        # so they carry the full viewer state
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphological fulltext results: additionally turn lemma links
        # into popup windows
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma lookups open in a small popup window
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # fulltext index results: rewrite lexicon and lemma links as popups
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
1.222 abukhman 140:
1.89 abukhman 141: def getGisPlaces(self, docinfo=None, pageinfo=None):
1.58 abukhman 142: """ Show all Gis Places of whole Page"""
1.100 abukhman 143: xpath='//place'
1.214 casties 144: docpath = docinfo.get('textURLPath',None)
145: if not docpath:
146: return None
147:
1.89 abukhman 148: url = docinfo['url']
149: selfurl = self.absolute_url()
1.93 abukhman 150: pn = pageinfo['current']
1.127 abukhman 151: hrefList=[]
1.142 abukhman 152: myList= ""
1.100 abukhman 153: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
154: dom = Parse(text)
1.101 abukhman 155: result = dom.xpath("//result/resultPage/place")
1.72 abukhman 156: for l in result:
1.86 abukhman 157: hrefNode= l.getAttributeNodeNS(None, u"id")
1.108 abukhman 158: href= hrefNode.nodeValue
1.128 abukhman 159: hrefList.append(href)
1.145 abukhman 160: myList = ",".join(hrefList)
1.230 abukhman 161: #logging.debug("getGisPlaces :%s"%(myList))
1.143 abukhman 162: return myList
163:
164: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
165: """Show all Gis Places of whole Book """
166: xpath ='//echo:place'
167: docpath =docinfo['textURLPath']
168: url = docinfo['url']
169: selfurl =self.absolute_url()
170: pn =pageinfo['current']
171: hrefList=[]
172: myList=""
173: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
174: dom =Parse(text)
175: result = dom.xpath("//result/resultPage/place")
1.205 abukhman 176:
1.143 abukhman 177: for l in result:
178: hrefNode = l.getAttributeNodeNS(None, u"id")
179: href= hrefNode.nodeValue
180: hrefList.append(href)
1.136 abukhman 181: myList = ",".join(hrefList)
1.230 abukhman 182: #logging.debug("getALLGisPlaces :%s"%(myList))
1.145 abukhman 183: return myList
1.222 abukhman 184:
1.215 abukhman 185:
    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches one page from the eXist text server, harvests the count
        divs (figures, places, toc entries, pages) into docinfo, and
        returns the serialized page body with links rewritten for the
        requested mode ("text", "xml", "gis", "pureXml" or "text_dict").
        Returns the string "no text here" when no page content is found.
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        # NOTE(review): the pn argument is always overridden by pageinfo
        pn =pageinfo['current']
        #sn = pageinfo['sn']
        s = pageinfo['s']
        highlightElementPos =pageinfo ['highlightElementPos']
        highlightElement = pageinfo ['highlightElement']
        #optionToggle =pageinfo ['optionToggle']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        xpointer = pageinfo['xpointer']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()

        # "text_dict" is served by the text server's textPollux mode
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s"%(docpath,textmode,pn,characterNormalization, xpointer)
        #textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s&options=withIdentifier"%(docpath,textmode,pn,characterNormalization, xpointer)
        if highlightQuery is not None:
            #textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)
            textParam +="&highlightQuery=%s&s=%s&highlightElement=%s&highlightElementPos=%s"%(urllib.quote(highlightQuery),s, highlightElement, highlightElementPos)

        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)
        #dom = NonvalidatingReader.parseStream(pagexml)

        #original Pages
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")

        """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
        """
        #figureEntries
        # NOTE(review): the outer "pagedivs == dom.xpath(...)" checks below
        # re-evaluate the same expression and are effectively always true;
        # kept verbatim for fidelity
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # 30 entries per index page -> number of index pages
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                except:
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # 30 entries per toc page -> number of toc pages
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    #logging.debug("PAGE NUMBER: %s"%(s))

                    np = docinfo['numPages']
                    # clip the page-group navigation to the real page count
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                # rewrite note anchors so they stay on this viewer page
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                        #if href.startswith():
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                # rewrite mappit links to this document's REST db
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://mappit.mpiwg-berlin.mpg.de'):
                            hrefNode.nodeValue =href.replace('db/REST/db/chgis/mpdl','db/RESTdb/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")

                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")

                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                            #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            #l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
                            #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")

                        if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            # lemma link -> popup window
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=300,height=400,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            # note anchor stays on this viewer page
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))

                return serializeNode(pagenode)
        return "no text here"
1.225 abukhman 393:
1.230 abukhman 394: def getOrigPages(self, docinfo=None, pageinfo=None):
395: docpath = docinfo['textURLPath']
396: pn =pageinfo['current']
397: selfurl = self.absolute_url()
398: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
399: dom = Parse(pagexml)
400: pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
401: if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
402: if len(pagedivs)>0:
403: docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
404: return docinfo['pageNumberOrig']
405:
406: def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
407: docpath = docinfo['textURLPath']
408: pn =pageinfo['current']
409: selfurl = self.absolute_url()
410: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
411: dom = Parse(pagexml)
412: pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
413: if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
414: if len(pagedivs)>0:
415: docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
416: return docinfo['pageNumberOrigNorm']
417:
418:
1.240 abukhman 419: def getTranslate(self, word=None, language=None, display=None):
1.2 casties 420: """translate into another languages"""
1.239 abukhman 421: data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&display="+urllib.quote(display)+"&output=html")
1.2 casties 422: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
423: return data
424:
425: def getLemma(self, lemma=None, language=None):
426: """simular words lemma """
1.235 abukhman 427: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
1.2 casties 428: return data
429:
1.235 abukhman 430: def getLemmaQuery(self, query=None, language=None):
1.2 casties 431: """simular words lemma """
1.235 abukhman 432: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
433: return data
434:
435: def getLex(self, query=None, language=None):
436: #simular words lemma
437: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
1.2 casties 438: return data
1.28 abukhman 439:
1.237 abukhman 440: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
1.222 abukhman 441: #number of
1.2 casties 442: docpath = docinfo['textURLPath']
443: pagesize = pageinfo['queryPageSize']
444: pn = pageinfo['searchPN']
1.34 abukhman 445: query =pageinfo['query']
1.2 casties 446: queryType =pageinfo['queryType']
447: tocSearch = 0
448: tocDiv = None
449:
1.32 abukhman 450: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
1.2 casties 451: pagedom = Parse(pagexml)
452: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
453: tocSearch = int(getTextFromNode(numdivs[0]))
454: tc=int((tocSearch/10)+1)
455: return tc
1.222 abukhman 456:
1.2 casties 457: def getToc(self, mode="text", docinfo=None):
458: """loads table of contents and stores in docinfo"""
459: if mode == "none":
460: return docinfo
461: if 'tocSize_%s'%mode in docinfo:
462: # cached toc
463: return docinfo
464:
465: docpath = docinfo['textURLPath']
466: # we need to set a result set size
467: pagesize = 1000
468: pn = 1
469: if mode == "text":
470: queryType = "toc"
471: else:
472: queryType = mode
473: # number of entries in toc
474: tocSize = 0
475: tocDiv = None
476:
477: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
1.222 abukhman 478:
1.2 casties 479: # post-processing downloaded xml
480: pagedom = Parse(pagexml)
481: # get number of entries
482: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
483: if len(numdivs) > 0:
484: tocSize = int(getTextFromNode(numdivs[0]))
485: docinfo['tocSize_%s'%mode] = tocSize
486: return docinfo
487:
488: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
489: """returns single page from the table of contents"""
490: # TODO: this should use the cached TOC
491: if mode == "text":
492: queryType = "toc"
493: else:
494: queryType = mode
495: docpath = docinfo['textURLPath']
496: path = docinfo['textURLPath']
497: pagesize = pageinfo['tocPageSize']
498: pn = pageinfo['tocPN']
499: url = docinfo['url']
500: selfurl = self.absolute_url()
501: viewMode= pageinfo['viewMode']
1.26 abukhman 502: characterNormalization = pageinfo ['characterNormalization']
1.237 abukhman 503: #optionToggle =pageinfo ['optionToggle']
1.2 casties 504: tocMode = pageinfo['tocMode']
505: tocPN = pageinfo['tocPN']
506:
1.237 abukhman 507: data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
508: page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
1.2 casties 509: text = page.replace('mode=image','mode=texttool')
510: return text
511:
1.234 abukhman 512: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
513: #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
1.2 casties 514: """change settings"""
515: self.title=title
516: self.timeout = timeout
517: self.serverUrl = serverUrl
518: if RESPONSE is not None:
519: RESPONSE.redirect('manage_main')
520:
# management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    # acquisition-wrap the template so it renders in this folder context
    template = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals())
    return template.__of__(self)()
526:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    """add a new MpdlXmlTextServer instance to the destination folder"""
    # BUG FIX: the old code passed timeout positionally as the 4th
    # argument, so it landed in __init__'s serverName parameter and
    # clobbered serverUrl (serverUrl became "http://40/mpdl/interface/").
    # Pass it by keyword so serverName stays None.
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>