1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5: from Ft.Xml.Domlette import NonvalidatingReader
6:
7: import md5
8: import sys
9: import logging
10: import urllib
11: import documentViewer
12: from documentViewer import getTextFromNode, serializeNode
13:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server.

    Zope object that proxies fulltext pages, search results, tables of
    contents and GIS place lists from an MPDL eXist XML server
    (``serverUrl``) and rewrites links in the returned HTML fragments so
    that they point back to this object instead of the backend server.
    """
    # Zope product registration / management screens
    meta_type="MPDL-XML TextServer"

    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # ZPT form backing the "Config" management tab
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())

    def __init__(self,id,title="",serverUrl="http://mpdl-test.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
    #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):

        """constructor

        id -- Zope object id
        title -- Zope object title
        serverUrl -- full base URL of the MPDL interface (used as-is)
        serverName -- host name only; when given it OVERRIDES serverUrl
            and the URL is rebuilt as http://<serverName>/mpdl/interface/
        timeout -- HTTP request timeout in seconds, passed through to
            documentViewer.getHttpData
        """
        self.id=id
        self.title=title
        self.timeout = timeout
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpdl/interface/"%serverName

    def getHttpData(self, url, data=None):
        """returns result from url+data HTTP request"""
        # delegate to the shared helper so the configured timeout applies
        return documentViewer.getHttpData(url,data,timeout=self.timeout)

    def getServerData(self, method, data=None):
        """returns result from text server for method+data

        method -- path of the .xql script relative to self.serverUrl
        data -- urlencoded query string (or None for a plain GET)
        """
        url = self.serverUrl+method
        return documentViewer.getHttpData(url,data,timeout=self.timeout)

    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs doc-query.xql on the text server with the query parameters
        taken from pageinfo/docinfo, then rewrites the hrefs in the
        returned result page so they point at this object, and returns
        the serialized result <div> as a string.  Returns the string
        "no text here" when no known queryType matched or the expected
        result div was absent.
        """
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        # searchPN defaults to 1 (first result page)
        pn = pageinfo.get('searchPN',1)
        #sn = pageinfo['sn']
        s = pageinfo['s']
        highlightElementPos =pageinfo ['highlightElementPos']
        highlightElement = pageinfo ['highlightElement']

        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
        #data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # replace backend document references with our own url parameter
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        # NOTE(review): the following triple-quoted string is disabled
        # legacy code (hit-count extraction), kept verbatim.
        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # plain fulltext-style queries: rewrite page-fragment links only
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            # carry the full search state over into the texttool link
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphological fulltext search: additionally wire lemma links to popups
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma lookups open in a popup window
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                # NOTE(review): result of this xpath is unused — looks like dead code
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # fulltext index queries: rewrite lex/lemma links to popup templates
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"

    def getGisPlaces(self, docinfo=None, pageinfo=None):
        """ Show all Gis Places of whole Page

        Queries xpath.xql for //place elements on the current page and
        returns their id attributes as a comma-separated string, or
        None when the document has no fulltext path.
        """
        xpath='//place'
        docpath = docinfo.get('textURLPath',None)
        if not docpath:
            return None

        url = docinfo['url']
        selfurl = self.absolute_url()
        pn = pageinfo['current']
        hrefList=[]
        myList= ""
        text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
        dom = Parse(text)
        # NOTE(review): assumes server wraps hits in result/resultPage/place
        result = dom.xpath("//result/resultPage/place")
        for l in result:
            hrefNode= l.getAttributeNodeNS(None, u"id")
            href= hrefNode.nodeValue
            hrefList.append(href)
            # rebuilt on every iteration; final value is the full join
            myList = ",".join(hrefList)
        #logging.debug("getGisPlaces :%s"%(myList))
        return myList

    def getAllGisPlaces (self, docinfo=None, pageinfo=None):
        """Show all Gis Places of whole Book

        Same as getGisPlaces but queries //echo:place over the whole
        document (no pn parameter) and does not guard against a missing
        textURLPath.
        """
        xpath ='//echo:place'
        docpath =docinfo['textURLPath']
        url = docinfo['url']
        selfurl =self.absolute_url()
        pn =pageinfo['current']
        hrefList=[]
        myList=""
        text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
        dom =Parse(text)
        result = dom.xpath("//result/resultPage/place")

        for l in result:
            hrefNode = l.getAttributeNodeNS(None, u"id")
            href= hrefNode.nodeValue
            hrefList.append(href)
            myList = ",".join(hrefList)
            #logging.debug("getALLGisPlaces :%s"%(myList))
        return myList


    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches one page via page-fragment.xql, harvests page/figure/
        place/toc counters from the response into docinfo, then — per
        mode ("text", "xml", "gis", "pureXml", "text_dict") — rewrites
        links in the first <div> and returns it serialized.  Falls back
        to the string "no text here" for unknown modes.
        NOTE(review): the pn parameter is immediately overwritten from
        pageinfo['current'].
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        pn =pageinfo['current']
        #sn = pageinfo['sn']
        s = pageinfo['s']
        highlightElementPos =pageinfo ['highlightElementPos']
        highlightElement = pageinfo ['highlightElement']
        #optionToggle =pageinfo ['optionToggle']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        xpointer = pageinfo['xpointer']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()

        # "text_dict" is served by the backend's "textPollux" mode
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s&options=withIdentifier"%(docpath,textmode,pn,characterNormalization, xpointer)
        if highlightQuery is not None:
            #textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)
            textParam +="&highlightQuery=%s&s=%s&highlightElement=%s&highlightElementPos=%s"%(urllib.quote(highlightQuery),s, highlightElement, highlightElementPos)

        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)
        #dom = NonvalidatingReader.parseStream(pagexml)

        #original Pages
        # NOTE(review): result unused here; original-page extraction below is disabled
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")

        """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
        """
        #figureEntries
        # NOTE(review): the "pagedivs == dom.xpath(...)" guards below always
        # re-evaluate the same expression; their effect depends on Ft.Xml
        # node-list equality — kept exactly as-is.
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # convert hit count to number of result pages (30 per page)
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                except:
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # convert hit count to number of toc pages (30 per page)
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    #logging.debug("PAGE NUMBER: %s"%(s))

                    # derive paging info from the page count
                    np = docinfo['numPages']
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            # make footnote anchors round-trip through this viewer
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                        #if href.startswith():
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # point REST links at the document-specific chinagis db
                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")

                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")

                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                            #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            #l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
                            #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")

                        if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=300,height=400,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))

                return serializeNode(pagenode)
        return "no text here"

    def getOrigPages(self, docinfo=None, pageinfo=None):
        """Fetch the original page number for the current page.

        Stores it in docinfo['pageNumberOrig'] and returns it.
        NOTE(review): raises KeyError on the final return when the
        server response contains no pageNumberOrig div.
        """
        docpath = docinfo['textURLPath']
        pn =pageinfo['current']
        selfurl = self.absolute_url()
        pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
        dom = Parse(pagexml)
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
            return docinfo['pageNumberOrig']

    def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
        """Fetch the normalized original page number for the current page.

        Same pattern as getOrigPages but for the pageNumberOrigNorm div.
        """
        docpath = docinfo['textURLPath']
        pn =pageinfo['current']
        selfurl = self.absolute_url()
        pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
        dom = Parse(pagexml)
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
            return docinfo['pageNumberOrigNorm']


    def getTranslate(self, word=None, language=None, display=None):
        """translate into another languages

        Returns the raw HTML from the backend wordInfo.xql service.
        """
        data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&display="+urllib.quote(display)+"&output=html")
        #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
        return data

    def getLemma(self, lemma=None, language=None):
        """simular words lemma

        Returns the raw HTML lemma listing from lemma.xql.
        """
        data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
        return data

    def getLemmaQuery(self, query=None, language=None):
        """simular words lemma

        Like getLemma but passes the value as the "query" parameter.
        """
        data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
        return data

    def getLex(self, query=None, language=None):
        #simular words lemma -- raw lexicon lookup via lex.xql
        data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
        return data

    def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
        #number of search result pages (10 hits per page, rounded up)
        # NOTE(review): the query/queryType/pn parameters are overwritten
        # from pageinfo below.
        docpath = docinfo['textURLPath']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo['searchPN']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        tocSearch = 0
        tocDiv = None

        pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
        pagedom = Parse(pagexml)
        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
        tocSearch = int(getTextFromNode(numdivs[0]))
        tc=int((tocSearch/10)+1)
        return tc

    def getToc(self, mode="text", docinfo=None):
        """loads table of contents and stores in docinfo

        mode -- "text" maps to the backend queryType "toc"; "none"
        skips loading entirely.  The entry count is cached in
        docinfo['tocSize_<mode>'] so repeated calls are cheap.
        Returns docinfo.
        """
        if mode == "none":
            return docinfo
        if 'tocSize_%s'%mode in docinfo:
            # cached toc
            return docinfo

        docpath = docinfo['textURLPath']
        # we need to set a result set size
        pagesize = 1000
        pn = 1
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode
        # number of entries in toc
        tocSize = 0
        tocDiv = None

        pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))

        # post-processing downloaded xml
        pagedom = Parse(pagexml)
        # get number of entries
        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
        if len(numdivs) > 0:
            tocSize = int(getTextFromNode(numdivs[0]))
        docinfo['tocSize_%s'%mode] = tocSize
        return docinfo

    def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
        """returns single page from the table of contents

        Fetches one TOC result page and rewrites backend links so they
        target this viewer (url/viewMode/tocMode/tocPN carried over,
        mode=image swapped for mode=texttool).  Returns the rewritten
        HTML string.
        """
        # TODO: this should use the cached TOC
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        pagesize = pageinfo['tocPageSize']
        pn = pageinfo['tocPN']
        url = docinfo['url']
        selfurl = self.absolute_url()
        viewMode= pageinfo['viewMode']
        characterNormalization = pageinfo ['characterNormalization']
        #optionToggle =pageinfo ['optionToggle']
        tocMode = pageinfo['tocMode']
        tocPN = pageinfo['tocPN']

        data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
        page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
        text = page.replace('mode=image','mode=texttool')
        return text

    def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
        """change settings

        Zope management hook: updates title, timeout and serverUrl,
        then redirects back to manage_main when called from the web.
        """
        self.title=title
        self.timeout = timeout
        self.serverUrl = serverUrl
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')
519:
520: # management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    # Render the add-form template in this object's acquisition context.
    template = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals())
    return template.__of__(self)()
525:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
#def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
    """add a new MpdlXmlTextServer object

    id/title -- Zope id and title for the new object
    serverUrl -- base URL of the MPDL interface
    timeout -- HTTP timeout in seconds
    RESPONSE -- when set (web request), redirect to manage_main
    """
    # BUGFIX: the constructor signature is
    # __init__(self, id, title, serverUrl, serverName=None, timeout=40);
    # the old positional call MpdlXmlTextServer(id, title, serverUrl, timeout)
    # passed timeout into the serverName slot, which silently rebuilt the
    # server URL as "http://40/mpdl/interface/" and ignored the timeout.
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>  (CVSweb footer residue; commented out -- it is not Python and would break the module at import time)