1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5: from Ft.Xml.Domlette import NonvalidatingReader
6:
7: import md5
8: import sys
9: import logging
10: import urllib
11: import documentViewer
12: from documentViewer import getTextFromNode, serializeNode
13:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope meta type shown in the ZMI add list
    meta_type="MPDL-XML TextServer"

    # ZMI management tabs: prepend a 'Config' tab to the inherited options
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # page template rendering the 'Config' tab form
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
23:
24: def __init__(self,id,title="",serverUrl="http://mpdl-system.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
25: #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):
26:
27: """constructor"""
28: self.id=id
29: self.title=title
30: self.timeout = timeout
31: if serverName is None:
32: self.serverUrl = serverUrl
33: else:
34: self.serverUrl = "http://%s/mpdl/interface/"%serverName
35:
36: def getHttpData(self, url, data=None):
37: """returns result from url+data HTTP request"""
38: return documentViewer.getHttpData(url,data,timeout=self.timeout)
39:
40: def getServerData(self, method, data=None):
41: """returns result from text server for method+data"""
42: url = self.serverUrl+method
43: return documentViewer.getHttpData(url,data,timeout=self.timeout)
44:
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs the current query on the text server (doc-query.xql) and
        post-processes the returned HTML fragment: links in the result
        are rewritten so that they point back at this viewer instead of
        the text server.  Returns the serialized result fragment, or the
        string "no text here" if no result page was found.
        """
        # document location on the text server and current viewer state
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the text server; result is an HTML fragment
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # links of the form "?document=<docpath>" become "?url=<url>"
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # --- plain fulltext / xpath / xquery / lemma queries ---------------
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                # rewrite page-fragment links so they go through this viewer
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # --- morphological fulltext query ---------------------------------
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        # lemma links open the dictionary head in a popup
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                # NOTE(review): result of this xpath is never used -- confirm
                # whether it can be dropped
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # --- fulltext index queries ---------------------------------------
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        # every link keeps viewer state; mode becomes texttool
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        # lexicon links open the lexicon head in a popup
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        # lemma links open the lemma head in a popup
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
135:
136: def getGisPlaces(self, docinfo=None, pageinfo=None):
137: """ Show all Gis Places of whole Page"""
138: xpath='//place'
139: docpath = docinfo.get('textURLPath',None)
140: if not docpath:
141: return None
142:
143: url = docinfo['url']
144: selfurl = self.absolute_url()
145: pn = pageinfo['current']
146: hrefList=[]
147: myList= ""
148: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
149: dom = Parse(text)
150: result = dom.xpath("//result/resultPage/place")
151: for l in result:
152: hrefNode= l.getAttributeNodeNS(None, u"id")
153: href= hrefNode.nodeValue
154: hrefList.append(href)
155: myList = ",".join(hrefList)
156: #logging.debug("getGisPlaces :%s"%(myList))
157: return myList
158:
159: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
160: """Show all Gis Places of whole Book """
161: xpath ='//echo:place'
162: docpath =docinfo['textURLPath']
163: url = docinfo['url']
164: selfurl =self.absolute_url()
165: pn =pageinfo['current']
166: hrefList=[]
167: myList=""
168: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
169: dom =Parse(text)
170: result = dom.xpath("//result/resultPage/place")
171:
172: for l in result:
173: hrefNode = l.getAttributeNodeNS(None, u"id")
174: href= hrefNode.nodeValue
175: hrefList.append(href)
176: myList = ",".join(hrefList)
177: #logging.debug("getALLGisPlaces :%s"%(myList))
178: return myList
179:
180:
    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches one page fragment from the text server, updates counters
        (figures, places, toc entries, number of pages) in docinfo and
        pageinfo as a side effect, then post-processes the fragment
        according to mode ("text", "xml", "gis", "pureXml", "text_dict").
        Returns the serialized page fragment or "no text here".

        NOTE(review): the pn and mode parameters are partly overridden
        below from pageinfo -- confirm callers rely on that.
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        pn =pageinfo['current']
        sn = pageinfo['sn']
        #optionToggle =pageinfo ['optionToggle']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # the viewer's "text_dict" mode is called "textPollux" on the server
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery is not None:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        # fetch the page fragment from the text server and parse it
        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)
        #dom = NonvalidatingReader.parseStream(pagexml)

        #original Pages
        # NOTE(review): result is unused here; the dead block below and
        # getOrigPages() do the actual extraction
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")

        """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
        """
        #figureEntries
        # NOTE(review): comparing pagedivs to a second identical xpath()
        # call looks redundant (presumably always true) -- confirm before
        # simplifying; same pattern repeats below
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # convert hit count into number of result pages (30/page)
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                except:
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # convert hit count into number of result pages (30/page)
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    #logging.debug("PAGE NUMBER: %s"%(s))

                    np = docinfo['numPages']
                    # clip the displayed page range and compute the number
                    # of page groups for the navigation
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                # rewrite footnote anchors to keep viewer state in the URL
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                # redirect chinagis links to the document-specific database
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")

                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")

                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                            #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            #l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
                            #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")

                        # lemma links open the lemma head in a popup window
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')

                        # rewrite footnote anchors to keep viewer state
                        if href.startswith('#note-'):
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))

                return serializeNode(pagenode)
        return "no text here"
380:
381: def getOrigPages(self, docinfo=None, pageinfo=None):
382: docpath = docinfo['textURLPath']
383: pn =pageinfo['current']
384: selfurl = self.absolute_url()
385: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
386: dom = Parse(pagexml)
387: pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
388: if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
389: if len(pagedivs)>0:
390: docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
391: return docinfo['pageNumberOrig']
392:
393: def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
394: docpath = docinfo['textURLPath']
395: pn =pageinfo['current']
396: selfurl = self.absolute_url()
397: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
398: dom = Parse(pagexml)
399: pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
400: if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
401: if len(pagedivs)>0:
402: docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
403: return docinfo['pageNumberOrigNorm']
404:
405:
406: def getTranslate(self, word=None, language=None):
407: """translate into another languages"""
408: data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
409: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
410: return data
411:
412: def getLemma(self, lemma=None, language=None):
413: """simular words lemma """
414: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
415: return data
416:
417: def getLemmaQuery(self, query=None, language=None):
418: """simular words lemma """
419: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
420: return data
421:
422: def getLex(self, query=None, language=None):
423: #simular words lemma
424: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
425: return data
426:
427: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
428: #number of
429: docpath = docinfo['textURLPath']
430: pagesize = pageinfo['queryPageSize']
431: pn = pageinfo['searchPN']
432: query =pageinfo['query']
433: queryType =pageinfo['queryType']
434: tocSearch = 0
435: tocDiv = None
436:
437: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
438: pagedom = Parse(pagexml)
439: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
440: tocSearch = int(getTextFromNode(numdivs[0]))
441: tc=int((tocSearch/10)+1)
442: return tc
443:
444: def getToc(self, mode="text", docinfo=None):
445: """loads table of contents and stores in docinfo"""
446: if mode == "none":
447: return docinfo
448: if 'tocSize_%s'%mode in docinfo:
449: # cached toc
450: return docinfo
451:
452: docpath = docinfo['textURLPath']
453: # we need to set a result set size
454: pagesize = 1000
455: pn = 1
456: if mode == "text":
457: queryType = "toc"
458: else:
459: queryType = mode
460: # number of entries in toc
461: tocSize = 0
462: tocDiv = None
463:
464: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
465:
466: # post-processing downloaded xml
467: pagedom = Parse(pagexml)
468: # get number of entries
469: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
470: if len(numdivs) > 0:
471: tocSize = int(getTextFromNode(numdivs[0]))
472: docinfo['tocSize_%s'%mode] = tocSize
473: return docinfo
474:
475: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
476: """returns single page from the table of contents"""
477: # TODO: this should use the cached TOC
478: if mode == "text":
479: queryType = "toc"
480: else:
481: queryType = mode
482: docpath = docinfo['textURLPath']
483: path = docinfo['textURLPath']
484: pagesize = pageinfo['tocPageSize']
485: pn = pageinfo['tocPN']
486: url = docinfo['url']
487: selfurl = self.absolute_url()
488: viewMode= pageinfo['viewMode']
489: characterNormalization = pageinfo ['characterNormalization']
490: #optionToggle =pageinfo ['optionToggle']
491: tocMode = pageinfo['tocMode']
492: tocPN = pageinfo['tocPN']
493:
494: data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
495: page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
496: text = page.replace('mode=image','mode=texttool')
497: return text
498:
499: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
500: #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
501: """change settings"""
502: self.title=title
503: self.timeout = timeout
504: self.serverUrl = serverUrl
505: if RESPONSE is not None:
506: RESPONSE.redirect('manage_main')
507:
508: # management methods
509: def manage_addMpdlXmlTextServerForm(self):
510: """Form for adding"""
511: pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
512: return pt()
513:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
#def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
    """Add a new MpdlXmlTextServer object to the destination folder."""
    # BUG FIX: pass timeout by keyword -- the constructor signature is
    # (id, title, serverUrl, serverName, timeout), so passing timeout as the
    # fourth positional argument bound it to serverName and produced the
    # bogus server URL "http://40/mpdl/interface/"
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>