1:
2: from OFS.SimpleItem import SimpleItem
3: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5:
6: import md5
7: import sys
8: import logging
9: import urllib
10: import documentViewer
11: from documentViewer import getTextFromNode, serializeNode
12:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope meta_type under which this product appears in the ZMI
    meta_type="MPDL-XML TextServer"

    # ZMI management tabs: prepend a "Config" tab to SimpleItem's defaults
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
       )+SimpleItem.manage_options

    # page template rendering the "Config" tab form
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
22:
23: def __init__(self,id,title="",serverUrl="http://mpdl-system.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
24: #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):
25:
26: """constructor"""
27: self.id=id
28: self.title=title
29: self.timeout = timeout
30: if serverName is None:
31: self.serverUrl = serverUrl
32: else:
33: self.serverUrl = "http://%s/mpdl/interface/"%serverName
34:
35: def getHttpData(self, url, data=None):
36: """returns result from url+data HTTP request"""
37: return documentViewer.getHttpData(url,data,timeout=self.timeout)
38:
39: def getServerData(self, method, data=None):
40: """returns result from text server for method+data"""
41: url = self.serverUrl+method
42: return documentViewer.getHttpData(url,data,timeout=self.timeout)
43:
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs the current query (taken from pageinfo) against the text
        server's doc-query.xql and returns the result page serialized as
        HTML, with server links rewritten to point back at this viewer.
        Returns the string "no text here" for unhandled query types.
        """
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        # paging and query state from the session
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the text server
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # rewrite ?document= references into this viewer's ?url= form
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        # NOTE(review): dead code deliberately kept as a bare string literal
        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """
        # fulltext-style queries: rewrite page-fragment links to this viewer
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            # carry the full viewer state along in the rewritten link
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphology search: additionally turn lemma links into popup windows
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma lookups open in a popup window
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                # NOTE(review): the result of this xpath is never used -- leftover code?
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # index queries: rewrite every link; lexicon/lemma links become popups
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        # unhandled query type (or no result page)
        return "no text here"
134:
135: def getGisPlaces(self, docinfo=None, pageinfo=None):
136: """ Show all Gis Places of whole Page"""
137: xpath='//place'
138: docpath = docinfo.get('textURLPath',None)
139: if not docpath:
140: return None
141:
142: url = docinfo['url']
143: selfurl = self.absolute_url()
144: pn = pageinfo['current']
145: hrefList=[]
146: myList= ""
147: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
148: dom = Parse(text)
149: result = dom.xpath("//result/resultPage/place")
150: for l in result:
151: hrefNode= l.getAttributeNodeNS(None, u"id")
152: href= hrefNode.nodeValue
153: hrefList.append(href)
154: myList = ",".join(hrefList)
155: #logging.debug("getGisPlaces :%s"%(myList))
156: return myList
157:
158: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
159: """Show all Gis Places of whole Book """
160: xpath ='//echo:place'
161: docpath =docinfo['textURLPath']
162: url = docinfo['url']
163: selfurl =self.absolute_url()
164: pn =pageinfo['current']
165: hrefList=[]
166: myList=""
167: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
168: dom =Parse(text)
169: result = dom.xpath("//result/resultPage/place")
170:
171: for l in result:
172: hrefNode = l.getAttributeNodeNS(None, u"id")
173: href= hrefNode.nodeValue
174: hrefList.append(href)
175: myList = ",".join(hrefList)
176: #logging.debug("getALLGisPlaces :%s"%(myList))
177: return myList
178:
179:
    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches one page from the text server (page-fragment.xql), extracts
        figure/place/toc/page counts into docinfo, then post-processes the
        page depending on mode ("text", "xml", "gis", "pureXml",
        "text_dict") and returns it serialized. Returns "no text here"
        when no mode branch produced a result.
        """
        docpath = docinfo['textURLPath']
        path = docinfo['textURLPath']
        url = docinfo.get('url',None)
        name = docinfo.get('name',None)
        # NOTE(review): the pn parameter is immediately overwritten from pageinfo
        pn =pageinfo['current']
        sn = pageinfo['sn']
        #optionToggle =pageinfo ['optionToggle']
        highlightQuery = pageinfo['highlightQuery']
        #mode = pageinfo ['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization=pageinfo['characterNormalization']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # "text_dict" is called "textPollux" on the server side
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery is not None:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = Parse(pagexml)


        #original Pages
        # NOTE(review): result of this xpath is unused in the live code below
        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")

        # NOTE(review): dead code deliberately kept as a bare string literal
        """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))

        #original Pages Norm
        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
            if len(pagedivs)>0:
                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
                logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
        """
        #figureEntries
        # NOTE(review): comparing two identical xpath() calls -- presumably
        # always true, so effectively a no-op guard; kept as-is
        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
            if len(pagedivs)>0:
                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countFigureEntries'] = int(s)
                    except:
                        docinfo['countFigureEntries'] = 0
                else:
                    # 30 entries per result page, rounded up to the next page
                    s1 = int(s)/30+1
                    try:
                        docinfo['countFigureEntries'] = int(s1)
                    except:
                        docinfo['countFigureEntries'] = 0

        #allPlaces
        pagedivs = dom.xpath("//div[@class='countPlaces']")
        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
            if len(pagedivs)>0:
                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])
                try:
                    docinfo['countPlaces'] = int(s)
                except:
                    # non-numeric count from the server -> treat as none
                    docinfo['countPlaces'] = 0

        #tocEntries
        pagedivs = dom.xpath("//div[@class='countTocEntries']")
        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
            if len(pagedivs)>0:
                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                if s=='0':
                    try:
                        docinfo['countTocEntries'] = int(s)
                    except:
                        docinfo['countTocEntries'] = 0
                else:
                    # 30 entries per result page, rounded up to the next page
                    s1 = int(s)/30+1
                    try:
                        docinfo['countTocEntries'] = int(s1)
                    except:
                        docinfo['countTocEntries'] = 0

        #numTextPages
        pagedivs = dom.xpath("//div[@class='countPages']")
        if pagedivs == dom.xpath("//div[@class='countPages']"):
            if len(pagedivs)>0:
                docinfo['numPages'] = getTextFromNode(pagedivs[0])
                s = getTextFromNode(pagedivs[0])

                try:
                    docinfo['numPages'] = int(s)
                    #logging.debug("PAGE NUMBER: %s"%(s))

                    # clip the visible page range and compute group count
                    np = docinfo['numPages']
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
                except:
                    docinfo['numPages'] = 0

            else:
                #no full text -- init to 0
                docinfo['pageNumberOrig'] = 0
                docinfo['countFigureEntries'] = 0
                docinfo['countPlaces'] = 0
                docinfo['countTocEntries'] = 0
                docinfo['numPages'] = 0
                docinfo['pageNumberOrigNorm'] = 0
                #return docinfo

        # plain text mode
        if mode == "text":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href= hrefNode.nodeValue
                        if href.startswith('#note-'):
                            # rewrite footnote anchors into viewer URLs
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
                return serializeNode(pagenode)
        if mode == "xml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        if mode == "gis":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links =pagenode.xpath("//a")
                for l in links:
                    hrefNode =l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href=hrefNode.nodeValue
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # point chinagis links at this document's REST db
                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)

        if mode == "pureXml":
            # first div contains text
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)
        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            #mode = pageinfo ['viewMode']
            pagedivs = dom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")

                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")

                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
                            # add target: word info opens in a popup window
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
                            #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")

                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            # lemma link -> popup window on this viewer
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            # rewrite footnote anchors into viewer URLs
                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))

                return serializeNode(pagenode)
        # no matching mode branch produced a page
        return "no text here"
379:
380: def getOrigPages(self, docinfo=None, pageinfo=None):
381: docpath = docinfo['textURLPath']
382: pn =pageinfo['current']
383: selfurl = self.absolute_url()
384: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
385: dom = Parse(pagexml)
386: pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
387: if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
388: if len(pagedivs)>0:
389: docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
390: return docinfo['pageNumberOrig']
391:
392: def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
393: docpath = docinfo['textURLPath']
394: pn =pageinfo['current']
395: selfurl = self.absolute_url()
396: pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
397: dom = Parse(pagexml)
398: pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
399: if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
400: if len(pagedivs)>0:
401: docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
402: return docinfo['pageNumberOrigNorm']
403:
404:
405: def getTranslate(self, word=None, language=None):
406: """translate into another languages"""
407: data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
408: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
409: return data
410:
411: def getLemma(self, lemma=None, language=None):
412: """simular words lemma """
413: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
414: return data
415:
416: def getLemmaQuery(self, query=None, language=None):
417: """simular words lemma """
418: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
419: return data
420:
421: def getLex(self, query=None, language=None):
422: #simular words lemma
423: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
424: return data
425:
426: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
427: #number of
428: docpath = docinfo['textURLPath']
429: pagesize = pageinfo['queryPageSize']
430: pn = pageinfo['searchPN']
431: query =pageinfo['query']
432: queryType =pageinfo['queryType']
433: tocSearch = 0
434: tocDiv = None
435:
436: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
437: pagedom = Parse(pagexml)
438: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
439: tocSearch = int(getTextFromNode(numdivs[0]))
440: tc=int((tocSearch/10)+1)
441: return tc
442:
443: def getToc(self, mode="text", docinfo=None):
444: """loads table of contents and stores in docinfo"""
445: if mode == "none":
446: return docinfo
447: if 'tocSize_%s'%mode in docinfo:
448: # cached toc
449: return docinfo
450:
451: docpath = docinfo['textURLPath']
452: # we need to set a result set size
453: pagesize = 1000
454: pn = 1
455: if mode == "text":
456: queryType = "toc"
457: else:
458: queryType = mode
459: # number of entries in toc
460: tocSize = 0
461: tocDiv = None
462:
463: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
464:
465: # post-processing downloaded xml
466: pagedom = Parse(pagexml)
467: # get number of entries
468: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
469: if len(numdivs) > 0:
470: tocSize = int(getTextFromNode(numdivs[0]))
471: docinfo['tocSize_%s'%mode] = tocSize
472: return docinfo
473:
474: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
475: """returns single page from the table of contents"""
476: # TODO: this should use the cached TOC
477: if mode == "text":
478: queryType = "toc"
479: else:
480: queryType = mode
481: docpath = docinfo['textURLPath']
482: path = docinfo['textURLPath']
483: pagesize = pageinfo['tocPageSize']
484: pn = pageinfo['tocPN']
485: url = docinfo['url']
486: selfurl = self.absolute_url()
487: viewMode= pageinfo['viewMode']
488: characterNormalization = pageinfo ['characterNormalization']
489: #optionToggle =pageinfo ['optionToggle']
490: tocMode = pageinfo['tocMode']
491: tocPN = pageinfo['tocPN']
492:
493: data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
494: page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
495: text = page.replace('mode=image','mode=texttool')
496: return text
497:
498: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
499: #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
500: """change settings"""
501: self.title=title
502: self.timeout = timeout
503: self.serverUrl = serverUrl
504: if RESPONSE is not None:
505: RESPONSE.redirect('manage_main')
506:
507: # management methods
508: def manage_addMpdlXmlTextServerForm(self):
509: """Form for adding"""
510: pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
511: return pt()
512:
def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
    """add a MpdlXmlTextServer instance to the current container"""
    # BUG FIX: timeout must be passed by keyword -- passed positionally it
    # lands in the constructor's serverName parameter, which then mangles
    # serverUrl into "http://40/mpdl/interface/".
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>  (CVSweb footer artifact; commented out so the module stays importable)