1: from OFS.SimpleItem import SimpleItem
2: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
3:
4: from Ft.Xml import EMPTY_NAMESPACE, Parse
5: from Ft.Xml.Domlette import NonvalidatingReader
6: import Ft.Xml.Domlette
7: import cStringIO
8:
9: import xml.etree.ElementTree as ET
10:
11: import re
12: import logging
13: import urllib
14:
15: from SrvTxtUtils import getInt, getText, getHttpData
16:
def serialize(node):
    """Return an XML snippet string for node, without the XML declaration.

    ET.tostring() is called with an explicit encoding, which makes
    ElementTree prepend an '<?xml ... ?>' declaration; that declaration is
    snipped off so the snippet can be embedded in other markup.
    Handles both str results (Python 2) and bytes results (Python 3).
    """
    s = ET.tostring(node, 'UTF-8')
    # choose literals matching the result type (bytes under Python 3)
    if isinstance(s, str):
        decl_start, decl_end = '<?xml', '?>'
    else:
        decl_start, decl_end = b'<?xml', b'?>'
    if s.startswith(decl_start):
        i = s.find(decl_end)
        if i >= 0:
            # skip past '?>' and any following whitespace.
            # (the old code skipped a fixed i+3 characters, which ate one
            # character of content whenever no newline followed '?>')
            return s[i + 2:].lstrip()
    return s
26:
27:
def getTextFromNode(node):
    """Return the concatenated character data of node's direct text children.

    Works on DOM-style nodes (4Suite / minidom): inspects node.childNodes
    and collects the .data of every immediate TEXT_NODE child.
    Returns an empty string for None.
    """
    if node is None:
        return ""
    # only direct children are considered; nested element text is ignored
    return "".join(child.data
                   for child in node.childNodes
                   if child.nodeType == node.TEXT_NODE)
47:
def serializeNode(node, encoding="utf-8"):
    """Return a string with the XML serialization of a 4Suite DOM node.

    Prints the node into an in-memory buffer via Ft.Xml.Domlette.Print
    using the given encoding (default utf-8).
    """
    buf = cStringIO.StringIO()
    Ft.Xml.Domlette.Print(node, stream=buf, encoding=encoding)
    result = buf.getvalue()
    buf.close()
    return result
59:
60:
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    # Zope meta_type under which this product is registered
    meta_type="MPDL-XML TextServer"

    # ZMI management tabs: add a "Config" tab in front of the defaults
    manage_options=(
        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
        )+SimpleItem.manage_options

    # page template rendering the configuration form (the "Config" tab)
    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
70:
71: def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
72: """constructor"""
73: self.id=id
74: self.title=title
75: self.timeout = timeout
76: if serverName is None:
77: self.serverUrl = serverUrl
78: else:
79: self.serverUrl = "http://%s/mpdl/interface/"%serverName
80:
    def getHttpData(self, url, data=None):
        """returns result from url+data HTTP request"""
        # thin delegate to SrvTxtUtils.getHttpData using the configured timeout
        return getHttpData(url,data,timeout=self.timeout)
84:
85: def getServerData(self, method, data=None):
86: """returns result from text server for method+data"""
87: url = self.serverUrl+method
88: return getHttpData(url,data,timeout=self.timeout)
89:
    # WTF: what does this really do? can it be integrated in getPage?
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list

        Runs doc-query.xql on the text server with the query parameters
        from pageinfo and returns the serialized result page as an XML/HTML
        string, with the server's links rewritten to point back at this
        viewer.  Returns "no text here" when no result div is found.
        """
        logging.debug("getSearch()")
        # collect query parameters from the current document/page state
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN',1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the text server
        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
        # rewrite document references in the result to viewer URLs
        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)

        # NOTE(review): dead code below kept as in the original
        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
        if len(pagedivs)>0:
        docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
        s = getTextFromNode(pagedivs[0])
        s1 = int(s)/10+1
        try:
        docinfo['queryResultHits'] = int(s1)
        logging.debug("SEARCH ENTRIES: %s"%(s1))
        except:
        docinfo['queryResultHits'] = 0
        """
        # fulltext-style queries: rewrite page-fragment links to this viewer
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            # replace the backend URL with this object's URL
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)
        # morphology queries: additionally route lemma links to a popup
        if (queryType=="fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # open lemma lookups in a popup window
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                # NOTE(review): the result of this xpath is never used
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)
        # index queries: rewrite all links, route lex/lemma links to popups
        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
182:
183: def getGisPlaces(self, docinfo=None, pageinfo=None):
184: """ Show all Gis Places of whole Page"""
185: xpath='//place'
186: docpath = docinfo.get('textURLPath',None)
187: if not docpath:
188: return None
189:
190: url = docinfo['url']
191: selfurl = self.absolute_url()
192: pn = pageinfo['current']
193: hrefList=[]
194: myList= ""
195: text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
196: dom = ET.fromstring(text)
197: result = dom.findall(".//result/resultPage/place")
198: for l in result:
199: href = l.get("id")
200: hrefList.append(href)
201: # WTF: what does this do?
202: myList = ",".join(hrefList)
203: #logging.debug("getGisPlaces :%s"%(myList))
204: return myList
205:
206: def getAllGisPlaces (self, docinfo=None, pageinfo=None):
207: """Show all Gis Places of whole Book """
208: xpath ='//echo:place'
209: docpath =docinfo['textURLPath']
210: url = docinfo['url']
211: selfurl =self.absolute_url()
212: pn =pageinfo['current']
213: hrefList=[]
214: myList=""
215: text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
216: dom = ET.fromstring(text)
217: result = dom.findall(".//result/resultPage/place")
218:
219: for l in result:
220: href = l.get("id")
221: hrefList.append(href)
222: # WTF: what does this do?
223: myList = ",".join(hrefList)
224: #logging.debug("getALLGisPlaces :%s"%(myList))
225: return myList
226:
227: def processPageInfo(self, dom, docinfo, pageinfo):
228: """processes page info divs from dom and stores in docinfo and pageinfo"""
229: # assume first second level div is pageMeta
230: alldivs = dom.find("div")
231:
232: if alldivs is None or alldivs.get('class', '') != 'pageMeta':
233: logging.error("processPageInfo: pageMeta div not found!")
234: return
235:
236: for div in alldivs:
237: dc = div.get('class')
238:
239: # pageNumberOrig
240: if dc == 'pageNumberOrig':
241: pageinfo['pageNumberOrig'] = div.text
242:
243: # pageNumberOrigNorm
244: elif dc == 'pageNumberOrigNorm':
245: pageinfo['pageNumberOrigNorm'] = div.text
246:
247: # pageHeaderTitle
248: elif dc == 'pageHeaderTitle':
249: pageinfo['pageHeaderTitle'] = div.text
250:
251: # numFigureEntries
252: elif dc == 'countFigureEntries':
253: docinfo['numFigureEntries'] = getInt(div.text)
254:
255: # numTocEntries
256: elif dc == 'countTocEntries':
257: # WTF: s1 = int(s)/30+1
258: docinfo['numTocEntries'] = getInt(div.text)
259:
260: # numPlaces
261: elif dc == 'countPlaces':
262: docinfo['numPlaces'] = getInt(div.text)
263:
264: # numTextPages
265: elif dc == 'countPages':
266: np = getInt(div.text)
267: if np > 0:
268: docinfo['numTextPages'] = np
269: if docinfo.get('numPages', 0) == 0:
270: # seems to be text-only - update page count
271: docinfo['numPages'] = np
272: pageinfo['end'] = min(pageinfo['end'], np)
273: pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
274: if np % pageinfo['groupsize'] > 0:
275: pageinfo['numgroups'] += 1
276:
277: #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
278: return
279:
280:
    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext

        Fetches page pn of the document via page-fragment.xql, updates
        docinfo/pageinfo from the embedded pageMeta div, and returns the
        serialized pageContent div with links rewritten according to mode
        ("text", "dict", "xml", "pureXml" or "gis").  Returns
        "no text here" when no pageContent div is found.
        """
        logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
        # check for cached text -- but this shouldn't be called twice
        if pageinfo.has_key('textPage'):
            logging.debug("getTextPage: using cached text")
            return pageinfo['textPage']

        docpath = docinfo['textURLPath']
        # just checking
        if pageinfo['current'] != pn:
            logging.warning("getTextPage: current!=pn!")

        # stuff for constructing full urls
        url = docinfo['url']
        urlmode = docinfo['mode']
        sn = pageinfo.get('sn', None)
        highlightQuery = pageinfo.get('highlightQuery', None)
        tocMode = pageinfo.get('tocMode', None)
        tocPN = pageinfo.get('tocPN',None)
        characterNormalization = pageinfo.get('characterNormalization', None)
        selfurl = docinfo['viewerUrl']

        if mode == "dict" or mode == "text_dict":
            # dict is called textPollux in the backend
            textmode = "textPollux"
            # NOTE(review): mode "text_dict" is not handled by the branches
            # below and will fall through to "no text here" -- verify
        elif not mode:
            # default is text
            mode = "text"
            textmode = "text"
        else:
            textmode = mode

        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
        if highlightQuery:
            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)

        # fetch the page
        pagexml = self.getServerData("page-fragment.xql",textParam)
        dom = ET.fromstring(pagexml)
        # extract additional info
        self.processPageInfo(dom, docinfo, pageinfo)
        # page content is in <div class="pageContent">
        pagediv = None
        # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
        # so we look at the second level divs
        alldivs = dom.findall("div")
        for div in alldivs:
            dc = div.get('class')
            # page content div
            if dc == 'pageContent':
                pagediv = div
                break

        # plain text mode
        if mode == "text":
            if pagediv is not None:
                # rewrite note anchors into full viewer URLs
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href and href.startswith('#note-'):
                        href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))
                        l.set('href', href)

                return serialize(pagediv)

        # text-with-links mode
        elif mode == "dict":
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')

                    if href:
                        # is link with href
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl))
                            # add target
                            l.set('target', '_blank')

                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            # lemma link: open in a popup window
                            selfurl = self.absolute_url()
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl))
                            l.set('target', '_blank')
                            l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.set('ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            # note anchor: rewrite into full viewer URL
                            l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)))

                return serialize(pagediv)

        # xml mode
        elif mode == "xml":
            if pagediv is not None:
                return serialize(pagediv)

        # pureXml mode
        elif mode == "pureXml":
            if pagediv is not None:
                return serialize(pagediv)

        # gis mode
        elif mode == "gis":
            name = docinfo['name']
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href:
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            # rewrite chinagis REST links to the document db
                            l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name))
                            l.set('target', '_blank')

                return serialize(pagediv)

        return "no text here"
403:
404: # WTF: is this needed?
405: def getOrigPages(self, docinfo=None, pageinfo=None):
406: logging.debug("CALLED: getOrigPages!")
407: if not pageinfo.has_key('pageNumberOrig'):
408: logging.warning("getOrigPages: not in pageinfo!")
409: return None
410:
411: return pageinfo['pageNumberOrig']
412:
413: # WTF: is this needed?
414: def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
415: logging.debug("CALLED: getOrigPagesNorm!")
416: if not pageinfo.has_key('pageNumberOrigNorm'):
417: logging.warning("getOrigPagesNorm: not in pageinfo!")
418: return None
419:
420: return pageinfo['pageNumberOrigNorm']
421:
422: # TODO: should be getWordInfo
423: def getTranslate(self, word=None, language=None):
424: """translate into another languages"""
425: data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
426: return data
427:
428: # WTF: what does this do?
429: def getLemma(self, lemma=None, language=None):
430: """simular words lemma """
431: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
432: return data
433:
434: # WTF: what does this do?
435: def getLemmaQuery(self, query=None, language=None):
436: """simular words lemma """
437: data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
438: return data
439:
440: # WTF: what does this do?
441: def getLex(self, query=None, language=None):
442: #simular words lemma
443: data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
444: return data
445:
446: # WTF: what does this do?
447: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
448: #number of
449: docpath = docinfo['textURLPath']
450: pagesize = pageinfo['queryPageSize']
451: pn = pageinfo['searchPN']
452: query =pageinfo['query']
453: queryType =pageinfo['queryType']
454: tocSearch = 0
455: tocDiv = None
456:
457: pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
458: pagedom = Parse(pagexml)
459: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
460: tocSearch = int(getTextFromNode(numdivs[0]))
461: tc=int((tocSearch/10)+1)
462: return tc
463:
464: def getToc(self, mode="text", docinfo=None):
465: """loads table of contents and stores XML in docinfo"""
466: logging.debug("getToc mode=%s"%mode)
467: if mode == "none":
468: return docinfo
469:
470: if 'tocSize_%s'%mode in docinfo:
471: # cached toc
472: return docinfo
473:
474: docpath = docinfo['textURLPath']
475: # we need to set a result set size
476: pagesize = 1000
477: pn = 1
478: if mode == "text":
479: queryType = "toc"
480: else:
481: queryType = mode
482: # number of entries in toc
483: tocSize = 0
484: tocDiv = None
485: # fetch full toc
486: pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
487: dom = ET.fromstring(pagexml)
488: # page content is in <div class="queryResultPage">
489: pagediv = None
490: # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
491: alldivs = dom.findall("div")
492: for div in alldivs:
493: dc = div.get('class')
494: # page content div
495: if dc == 'queryResultPage':
496: pagediv = div
497:
498: elif dc == 'queryResultHits':
499: docinfo['tocSize_%s'%mode] = getInt(div.text)
500:
501: if pagediv:
502: # store XML in docinfo
503: docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
504:
505: return docinfo
506:
507: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
508: """returns single page from the table of contents"""
509: logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
510: if mode == "text":
511: queryType = "toc"
512: else:
513: queryType = mode
514:
515: # check for cached TOC
516: if not docinfo.has_key('tocXML_%s'%mode):
517: self.getToc(mode=mode, docinfo=docinfo)
518:
519: tocxml = docinfo.get('tocXML_%s'%mode, None)
520: if not tocxml:
521: logging.error("getTocPage: unable to find tocXML")
522: return "No ToC"
523:
524: pagesize = int(pageinfo['tocPageSize'])
525: url = docinfo['url']
526: urlmode = docinfo['mode']
527: selfurl = docinfo['viewerUrl']
528: viewMode= pageinfo['viewMode']
529: tocMode = pageinfo['tocMode']
530: tocPN = int(pageinfo['tocPN'])
531: pn = tocPN
532:
533: fulltoc = ET.fromstring(tocxml)
534:
535: if fulltoc:
536: # paginate
537: start = (pn - 1) * pagesize * 2
538: len = pagesize * 2
539: del fulltoc[:start]
540: del fulltoc[len:]
541: tocdivs = fulltoc
542:
543: # check all a-tags
544: links = tocdivs.findall(".//a")
545: for l in links:
546: href = l.get('href')
547: if href:
548: # take pn from href
549: m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
550: if m is not None:
551: # and create new url
552: l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN))
553: else:
554: logging.warning("getTocPage: Problem with link=%s"%href)
555:
556: return serialize(tocdivs)
557:
558:
559: def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
560: """change settings"""
561: self.title=title
562: self.timeout = timeout
563: self.serverUrl = serverUrl
564: if RESPONSE is not None:
565: RESPONSE.redirect('manage_main')
566:
567: # management methods
568: def manage_addMpdlXmlTextServerForm(self):
569: """Form for adding"""
570: pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
571: return pt()
572:
573: def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
574: #def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
575: """add zogiimage"""
576: newObj = MpdlXmlTextServer(id,title,serverUrl,timeout)
577: self.Destination()._setObject(id, newObj)
578: if RESPONSE is not None:
579: RESPONSE.redirect('manage_main')
580:
581:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>