--- documentViewer/MpdlXmlTextServer.py 2011/06/14 09:57:11 1.238 +++ documentViewer/MpdlXmlTextServer.py 2011/07/15 19:34:41 1.238.2.2 @@ -1,15 +1,83 @@ from OFS.SimpleItem import SimpleItem from Products.PageTemplates.PageTemplateFile import PageTemplateFile + from Ft.Xml import EMPTY_NAMESPACE, Parse from Ft.Xml.Domlette import NonvalidatingReader +import Ft.Xml.Domlette +import cStringIO + +import xml.etree.ElementTree as ET -import md5 -import sys +import re import logging import urllib import documentViewer -from documentViewer import getTextFromNode, serializeNode +#from documentViewer import getTextFromNode, serializeNode + +def intOr0(s, default=0): + """convert s to int or return default""" + try: + return int(s) + except: + return default + +def getText(node): + """get the cdata content of a node""" + if node is None: + return "" + # ET: + text = node.text or "" + for e in node: + text += getText(e) + if e.tail: + text += e.tail + + return text + +def serialize(node): + """returns a string containing an XML snippet of node""" + s = ET.tostring(node, 'UTF-8') + # snip off XML declaration + if s.startswith('<?xml'): + i = s.find('?>') + return s[i+3:] + + return s + + +def getTextFromNode(node): + """get the cdata content of a node""" + if node is None: + return "" + # ET: +# text = node.text or "" +# for e in node: +# text += gettext(e) +# if e.tail: +# text += e.tail + + # 4Suite: + nodelist=node.childNodes + text = "" + for n in nodelist: + if n.nodeType == node.TEXT_NODE: + text = text + n.data + + return text + +def serializeNode(node, encoding="utf-8"): + """returns a string containing node as XML""" + #s = ET.tostring(node) + + # 4Suite: + stream = cStringIO.StringIO() + Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding) + s = stream.getvalue() + stream.close() + + return s + class MpdlXmlTextServer(SimpleItem): """TextServer implementation for MPDL-XML eXist server""" @@ -21,8 +89,7 @@ class MpdlXmlTextServer(SimpleItem): manage_changeMpdlXmlTextServerForm = 
PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals()) - def __init__(self,id,title="",serverUrl="http://mpdl-system.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40): - #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40): + def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40): """constructor""" self.id=id @@ -42,8 +109,10 @@ class MpdlXmlTextServer(SimpleItem): url = self.serverUrl+method return documentViewer.getHttpData(url,data,timeout=self.timeout) + # WTF: what does this really do? can it be integrated in getPage? def getSearch(self, pageinfo=None, docinfo=None): """get search list""" + logging.debug("getSearch()") docpath = docinfo['textURLPath'] url = docinfo['url'] pagesize = pageinfo['queryPageSize'] @@ -146,12 +215,12 @@ class MpdlXmlTextServer(SimpleItem): hrefList=[] myList= "" text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn)) - dom = Parse(text) - result = dom.xpath("//result/resultPage/place") + dom = ET.fromstring(text) + result = dom.findall(".//result/resultPage/place") for l in result: - hrefNode= l.getAttributeNodeNS(None, u"id") - href= hrefNode.nodeValue + href = l.get("id") hrefList.append(href) + # WTF: what does this do? myList = ",".join(hrefList) #logging.debug("getGisPlaces :%s"%(myList)) return myList @@ -166,264 +235,223 @@ class MpdlXmlTextServer(SimpleItem): hrefList=[] myList="" text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath)) - dom =Parse(text) - result = dom.xpath("//result/resultPage/place") + dom = ET.fromstring(text) + result = dom.findall(".//result/resultPage/place") for l in result: - hrefNode = l.getAttributeNodeNS(None, u"id") - href= hrefNode.nodeValue + href = l.get("id") hrefList.append(href) + # WTF: what does this do? 
myList = ",".join(hrefList) #logging.debug("getALLGisPlaces :%s"%(myList)) return myList + def processPageInfo(self, dom, docinfo, pageinfo): + """processes page info divs from dom and stores in docinfo and pageinfo""" + # process all toplevel divs + alldivs = dom.findall(".//div") + pagediv = None + for div in alldivs: + dc = div.get('class') + + # page content div + if dc == 'pageContent': + pagediv = div + + # pageNumberOrig + elif dc == 'pageNumberOrig': + pageinfo['pageNumberOrig'] = div.text + + # pageNumberOrigNorm + elif dc == 'pageNumberOrigNorm': + pageinfo['pageNumberOrigNorm'] = div.text + + # pageNumberOrigNorm + elif dc == 'countFigureEntries': + docinfo['countFigureEntries'] = intOr0(div.text) + + # pageNumberOrigNorm + elif dc == 'countTocEntries': + # WTF: s1 = int(s)/30+1 + docinfo['countTocEntries'] = intOr0(div.text) + + # numTextPages + elif dc == 'countPages': + np = intOr0(div.text) + if np > 0: + docinfo['numTextPages'] = np + if docinfo.get('numPages', 0) == 0: + # seems to be text-only + docinfo['numTextPages'] = np + pageinfo['end'] = min(pageinfo['end'], np) + pageinfo['numgroups'] = int(np / pageinfo['groupsize']) + if np % pageinfo['groupsize'] > 0: + pageinfo['numgroups'] += 1 + + return + def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None): """returns single page from fulltext""" + logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn)) + # check for cached text -- but this shouldn't be called twice + if pageinfo.has_key('textPage'): + logging.debug("getTextPage: using cached text") + return pageinfo['textPage'] + docpath = docinfo['textURLPath'] - path = docinfo['textURLPath'] - url = docinfo.get('url',None) - name = docinfo.get('name',None) - pn =pageinfo['current'] - sn = pageinfo['sn'] - #optionToggle =pageinfo ['optionToggle'] - highlightQuery = pageinfo['highlightQuery'] - #mode = pageinfo ['viewMode'] - tocMode = pageinfo['tocMode'] - characterNormalization=pageinfo['characterNormalization'] - tocPN = 
pageinfo['tocPN'] - selfurl = self.absolute_url() + # just checking + if pageinfo['current'] != pn: + logging.warning("getTextPage: current!=pn!") + + # stuff for constructing full urls + url = docinfo['url'] + urlmode = docinfo['mode'] + sn = pageinfo.get('sn', None) + highlightQuery = pageinfo.get('highlightQuery', None) + tocMode = pageinfo.get('tocMode', None) + tocPN = pageinfo.get('tocPN',None) + characterNormalization = pageinfo.get('characterNormalization', None) + selfurl = docinfo['viewerUrl'] + if mode == "text_dict": + # text_dict is called textPollux in the backend textmode = "textPollux" else: textmode = mode textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization) - if highlightQuery is not None: + if highlightQuery: textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) + # fetch the page pagexml = self.getServerData("page-fragment.xql",textParam) - dom = Parse(pagexml) - #dom = NonvalidatingReader.parseStream(pagexml) - - #original Pages - pagedivs = dom.xpath("//div[@class='pageNumberOrig']") - - """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"): - if len(pagedivs)>0: - docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0]) - logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig'])) - - #original Pages Norm - pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']") - if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"): - if len(pagedivs)>0: - docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0]) - logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm'])) - """ - #figureEntries - pagedivs = dom.xpath("//div[@class='countFigureEntries']") - if pagedivs == dom.xpath("//div[@class='countFigureEntries']"): - if len(pagedivs)>0: - docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0]) - s = getTextFromNode(pagedivs[0]) - if s=='0': - try: - docinfo['countFigureEntries'] = int(s) - except: - docinfo['countFigureEntries'] = 0 - 
else: - s1 = int(s)/30+1 - try: - docinfo['countFigureEntries'] = int(s1) - except: - docinfo['countFigureEntries'] = 0 - - #allPlaces - pagedivs = dom.xpath("//div[@class='countPlaces']") - if pagedivs == dom.xpath("//div[@class='countPlaces']"): - if len(pagedivs)>0: - docinfo['countPlaces']= getTextFromNode(pagedivs[0]) - s = getTextFromNode(pagedivs[0]) - try: - docinfo['countPlaces'] = int(s) - except: - docinfo['countPlaces'] = 0 - - #tocEntries - pagedivs = dom.xpath("//div[@class='countTocEntries']") - if pagedivs == dom.xpath("//div[@class='countTocEntries']"): - if len(pagedivs)>0: - docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0])) - s = getTextFromNode(pagedivs[0]) - if s=='0': - try: - docinfo['countTocEntries'] = int(s) - except: - docinfo['countTocEntries'] = 0 - else: - s1 = int(s)/30+1 - try: - docinfo['countTocEntries'] = int(s1) - except: - docinfo['countTocEntries'] = 0 - - #numTextPages - pagedivs = dom.xpath("//div[@class='countPages']") - if pagedivs == dom.xpath("//div[@class='countPages']"): - if len(pagedivs)>0: - docinfo['numPages'] = getTextFromNode(pagedivs[0]) - s = getTextFromNode(pagedivs[0]) - - try: - docinfo['numPages'] = int(s) - #logging.debug("PAGE NUMBER: %s"%(s)) - - np = docinfo['numPages'] - pageinfo['end'] = min(pageinfo['end'], np) - pageinfo['numgroups'] = int(np / pageinfo['groupsize']) - if np % pageinfo['groupsize'] > 0: - pageinfo['numgroups'] += 1 - except: - docinfo['numPages'] = 0 - - else: - #no full text -- init to 0 - docinfo['pageNumberOrig'] = 0 - docinfo['countFigureEntries'] = 0 - docinfo['countPlaces'] = 0 - docinfo['countTocEntries'] = 0 - docinfo['numPages'] = 0 - docinfo['pageNumberOrigNorm'] = 0 - #return docinfo + dom = ET.fromstring(pagexml) + # extract additional info + self.processPageInfo(dom, docinfo, pageinfo) + # page content is in
+ pagediv = None + # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent'] + alldivs = dom.findall(".//div") + for div in alldivs: + dc = div.get('class') + # page content div + if dc == 'pageContent': + pagediv = div + break # plain text mode if mode == "text": - # first div contains text - pagedivs = dom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - links = pagenode.xpath("//a") + if pagediv: + links = pagediv.findall(".//a") for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") - if hrefNode: - href= hrefNode.nodeValue - if href.startswith('#note-'): - hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn)) - return serializeNode(pagenode) - if mode == "xml": - # first div contains text - pagedivs = dom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - return serializeNode(pagenode) - if mode == "gis": - # first div contains text - pagedivs = dom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - links =pagenode.xpath("//a") - for l in links: - hrefNode =l.getAttributeNodeNS(None, u"href") - if hrefNode: - href=hrefNode.nodeValue - if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'): - hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name) - l.setAttributeNS(None, 'target', '_blank') - return serializeNode(pagenode) - - if mode == "pureXml": - # first div contains text - pagedivs = dom.xpath("/div") - if len(pagedivs) > 0: - pagenode = pagedivs[0] - return serializeNode(pagenode) + href = l.get('href') + if href and href.startswith('#note-'): + href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)) + l.set('href', href) + + return serialize(pagediv) + # text-with-links mode - if mode == "text_dict": - # first div contains text - #mode = pageinfo ['viewMode'] - pagedivs = dom.xpath("/div") - if len(pagedivs) 
> 0: - pagenode = pagedivs[0] + elif mode == "text_dict": + if pagediv: # check all a-tags - links = pagenode.xpath("//a") - + links = pagediv.findall(".//a") for l in links: - hrefNode = l.getAttributeNodeNS(None, u"href") + href = l.get('href') - if hrefNode: + if href: # is link with href - href = hrefNode.nodeValue if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'): # is pollux link selfurl = self.absolute_url() # change href - hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl) + l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)) # add target - l.setAttributeNS(None, 'target', '_blank') - #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;") - #l.setAttributeNS(None, "ondblclick", "popupWin.focus();") - #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;") + l.set('target', '_blank') if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'): selfurl = self.absolute_url() - hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl) - l.setAttributeNS(None, 'target', '_blank') - l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;") - l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();') + l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)) + l.set('target', '_blank') + l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, 
location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;") + l.set('ondblclick', 'popupWin.focus();') if href.startswith('#note-'): - hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn)) + l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))) - return serializeNode(pagenode) + return serialize(pagediv) + + # xml mode + elif mode == "xml": + if pagediv: + return serialize(pagediv) + + # pureXml mode + elif mode == "pureXml": + if pagediv: + return serialize(pagediv) + + # gis mode + elif mode == "gis": + name = docinfo['name'] + if pagediv: + # check all a-tags + links = pagediv.findall(".//a") + for l in links: + href = l.get('href') + if href: + if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'): + l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)) + l.set('target', '_blank') + + return serialize(pagediv) + return "no text here" + # WTF: is this needed? def getOrigPages(self, docinfo=None, pageinfo=None): - docpath = docinfo['textURLPath'] - pn =pageinfo['current'] - selfurl = self.absolute_url() - pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn)) - dom = Parse(pagexml) - pagedivs = dom.xpath("//div[@class='pageNumberOrig']") - if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"): - if len(pagedivs)>0: - docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0]) - return docinfo['pageNumberOrig'] + logging.debug("CALLED: getOrigPages!") + if not pageinfo.has_key('pageNumberOrig'): + logging.warning("getOrigPages: not in pageinfo!") + return None + + return pageinfo['pageNumberOrig'] + # WTF: is this needed? 
def getOrigPagesNorm(self, docinfo=None, pageinfo=None): - docpath = docinfo['textURLPath'] - pn =pageinfo['current'] - selfurl = self.absolute_url() - pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn)) - dom = Parse(pagexml) - pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']") - if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"): - if len(pagedivs)>0: - docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0]) - return docinfo['pageNumberOrigNorm'] - + logging.debug("CALLED: getOrigPagesNorm!") + if not pageinfo.has_key('pageNumberOrigNorm'): + logging.warning("getOrigPagesNorm: not in pageinfo!") + return None + + return pageinfo['pageNumberOrigNorm'] + # TODO: should be getWordInfo def getTranslate(self, word=None, language=None): """translate into another languages""" data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html") - #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query))) return data + # WTF: what does this do? def getLemma(self, lemma=None, language=None): """simular words lemma """ data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html") return data + # WTF: what does this do? def getLemmaQuery(self, query=None, language=None): """simular words lemma """ data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html") return data + # WTF: what does this do? def getLex(self, query=None, language=None): #simular words lemma data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query)) return data - + + # WTF: what does this do? 
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): #number of docpath = docinfo['textURLPath'] @@ -442,9 +470,11 @@ class MpdlXmlTextServer(SimpleItem): return tc def getToc(self, mode="text", docinfo=None): - """loads table of contents and stores in docinfo""" + """loads table of contents and stores XML in docinfo""" + logging.debug("getToc mode=%s"%mode) if mode == "none": - return docinfo + return docinfo + if 'tocSize_%s'%mode in docinfo: # cached toc return docinfo @@ -460,44 +490,87 @@ class MpdlXmlTextServer(SimpleItem): # number of entries in toc tocSize = 0 tocDiv = None - + # fetch full toc pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn)) - - # post-processing downloaded xml - pagedom = Parse(pagexml) - # get number of entries - numdivs = pagedom.xpath("//div[@class='queryResultHits']") - if len(numdivs) > 0: - tocSize = int(getTextFromNode(numdivs[0])) - docinfo['tocSize_%s'%mode] = tocSize + dom = ET.fromstring(pagexml) + # page content is in
+ pagediv = None + # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage'] + alldivs = dom.findall("div") + for div in alldivs: + dc = div.get('class') + # page content div + if dc == 'queryResultPage': + pagediv = div + + elif dc == 'queryResultHits': + docinfo['tocSize_%s'%mode] = intOr0(div.text) + + if pagediv: +# # split xml in chunks +# tocs = [] +# tocdivs = pagediv.findall('div') +# for p in zip(tocdivs[::2], tocdivs[1::2]): +# toc = serialize(p[0]) +# toc += serialize(p[1]) +# tocs.append(toc) +# logging.debug("pair: %s"%(toc)) + # store XML in docinfo + docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8') + return docinfo def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None): """returns single page from the table of contents""" - # TODO: this should use the cached TOC + logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn)) if mode == "text": queryType = "toc" else: queryType = mode - docpath = docinfo['textURLPath'] - path = docinfo['textURLPath'] - pagesize = pageinfo['tocPageSize'] - pn = pageinfo['tocPN'] + + # check for cached TOC + if not docinfo.has_key('tocXML_%s'%mode): + self.getToc(mode=mode, docinfo=docinfo) + + tocxml = docinfo.get('tocXML_%s'%mode, None) + if not tocxml: + logging.error("getTocPage: unable to find tocXML") + return "No ToC" + + pagesize = int(pageinfo['tocPageSize']) url = docinfo['url'] - selfurl = self.absolute_url() + urlmode = docinfo['mode'] + selfurl = docinfo['viewerUrl'] viewMode= pageinfo['viewMode'] - characterNormalization = pageinfo ['characterNormalization'] - #optionToggle =pageinfo ['optionToggle'] tocMode = pageinfo['tocMode'] - tocPN = pageinfo['tocPN'] + tocPN = int(pageinfo['tocPN']) + + fulltoc = ET.fromstring(tocxml) - data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn)) - page = 
data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN)) - text = page.replace('mode=image','mode=texttool') - return text + if fulltoc: + # paginate + #start = (pn - 1) * pagesize * 2 + #end = start + pagesize * 2 + #tocdivs = fulltoc[start:end] + tocdivs = fulltoc + + # check all a-tags + links = tocdivs.findall(".//a") + for l in links: + href = l.get('href') + if href: + # take pn from href + m = re.match(r'page-fragment\.xql.*pn=(\d+)', href) + if m is not None: + # and create new url + l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN)) + else: + logging.warning("getTocPage: Problem with link=%s"%href) + + return serialize(tocdivs) + def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None): - #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None): """change settings""" self.title=title self.timeout = timeout @@ -517,4 +590,6 @@ def manage_addMpdlXmlTextServer(self,id, newObj = MpdlXmlTextServer(id,title,serverUrl,timeout) self.Destination()._setObject(id, newObj) if RESPONSE is not None: - RESPONSE.redirect('manage_main') \ No newline at end of file + RESPONSE.redirect('manage_main') + + \ No newline at end of file