File: [Repository] / documentViewer / MpdlXmlTextServer.py
Revision 1.238.2.13
Fri Aug 12 14:41:39 2011 UTC by casties
Branches: elementtree
more cleanup

    1: from OFS.SimpleItem import SimpleItem
    2: from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
    3: 
    4: from Ft.Xml import EMPTY_NAMESPACE, Parse
    5: from Ft.Xml.Domlette import NonvalidatingReader
    6: import Ft.Xml.Domlette
    7: import cStringIO
    8: 
    9: import xml.etree.ElementTree as ET
   10: 
   11: import re
   12: import logging
   13: import urllib
   14: 
   15: from SrvTxtUtils import getInt, getText, getHttpData
   16: 
   17: def serialize(node):
   18:     """returns a string containing an XML snippet of node"""
   19:     s = ET.tostring(node, 'UTF-8')
   20:     # snip off XML declaration
   21:     if s.startswith('<?xml'):
   22:         i = s.find('?>')
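                # i+3 skips the '?>' plus the newline that tostring() puts after the declaration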
   23:         return s[i+3:]
   24: 
   25:     return s
   26: 
   27: 
   28: def getTextFromNode(node):
   29:     """get the cdata content of a node"""
   30:     if node is None:
   31:         return ""
   32: 
   33:     # 4Suite:
   34:     nodelist=node.childNodes
   35:     text = ""
   36:     for n in nodelist:
   37:         if n.nodeType == node.TEXT_NODE:
    38:             text = text + n.data
   39:     
   40:     return text
   41: 
   42: def serializeNode(node, encoding="utf-8"):
   43:     """returns a string containing node as XML"""
   44:     #s = ET.tostring(node)
   45:     
   46:     # 4Suite:
   47:     stream = cStringIO.StringIO()
   48:     Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
   49:     s = stream.getvalue()
   50:     stream.close()
   51: 
   52:     return s
   53: 
   54: 
   55: class MpdlXmlTextServer(SimpleItem):
   56:     """TextServer implementation for MPDL-XML eXist server"""
   57:     meta_type="MPDL-XML TextServer"
   58: 
   59:     manage_options=(
   60:         {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
   61:        )+SimpleItem.manage_options
   62:     
   63:     manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
   64:         
   65:     def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
   66:         """constructor"""
   67:         self.id=id
   68:         self.title=title
   69:         self.timeout = timeout
   70:         if serverName is None:
   71:             self.serverUrl = serverUrl
   72:         else:
   73:             self.serverUrl = "http://%s/mpdl/interface/"%serverName
   74:         
   75:     def getHttpData(self, url, data=None):
   76:         """returns result from url+data HTTP request"""
   77:         return getHttpData(url,data,timeout=self.timeout)
   78:     
   79:     def getServerData(self, method, data=None):
   80:         """returns result from text server for method+data"""
   81:         url = self.serverUrl+method
   82:         return getHttpData(url,data,timeout=self.timeout)
   83: 
   84:     # WTF: what does this really do? can it be integrated in getPage?
   85:     def getSearch(self, pageinfo=None,  docinfo=None):
   86:         """get search list"""
   87:         logging.debug("getSearch()")
   88:         docpath = docinfo['textURLPath'] 
   89:         url = docinfo['url']
   90:         pagesize = pageinfo['queryPageSize']
   91:         pn = pageinfo.get('searchPN',1)
   92:         sn = pageinfo.get('sn',None) #TODO: is this s now?
   93:         highlightQuery = pageinfo['highlightQuery']
    94:         query = pageinfo['query']
    95:         queryType = pageinfo['queryType']
    96:         viewMode = pageinfo['viewMode']
   97:         tocMode = pageinfo['tocMode']
   98:         characterNormalization = pageinfo['characterNormalization']
   99:         #optionToggle = pageinfo['optionToggle']
  100:         tocPN = pageinfo['tocPN']
  101:         selfurl = self.absolute_url()
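                # ask the eXist backend (doc-query.xql) for the search result page;
                # the parameter string just passes the current viewer state
                # (query, paging, highlighting, normalization) through to the server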
  102:         data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
  103:         pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
  104:         pagedom = Parse(pagexml)
  105:         
  106:         """
  107:         pagedivs = pagedom.xpath("//div[@class='queryResultHits']") 
  108:         if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
  109:             if len(pagedivs)>0:
  110:                 docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
  111:                 s = getTextFromNode(pagedivs[0])
  112:                 s1 = int(s)/10+1
  113:                 try:
  114:                     docinfo['queryResultHits'] = int(s1)
  115:                     logging.debug("SEARCH ENTRIES: %s"%(s1))
  116:                 except:
  117:                     docinfo['queryResultHits'] = 0
  118:         """
   119:         if queryType in ("fulltext", "xpath", "xquery", "fulltextMorphLemma"):
  120:             pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
  121:             if len(pagedivs)>0:
  122:                 pagenode=pagedivs[0]
  123:                 links=pagenode.xpath("//a")
  124:                 for l in links:
  125:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  126:                     if hrefNode:
  127:                         href = hrefNode.nodeValue
  128:                         if href.startswith('page-fragment.xql'):
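                                    # rewrite backend result links so they point back at this
                                    # viewer and carry the current search state along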
  129:                             selfurl = self.absolute_url()            
  130:                             pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
  131:                             hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)                                           
  132:                 #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
  133:                 return serializeNode(pagenode)        
   134:         if queryType == "fulltextMorph":
  135:             pagedivs = pagedom.xpath("//div[@class='queryResult']")
  136:             if len(pagedivs)>0:
  137:                 pagenode=pagedivs[0]
  138:                 links=pagenode.xpath("//a")
  139:                 for l in links:
  140:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  141:                     if hrefNode:
  142:                         href = hrefNode.nodeValue
  143:                         if href.startswith('page-fragment.xql'):
  144:                             selfurl = self.absolute_url()       
  145:                             pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
  146:                             hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)  
  147:                         if href.startswith('../lt/lemma.xql'):
  148:                             hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))        
  149:                             l.setAttributeNS(None, 'target', '_blank')
  150:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  151:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')  
  152:                 pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")                
  153:                 return serializeNode(pagenode)        
   154:         if queryType in ("ftIndex", "ftIndexMorph"):
  155:             pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
  156:             if len(pagedivs)>0:
  157:                 pagenode=pagedivs[0]
  158:                 links=pagenode.xpath("//a")
  159:                 for l in links:
  160:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  161:                     if hrefNode:
  162:                         href = hrefNode.nodeValue
  163:                         hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))             
  164:                         if href.startswith('../lt/lex.xql'):
  165:                             hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)         
  166:                             l.setAttributeNS(None, 'target', '_blank')
  167:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  168:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
  169:                         if href.startswith('../lt/lemma.xql'):
  170:                             hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))        
  171:                             l.setAttributeNS(None, 'target', '_blank')
  172:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  173:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
  174:                 return serializeNode(pagenode)      
  175:         return "no text here"   
  176:            
  177:     def getGisPlaces(self, docinfo=None, pageinfo=None):
  178:         """ Show all Gis Places of whole Page"""
  179:         xpath='//place'
  180:         docpath = docinfo.get('textURLPath',None)
  181:         if not docpath:
  182:             return None
  183: 
  184:         pn = pageinfo['current']
  185:         hrefList=[]
  186:         myList= ""
  187:         text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
  188:         dom = ET.fromstring(text)
  189:         result = dom.findall(".//result/resultPage/place")
   190:         for l in result:
   191:             href = l.get("id")
   192:             hrefList.append(href)
   193:         # join the collected place ids into one comma-separated string
   194:         myList = ",".join(hrefList)
   195:         #logging.debug("getGisPlaces :%s"%(myList))
   196:         return myList
  197:     
  198:     def getAllGisPlaces (self, docinfo=None, pageinfo=None):
  199:         """Show all Gis Places of whole Book """
  200:         xpath ='//echo:place'
  201:         hrefList=[]
  202:         myList=""
  203:         text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
  204:         dom = ET.fromstring(text)
  205:         result = dom.findall(".//result/resultPage/place")
  206:         
   207:         for l in result:
   208:             href = l.get("id")
   209:             hrefList.append(href)
   210:         # join the collected place ids into one comma-separated string
   211:         myList = ",".join(hrefList)
   212:         #logging.debug("getALLGisPlaces :%s"%(myList))
   213:         return myList
  214:           
  215:     def processPageInfo(self, dom, docinfo, pageinfo):
  216:         """processes page info divs from dom and stores in docinfo and pageinfo"""
  217:         # assume first second level div is pageMeta
  218:         alldivs = dom.find("div")
  219:         
  220:         if alldivs is None or alldivs.get('class', '') != 'pageMeta':
  221:             logging.error("processPageInfo: pageMeta div not found!")
  222:             return
  223:         
  224:         for div in alldivs:
  225:             dc = div.get('class')
  226:             
  227:             # pageNumberOrig  
  228:             if dc == 'pageNumberOrig':
  229:                 pageinfo['pageNumberOrig'] = div.text
  230:                 
  231:             # pageNumberOrigNorm
  232:             elif dc == 'pageNumberOrigNorm':
  233:                 pageinfo['pageNumberOrigNorm'] = div.text
  234:                 
  235:             # pageHeaderTitle
  236:             elif dc == 'pageHeaderTitle':
  237:                 pageinfo['pageHeaderTitle'] = div.text
  238:                 
  239:             # numFigureEntries
  240:             elif dc == 'countFigureEntries':
  241:                 docinfo['numFigureEntries'] = getInt(div.text)
  242:                 
  243:             # numTocEntries
  244:             elif dc == 'countTocEntries':
  245:                 # WTF: s1 = int(s)/30+1
  246:                 docinfo['numTocEntries'] = getInt(div.text)
  247:                 
  248:             # numPlaces
  249:             elif dc == 'countPlaces':
  250:                 docinfo['numPlaces'] = getInt(div.text)
  251:                 
  252:             # numTextPages
  253:             elif dc == 'countPages':
  254:                 np = getInt(div.text)                    
  255:                 if np > 0:
  256:                     docinfo['numTextPages'] = np
  257:                     if docinfo.get('numPages', 0) == 0:
  258:                         # seems to be text-only - update page count
  259:                         docinfo['numPages'] = np
  260:                         #pageinfo['end'] = min(pageinfo['end'], np)
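                                # number of navigation groups, rounded up to whole groups
                                # (e.g. np=125 with groupsize=10 gives 13 groups)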
  261:                         pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
  262:                         if np % pageinfo['groupsize'] > 0:
  263:                             pageinfo['numgroups'] += 1
  264:         
  265:         #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
  266:         return
  267:          
  268:            
  269:     def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
  270:         """returns single page from fulltext"""
  271:         logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
  272:         # check for cached text -- but ideally this shouldn't be called twice
  273:         if pageinfo.has_key('textPage'):
  274:             logging.debug("getTextPage: using cached text")
  275:             return pageinfo['textPage']
  276:         
  277:         docpath = docinfo['textURLPath']
  278:         # just checking
  279:         if pageinfo['current'] != pn:
  280:             logging.warning("getTextPage: current!=pn!")
  281:             
  282:         # stuff for constructing full urls
  283:         url = docinfo['url']
  284:         urlmode = docinfo['mode']
  285:         sn = pageinfo.get('sn', None)
  286:         highlightQuery = pageinfo.get('highlightQuery', None)
  287:         tocMode = pageinfo.get('tocMode', None)
  288:         tocPN = pageinfo.get('tocPN',None)
  289:         characterNormalization = pageinfo.get('characterNormalization', None)
  290:         
  291:         selfurl = docinfo['viewerUrl']
  292:         
  293:         if mode == "dict" or mode == "text_dict":
  294:             # dict is called textPollux in the backend
  295:             textmode = "textPollux"
  296:         elif not mode:
  297:             # default is text
  298:             mode = "text"
  299:             textmode = "text"
  300:         else:
  301:             textmode = mode
  302:         
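                # build the query string for page-fragment.xql; sn and highlightQuery
                # are only appended when a search hit should be highlighted on this page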
  303:         textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
  304:         if highlightQuery:
  305:             textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)           
  306:         
  307:         # fetch the page
  308:         pagexml = self.getServerData("page-fragment.xql",textParam)
  309:         dom = ET.fromstring(pagexml)
  310:         # extract additional info
  311:         self.processPageInfo(dom, docinfo, pageinfo)
  312:         # page content is in <div class="pageContent">
  313:         pagediv = None
  314:         # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
  315:         # so we look at the second level divs
  316:         alldivs = dom.findall("div")
  317:         for div in alldivs:
  318:             dc = div.get('class')
  319:             # page content div
  320:             if dc == 'pageContent':
  321:                 pagediv = div
  322:                 break
  323:         
  324:         # plain text mode
  325:         if mode == "text":
  326:             # get full url assuming documentViewer is parent
  327:             selfurl = self.getLink()
  328:             if pagediv is not None:
  329:                 links = pagediv.findall(".//a")
  330:                 for l in links:
  331:                     href = l.get('href')
  332:                     if href and href.startswith('#note-'):
  333:                         href = href.replace('#note-',"%s#note-"%selfurl)
  334:                         l.set('href', href)
  335: 
  336:                 return serialize(pagediv)
  337:             
  338:         # text-with-links mode
  339:         elif mode == "dict":
  340:             if pagediv is not None:
  341:                 viewerurl = docinfo['viewerUrl']
  342:                 selfurl = self.getLink()
  343:                 # check all a-tags
  344:                 links = pagediv.findall(".//a")
  345:                 for l in links:
  346:                     href = l.get('href')
  347:                     
  348:                     if href:
  349:                         # is link with href
  350:                         if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
  351:                             # is dictionary link - change href (keeping parameters)
  352:                             l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/template/viewer_wordinfo'%viewerurl))
  353:                             # add target to open new page
  354:                             l.set('target', '_blank')
  355:                                                           
  356:                         # TODO: is this needed?
  357:                         if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
  358:                             selfurl = self.absolute_url()
  359:                             l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl))
  360:                             l.set('target', '_blank')
  361:                             l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
  362:                             l.set('ondblclick', 'popupWin.focus();')   
  363:                     
  364:                         if href.startswith('#note-'):
  365:                             # note link
  366:                             l.set('href', href.replace('#note-',"%s#note-"%selfurl))
  367:                               
  368:                 return serialize(pagediv)
  369:             
  370:         # xml mode
  371:         elif mode == "xml":
  372:             if pagediv is not None:
  373:                 return serialize(pagediv)
  374:             
  375:         # pureXml mode
  376:         elif mode == "pureXml":
  377:             if pagediv is not None:
  378:                 return serialize(pagediv)
  379:                   
  380:         # gis mode
  381:         elif mode == "gis":
  382:             name = docinfo['name']
  383:             if pagediv is not None:
  384:                 # check all a-tags
  385:                 links = pagediv.findall(".//a")
  386:                 for l in links:
  387:                     href = l.get('href')
  388:                     if href:
  389:                         if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
  390:                             l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name))
  391:                             l.set('target', '_blank') 
  392:                             
  393:                 return serialize(pagediv)
  394:                     
  395:         return "no text here"
  396:     
   397:     # dictionary lookup via the backend's wordInfo.xql service
  398:     def getWordInfo(self, word='', language='', display=''):
  399:         """show information (like dictionaries) about word"""
  400:         data = self.getServerData("lt/wordInfo.xql","language=%s&word=%s&display=%s&output=html"%(language,urllib.quote(word),urllib.quote(display)))
  401:         return data
  402:     
  403:     # WTF: what does this do?
  404:     def getLemma(self, lemma=None, language=None):
  405:         """simular words lemma """
  406:         data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
  407:         return data
  408:     
  409:     # WTF: what does this do?
  410:     def getLemmaQuery(self, query=None, language=None):
  411:         """simular words lemma """
  412:         data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
  413:         return data
  414:     
  415:     # WTF: what does this do?
  416:     def getLex(self, query=None, language=None):
   417:         # similar words for a lemma via lt/lex.xql
  418:         data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
  419:         return data
  420: 
  421:     # WTF: what does this do?
   422:     def getQuery(self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
   423:         # returns the number of query result pages (10 hits per page)
   424:         docpath = docinfo['textURLPath']
   425:         pagesize = pageinfo['queryPageSize']
   426:         pn = pageinfo['searchPN']
   427:         query = pageinfo['query']
   428:         queryType = pageinfo['queryType']
   429:         tocSearch = 0
   430:         tocDiv = None
   431:         
   432:         pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
   433:         pagedom = Parse(pagexml)
   434:         numdivs = pagedom.xpath("//div[@class='queryResultHits']")
   435:         tocSearch = int(getTextFromNode(numdivs[0]))
   436:         tc = int((tocSearch/10)+1)
   437:         return tc
  438:     
  439:     def getToc(self, mode="text", docinfo=None):
  440:         """loads table of contents and stores XML in docinfo"""
  441:         logging.debug("getToc mode=%s"%mode)
  442:         if mode == "none":
  443:             return docinfo
  444:               
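                # ToC data is cached per mode in docinfo under 'tocSize_<mode>'
                # and 'tocXML_<mode>' (both filled below)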
  445:         if 'tocSize_%s'%mode in docinfo:
  446:             # cached toc
  447:             return docinfo
  448:         
  449:         docpath = docinfo['textURLPath']
  450:         # we need to set a result set size
  451:         pagesize = 1000
  452:         pn = 1
  453:         if mode == "text":
  454:             queryType = "toc"
  455:         else:
  456:             queryType = mode
  457:         # number of entries in toc
  458:         tocSize = 0
  459:         tocDiv = None
  460:         # fetch full toc
  461:         pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
  462:         dom = ET.fromstring(pagexml)
  463:         # page content is in <div class="queryResultPage">
  464:         pagediv = None
  465:         # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
  466:         alldivs = dom.findall("div")
  467:         for div in alldivs:
  468:             dc = div.get('class')
  469:             # page content div
  470:             if dc == 'queryResultPage':
  471:                 pagediv = div
  472:                 
  473:             elif dc == 'queryResultHits':
  474:                 docinfo['tocSize_%s'%mode] = getInt(div.text)
  475: 
   476:         if pagediv is not None:
  477:             # store XML in docinfo
  478:             docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
  479: 
  480:         return docinfo
  481:     
  482:     def getTocPage(self, mode="text", pn=0, pageinfo=None, docinfo=None):
  483:         """returns single page from the table of contents"""
  484:         logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
  485:         if mode == "text":
  486:             queryType = "toc"
  487:         else:
  488:             queryType = mode
  489:             
  490:         # check for cached TOC
  491:         if not docinfo.has_key('tocXML_%s'%mode):
  492:             self.getToc(mode=mode, docinfo=docinfo)
  493:             
  494:         tocxml = docinfo.get('tocXML_%s'%mode, None)
  495:         if not tocxml:
  496:             logging.error("getTocPage: unable to find tocXML")
  497:             return "No ToC"
  498:         
  499:         pagesize = pageinfo['tocPageSize']
  500:         tocPN = pageinfo['tocPN']
  501:         if not pn:
  502:             pn = tocPN
  503: 
  504:         fulltoc = ET.fromstring(tocxml)
  505:         
   506:         if fulltoc is not None:
  507:             # paginate
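                    # each ToC entry apparently spans two child elements of the result div,
                    # hence the factor 2 in the slice arithmetic below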
   508:             start = (pn - 1) * pagesize * 2
   509:             size = pagesize * 2
   510:             del fulltoc[:start]
   511:             del fulltoc[size:]
  512:             tocdivs = fulltoc
  513:             
  514:             # check all a-tags
  515:             links = tocdivs.findall(".//a")
  516:             for l in links:
  517:                 href = l.get('href')
  518:                 if href:
  519:                     # take pn from href
  520:                     m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
  521:                     if m is not None:
  522:                         # and create new url (assuming parent is documentViewer)
  523:                         url = self.getLink('pn', m.group(1))
  524:                         l.set('href', url)
  525:                     else:
  526:                         logging.warning("getTocPage: Problem with link=%s"%href)
  527:                         
  528:             return serialize(tocdivs)
  529:     
  530:     
  531:     def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
  532:         """change settings"""
  533:         self.title=title
  534:         self.timeout = timeout
  535:         self.serverUrl = serverUrl
  536:         if RESPONSE is not None:
  537:             RESPONSE.redirect('manage_main')
  538:         
  539: # management methods
  540: def manage_addMpdlXmlTextServerForm(self):
  541:     """Form for adding"""
  542:     pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
  543:     return pt()
  544: 
  545: def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
  546: #def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):    
  547:     """add zogiimage"""
  548:     newObj = MpdlXmlTextServer(id,title,serverUrl,timeout)
  549:     self.Destination()._setObject(id, newObj)
  550:     if RESPONSE is not None:
  551:         RESPONSE.redirect('manage_main')
  552:         
  553:         
