File: documentViewer/MpdlXmlTextServer.py
Revision 1.238.2.8
Fri Aug 5 09:24:42 2011 UTC by casties
Branches: elementtree
smaller improvements

    1: from OFS.SimpleItem import SimpleItem
    2: from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
    3: 
    4: from Ft.Xml import EMPTY_NAMESPACE, Parse
    5: from Ft.Xml.Domlette import NonvalidatingReader
    6: import Ft.Xml.Domlette
    7: import cStringIO
    8: 
    9: import xml.etree.ElementTree as ET
   10: 
   11: import re
   12: import logging
   13: import urllib
   14: 
   15: from SrvTxtUtils import getInt, getText, getHttpData
   16: 
   17: def serialize(node):
   18:     """returns a string containing an XML snippet of node"""
   19:     s = ET.tostring(node, 'UTF-8')
   20:     # snip off XML declaration
   21:     if s.startswith('<?xml'):
   22:         i = s.find('?>')
   23:         return s[i+3:]
   24: 
   25:     return s
   26: 
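# Usage sketch (illustrative only, the values are made up): serialize() is used
# below to return page fragments as markup strings, e.g.
#   div = ET.fromstring('<div class="pageContent">some text</div>')
#   serialize(div)  # -> '<div class="pageContent">some text</div>'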
   27: 
   28: def getTextFromNode(node):
   29:     """get the cdata content of a node"""
   30:     if node is None:
   31:         return ""
   32:     # ET:
   33: #    text = node.text or ""
   34: #    for e in node:
   35: #        text += getTextFromNode(e)
   36: #        if e.tail:
   37: #            text += e.tail
   38: 
   39:     # 4Suite:
   40:     nodelist=node.childNodes
   41:     text = ""
   42:     for n in nodelist:
   43:         if n.nodeType == node.TEXT_NODE:
   44:             text += n.data
   45:     
   46:     return text
   47: 
   48: def serializeNode(node, encoding="utf-8"):
   49:     """returns a string containing node as XML"""
   50:     #s = ET.tostring(node)
   51:     
   52:     # 4Suite:
   53:     stream = cStringIO.StringIO()
   54:     Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
   55:     s = stream.getvalue()
   56:     stream.close()
   57: 
   58:     return s
   59: 
   60: 
   61: class MpdlXmlTextServer(SimpleItem):
   62:     """TextServer implementation for MPDL-XML eXist server"""
   63:     meta_type="MPDL-XML TextServer"
   64: 
   65:     manage_options=(
   66:         {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
   67:        )+SimpleItem.manage_options
   68:     
   69:     manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
   70:         
   71:     def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
   72:         """constructor"""
   73:         self.id=id
   74:         self.title=title
   75:         self.timeout = timeout
   76:         if serverName is None:
   77:             self.serverUrl = serverUrl
   78:         else:
   79:             self.serverUrl = "http://%s/mpdl/interface/"%serverName
   80:         
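    # Example (hypothetical host name): MpdlXmlTextServer('text', serverName='texter.example.org')
    # ends up with serverUrl == "http://texter.example.org/mpdl/interface/".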
   81:     def getHttpData(self, url, data=None):
   82:         """returns result from url+data HTTP request"""
   83:         return getHttpData(url,data,timeout=self.timeout)
   84:     
   85:     def getServerData(self, method, data=None):
   86:         """returns result from text server for method+data"""
   87:         url = self.serverUrl+method
   88:         return getHttpData(url,data,timeout=self.timeout)
   89: 
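    # Request sketch (hypothetical document path): getServerData("page-fragment.xql",
    # "document=/permanent/foo/bar.xml&mode=text&pn=3") sends the query string to
    # serverUrl + "page-fragment.xql"; how it is sent (GET/POST, timeout handling)
    # is up to getHttpData() in SrvTxtUtils.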
   90:     # WTF: what does this really do? can it be integrated in getPage?
   91:     def getSearch(self, pageinfo=None,  docinfo=None):
   92:         """get search list"""
   93:         logging.debug("getSearch()")
   94:         docpath = docinfo['textURLPath'] 
   95:         url = docinfo['url']
   96:         pagesize = pageinfo['queryPageSize']
   97:         pn = pageinfo.get('searchPN',1)
   98:         sn = pageinfo['sn']
   99:         highlightQuery = pageinfo['highlightQuery']
  100:         query = pageinfo['query']
  101:         queryType = pageinfo['queryType']
  102:         viewMode = pageinfo['viewMode']
  103:         tocMode = pageinfo['tocMode']
  104:         characterNormalization = pageinfo['characterNormalization']
  105:         #optionToggle = pageinfo['optionToggle']
  106:         tocPN = pageinfo['tocPN']
  107:         selfurl = self.absolute_url()
  108:         data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
  109:         pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
  110:         pagedom = Parse(pagexml)
  111:         
  112:         """
  113:         pagedivs = pagedom.xpath("//div[@class='queryResultHits']") 
  114:         if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
  115:             if len(pagedivs)>0:
  116:                 docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
  117:                 s = getTextFromNode(pagedivs[0])
  118:                 s1 = int(s)/10+1
  119:                 try:
  120:                     docinfo['queryResultHits'] = int(s1)
  121:                     logging.debug("SEARCH ENTRIES: %s"%(s1))
  122:                 except:
  123:                     docinfo['queryResultHits'] = 0
  124:         """
  125:         if queryType in ("fulltext", "xpath", "xquery", "fulltextMorphLemma"):
  126:             pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
  127:             if len(pagedivs)>0:
  128:                 pagenode=pagedivs[0]
  129:                 links=pagenode.xpath("//a")
  130:                 for l in links:
  131:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  132:                     if hrefNode:
  133:                         href = hrefNode.nodeValue
  134:                         if href.startswith('page-fragment.xql'):
  135:                             selfurl = self.absolute_url()            
  136:                             pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN, characterNormalization))
  137:                             hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)                                           
  138:                 #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
  139:                 return serializeNode(pagenode)        
  140:         if (queryType=="fulltextMorph"):
  141:             pagedivs = pagedom.xpath("//div[@class='queryResult']")
  142:             if len(pagedivs)>0:
  143:                 pagenode=pagedivs[0]
  144:                 links=pagenode.xpath("//a")
  145:                 for l in links:
  146:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  147:                     if hrefNode:
  148:                         href = hrefNode.nodeValue
  149:                         if href.startswith('page-fragment.xql'):
  150:                             selfurl = self.absolute_url()       
  151:                             pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
  152:                             hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)  
  153:                         if href.startswith('../lt/lemma.xql'):
  154:                             hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))        
  155:                             l.setAttributeNS(None, 'target', '_blank')
  156:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  157:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')  
  158:                 pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")                
  159:                 return serializeNode(pagenode)        
  160:         if queryType in ("ftIndex", "ftIndexMorph"):
  161:             pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
  162:             if len(pagedivs)>0:
  163:                 pagenode=pagedivs[0]
  164:                 links=pagenode.xpath("//a")
  165:                 for l in links:
  166:                     hrefNode = l.getAttributeNodeNS(None, u"href")
  167:                     if hrefNode:
  168:                         href = hrefNode.nodeValue
  169:                         hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))             
  170:                         if href.startswith('../lt/lex.xql'):
  171:                             hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)         
  172:                             l.setAttributeNS(None, 'target', '_blank')
  173:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  174:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
  175:                         if href.startswith('../lt/lemma.xql'):
  176:                             hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))        
  177:                             l.setAttributeNS(None, 'target', '_blank')
  178:                             l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
  179:                             l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
  180:                 return serializeNode(pagenode)      
  181:         return "no text here"   
  182:            
  183:     def getGisPlaces(self, docinfo=None, pageinfo=None):
  184:         """ Show all Gis Places of whole Page"""
  185:         xpath='//place'
  186:         docpath = docinfo.get('textURLPath',None)
  187:         if not docpath:
  188:             return None
  189: 
  190:         url = docinfo['url']
  191:         selfurl = self.absolute_url()
  192:         pn = pageinfo['current']
  193:         hrefList=[]
  194:         myList= ""
  195:         text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
  196:         dom = ET.fromstring(text)
  197:         result = dom.findall(".//result/resultPage/place")
  198:         for l in result:
  199:             href = l.get("id")
  200:             hrefList.append(href)
  201:         # join all place ids into one comma-separated string
  202:         myList = ",".join(hrefList)
  203:         #logging.debug("getGisPlaces :%s"%(myList))                             
  204:         return myList
  205:     
  206:     def getAllGisPlaces (self, docinfo=None, pageinfo=None):
  207:         """Show all Gis Places of whole Book """
  208:         xpath ='//echo:place'
  209:         docpath = docinfo['textURLPath']
  210:         url = docinfo['url']
  211:         selfurl = self.absolute_url()
  212:         pn = pageinfo['current']
  213:         hrefList=[]
  214:         myList=""
  215:         text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
  216:         dom = ET.fromstring(text)
  217:         result = dom.findall(".//result/resultPage/place")
  218:         
  219:         for l in result:
  220:             href = l.get("id")
  221:             hrefList.append(href)
  222:         # join all place ids into one comma-separated string
  223:         myList = ",".join(hrefList)
  224:         #logging.debug("getAllGisPlaces: %s"%myList)
  225:         return myList
  226:           
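    # Both GIS methods return a plain comma-separated string of place ids taken from
    # the place/@id attributes, e.g. "place-1,place-2" (the ids here are made up).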
  227:     def processPageInfo(self, dom, docinfo, pageinfo):
  228:         """processes page info divs from dom and stores in docinfo and pageinfo"""
  229:         # assume the first second-level div is pageMeta
  230:         alldivs = dom.find("div")
  231:         
  232:         if alldivs is None or alldivs.get('class', '') != 'pageMeta':
  233:             logging.error("processPageInfo: pageMeta div not found!")
  234:             return
  235:         
  236:         for div in alldivs:
  237:             dc = div.get('class')
  238:             
  239:             # pageNumberOrig  
  240:             if dc == 'pageNumberOrig':
  241:                 pageinfo['pageNumberOrig'] = div.text
  242:                 
  243:             # pageNumberOrigNorm
  244:             elif dc == 'pageNumberOrigNorm':
  245:                 pageinfo['pageNumberOrigNorm'] = div.text
  246:                 
  247:             # countFigureEntries
  248:             elif dc == 'countFigureEntries':
  249:                 docinfo['numFigureEntries'] = getInt(div.text)
  250:                 
  251:             # countTocEntries
  252:             elif dc == 'countTocEntries':
  253:                 # WTF: s1 = int(s)/30+1
  254:                 docinfo['numTocEntries'] = getInt(div.text)
  255:                 
  256:             # pageHeaderTitle
  257:             elif dc == 'pageHeaderTitle':
  258:                 docinfo['pageHeaderTitle'] = div.text
  259:                 
  260:             # numTextPages
  261:             elif dc == 'countPages':
  262:                 np = getInt(div.text)                    
  263:                 if np > 0:
  264:                     docinfo['numTextPages'] = np
  265:                     if docinfo.get('numPages', 0) == 0:
  266:                         # seems to be text-only - update page count
  267:                         docinfo['numPages'] = np
  268:                         pageinfo['end'] = min(pageinfo['end'], np)
  269:                         pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
  270:                         if np % pageinfo['groupsize'] > 0:
  271:                             pageinfo['numgroups'] += 1
  272:         
  273:         #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
  274:         return
  275:          
  276:            
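    # Sketch of the pageMeta fragment that processPageInfo() expects (reconstructed
    # from the class names checked above; the real server response may contain more):
    #   <div class="pageMeta">
    #     <div class="pageNumberOrig">XII</div>
    #     <div class="pageNumberOrigNorm">12</div>
    #     <div class="countPages">345</div>
    #     ...
    #   </div>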
  277:     def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
  278:         """returns single page from fulltext"""
  279:         logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
  280:         # check for cached text -- but this shouldn't be called twice
  281:         if pageinfo.has_key('textPage'):
  282:             logging.debug("getTextPage: using cached text")
  283:             return pageinfo['textPage']
  284:         
  285:         docpath = docinfo['textURLPath']
  286:         # just checking
  287:         if pageinfo['current'] != pn:
  288:             logging.warning("getTextPage: current!=pn!")
  289:             
  290:         # stuff for constructing full urls
  291:         url = docinfo['url']
  292:         urlmode = docinfo['mode']
  293:         sn = pageinfo.get('sn', None)
  294:         highlightQuery = pageinfo.get('highlightQuery', None)
  295:         tocMode = pageinfo.get('tocMode', None)
  296:         tocPN = pageinfo.get('tocPN',None)
  297:         characterNormalization = pageinfo.get('characterNormalization', None)
  298:         selfurl = docinfo['viewerUrl']   
  299:         
  300:         if mode == "dict" or mode == "text_dict":
  301:             # dict is called textPollux in the backend
  302:             textmode = "textPollux"
  303:         elif not mode:
  304:             # default is text
  305:             textmode = "text"
  306:         else:
  307:             textmode = mode
  308:         
  309:         textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
  310:         if highlightQuery:
  311:             textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)           
  312:         
  313:         # fetch the page
  314:         pagexml = self.getServerData("page-fragment.xql",textParam)
  315:         dom = ET.fromstring(pagexml)
  316:         # extract additional info
  317:         self.processPageInfo(dom, docinfo, pageinfo)
  318:         # page content is in <div class="pageContent">
  319:         pagediv = None
  320:         # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
  321:         # so we look at the second level divs
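        # (with ElementTree 1.3 / Python 2.7+ this could presumably be written as
        #  dom.find("div[@class='pageContent']") instead of the loop below)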
  322:         alldivs = dom.findall("div")
  323:         for div in alldivs:
  324:             dc = div.get('class')
  325:             # page content div
  326:             if dc == 'pageContent':
  327:                 pagediv = div
  328:                 break
  329:         
  330:         # plain text mode
  331:         if mode == "text":
  332:             if pagediv is not None:
  333:                 links = pagediv.findall(".//a")
  334:                 for l in links:
  335:                     href = l.get('href')
  336:                     if href and href.startswith('#note-'):
  337:                         href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))
  338:                         l.set('href', href)
  339: 
  340:                 return serialize(pagediv)
  341:             
  342:         # text-with-links mode
  343:         elif mode == "text_dict":
  344:             if pagediv is not None:
  345:                 # check all a-tags
  346:                 links = pagediv.findall(".//a")
  347:                 for l in links:
  348:                     href = l.get('href')
  349:                     
  350:                     if href:
  351:                         # is link with href
  352:                         if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
  353:                             # is pollux link
  354:                             selfurl = self.absolute_url()
  355:                             # change href
  356:                             l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl))
  357:                             # add target
  358:                             l.set('target', '_blank')
  359:                                                           
  360:                         if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):    
  361:                             selfurl = self.absolute_url()
  362:                             l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl))
  363:                             l.set('target', '_blank')
  364:                             l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
  365:                             l.set('ondblclick', 'popupWin.focus();')   
  366:                     
  367:                         if href.startswith('#note-'):
  368:                             l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)))
  369:                               
  370:                 return serialize(pagediv)
  371:             
  372:         # xml mode
  373:         elif mode == "xml":
  374:             if pagediv is not None:
  375:                 return serialize(pagediv)
  376:             
  377:         # pureXml mode
  378:         elif mode == "pureXml":
  379:             if pagediv is not None:
  380:                 return serialize(pagediv)
  381:                   
  382:         # gis mode
  383:         elif mode == "gis":
  384:             name = docinfo['name']
  385:             if pagediv is not None:
  386:                 # check all a-tags
  387:                 links = pagediv.findall(".//a")
  388:                 for l in links:
  389:                     href = l.get('href')
  390:                     if href:
  391:                         if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
  392:                             l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name))
  393:                             l.set('target', '_blank') 
  394:                             
  395:                 return serialize(pagediv)
  396:                     
  397:         return "no text here"
  398:     
  399:     # WTF: is this needed?
  400:     def getOrigPages(self, docinfo=None, pageinfo=None):
  401:         logging.debug("CALLED: getOrigPages!")
  402:         if not pageinfo.has_key('pageNumberOrig'):
  403:             logging.warning("getOrigPages: not in pageinfo!")
  404:             return None
  405:         
  406:         return pageinfo['pageNumberOrig']
  407:     
  408:     # WTF: is this needed?
  409:     def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
  410:         logging.debug("CALLED: getOrigPagesNorm!")
  411:         if not pageinfo.has_key('pageNumberOrigNorm'):
  412:             logging.warning("getOrigPagesNorm: not in pageinfo!")
  413:             return None
  414:         
  415:         return pageinfo['pageNumberOrigNorm']
  416:                 
  417:     # TODO: should be getWordInfo
  418:     def getTranslate(self, word=None, language=None):
  419:         """translate into another languages"""
  420:         data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
  421:         return data
  422:     
  423:     # WTF: what does this do?
  424:     def getLemma(self, lemma=None, language=None):
  425:         """simular words lemma """
  426:         data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
  427:         return data
  428:     
  429:     # WTF: what does this do?
  430:     def getLemmaQuery(self, query=None, language=None):
  431:         """simular words lemma """
  432:         data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
  433:         return data
  434:     
  435:     # WTF: what does this do?
  436:     def getLex(self, query=None, language=None):
  437:         """returns lexicon entries (similar words) for query in the given language"""
  438:         data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
  439:         return data
  440: 
  441:     # WTF: what does this do?
  442:     def getQuery(self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
  443:         """returns the number of search result pages for the current query"""
  444:         docpath = docinfo['textURLPath']
  445:         pagesize = pageinfo['queryPageSize']
  446:         pn = pageinfo['searchPN']
  447:         query = pageinfo['query']
  448:         queryType = pageinfo['queryType']
  449:         tocSearch = 0
  450:         tocDiv = None
  451:         
  452:         pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
  453:         pagedom = Parse(pagexml)
  454:         numdivs = pagedom.xpath("//div[@class='queryResultHits']")
  455:         tocSearch = int(getTextFromNode(numdivs[0]))
  456:         tc = int((tocSearch/10)+1)
  457:         return tc
  458:     
  459:     def getToc(self, mode="text", docinfo=None):
  460:         """loads table of contents and stores XML in docinfo"""
  461:         logging.debug("getToc mode=%s"%mode)
  462:         if mode == "none":
  463:             return docinfo
  464:               
  465:         if 'tocSize_%s'%mode in docinfo:
  466:             # cached toc
  467:             return docinfo
  468:         
  469:         docpath = docinfo['textURLPath']
  470:         # we need to set a result set size
  471:         pagesize = 1000
  472:         pn = 1
  473:         if mode == "text":
  474:             queryType = "toc"
  475:         else:
  476:             queryType = mode
  477:         # number of entries in toc
  478:         tocSize = 0
  479:         tocDiv = None
  480:         # fetch full toc
  481:         pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
  482:         dom = ET.fromstring(pagexml)
  483:         # page content is in <div class="queryResultPage">
  484:         pagediv = None
  485:         # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
  486:         alldivs = dom.findall("div")
  487:         for div in alldivs:
  488:             dc = div.get('class')
  489:             # page content div
  490:             if dc == 'queryResultPage':
  491:                 pagediv = div
  492:                 
  493:             elif dc == 'queryResultHits':
  494:                 docinfo['tocSize_%s'%mode] = getInt(div.text)
  495: 
  496:         if pagediv is not None:
  497:             # store XML in docinfo
  498:             docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
  499: 
  500:         return docinfo
  501:     
  502:     def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
  503:         """returns single page from the table of contents"""
  504:         logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
  505:         if mode == "text":
  506:             queryType = "toc"
  507:         else:
  508:             queryType = mode
  509:             
  510:         # check for cached TOC
  511:         if not docinfo.has_key('tocXML_%s'%mode):
  512:             self.getToc(mode=mode, docinfo=docinfo)
  513:             
  514:         tocxml = docinfo.get('tocXML_%s'%mode, None)
  515:         if not tocxml:
  516:             logging.error("getTocPage: unable to find tocXML")
  517:             return "No ToC"
  518:         
  519:         pagesize = int(pageinfo['tocPageSize'])
  520:         url = docinfo['url']
  521:         urlmode = docinfo['mode']
  522:         selfurl = docinfo['viewerUrl']
  523:         viewMode=  pageinfo['viewMode']
  524:         tocMode = pageinfo['tocMode']
  525:         tocPN = int(pageinfo['tocPN'])
  526:         pn = tocPN
  527: 
  528:         fulltoc = ET.fromstring(tocxml)
  529:         
  530:         if fulltoc is not None:
  531:             # paginate
  532:             start = (pn - 1) * pagesize * 2
  533:             size = pagesize * 2
  534:             del fulltoc[:start]
  535:             del fulltoc[size:]
  536:             tocdivs = fulltoc
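            # Worked example (assuming tocPageSize=30 and that the factor 2 means two
            # child elements per TOC entry): for tocPN=2, start=60 and size=60, so
            # children 60..119 of the full TOC remain after the two del statements above.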
  537:             
  538:             # check all a-tags
  539:             links = tocdivs.findall(".//a")
  540:             for l in links:
  541:                 href = l.get('href')
  542:                 if href:
  543:                     # take pn from href
  544:                     m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
  545:                     if m is not None:
  546:                         # and create new url
  547:                         l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN))
  548:                     else:
  549:                         logging.warning("getTocPage: Problem with link=%s"%href)
  550:                         
  551:             return serialize(tocdivs)
  552:     
  553:     
  554:     def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
  555:         """change settings"""
  556:         self.title=title
  557:         self.timeout = timeout
  558:         self.serverUrl = serverUrl
  559:         if RESPONSE is not None:
  560:             RESPONSE.redirect('manage_main')
  561:         
  562: # management methods
  563: def manage_addMpdlXmlTextServerForm(self):
  564:     """Form for adding"""
  565:     pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
  566:     return pt()
  567: 
  568: def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
  569: #def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):    
  570:     """add zogiimage"""
  571:     newObj = MpdlXmlTextServer(id,title,serverUrl,timeout)
  572:     self.Destination()._setObject(id, newObj)
  573:     if RESPONSE is not None:
  574:         RESPONSE.redirect('manage_main')
  575:         
  576:         
