--- documentViewer/MpdlXmlTextServer.py 2011/07/15 19:34:41 1.238.2.2
+++ documentViewer/MpdlXmlTextServer.py 2011/09/28 13:39:28 1.244
@@ -1,83 +1,15 @@
from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
-
from Ft.Xml import EMPTY_NAMESPACE, Parse
from Ft.Xml.Domlette import NonvalidatingReader
-import Ft.Xml.Domlette
-import cStringIO
-
-import xml.etree.ElementTree as ET
-import re
+import md5
+import sys
import logging
import urllib
import documentViewer
-#from documentViewer import getTextFromNode, serializeNode
-
-def intOr0(s, default=0):
- """convert s to int or return default"""
- try:
- return int(s)
- except:
- return default
-
-def getText(node):
- """get the cdata content of a node"""
- if node is None:
- return ""
- # ET:
- text = node.text or ""
- for e in node:
- text += gettext(e)
- if e.tail:
- text += e.tail
-
- return text
-
-def serialize(node):
- """returns a string containing an XML snippet of node"""
- s = ET.tostring(node, 'UTF-8')
- # snip off XML declaration
- if s.startswith('')
- return s[i+3:]
-
- return s
-
-
-def getTextFromNode(node):
- """get the cdata content of a node"""
- if node is None:
- return ""
- # ET:
-# text = node.text or ""
-# for e in node:
-# text += gettext(e)
-# if e.tail:
-# text += e.tail
-
- # 4Suite:
- nodelist=node.childNodes
- text = ""
- for n in nodelist:
- if n.nodeType == node.TEXT_NODE:
- text = text + n.data
-
- return text
-
-def serializeNode(node, encoding="utf-8"):
- """returns a string containing node as XML"""
- #s = ET.tostring(node)
-
- # 4Suite:
- stream = cStringIO.StringIO()
- Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
- s = stream.getvalue()
- stream.close()
-
- return s
-
+from documentViewer import getTextFromNode, serializeNode
class MpdlXmlTextServer(SimpleItem):
"""TextServer implementation for MPDL-XML eXist server"""
@@ -89,7 +21,8 @@ class MpdlXmlTextServer(SimpleItem):
manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
- def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
+ def __init__(self,id,title="",serverUrl="http://mpdl-test.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
+ #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):
"""constructor"""
self.id=id
@@ -109,15 +42,17 @@ class MpdlXmlTextServer(SimpleItem):
url = self.serverUrl+method
return documentViewer.getHttpData(url,data,timeout=self.timeout)
- # WTF: what does this really do? can it be integrated in getPage?
def getSearch(self, pageinfo=None, docinfo=None):
"""get search list"""
- logging.debug("getSearch()")
docpath = docinfo['textURLPath']
url = docinfo['url']
pagesize = pageinfo['queryPageSize']
pn = pageinfo.get('searchPN',1)
- sn = pageinfo['sn']
+ #sn = pageinfo['sn']
+ s = pageinfo['s']
+ highlightElementPos =pageinfo ['highlightElementPos']
+ highlightElement = pageinfo ['highlightElement']
+
highlightQuery = pageinfo['highlightQuery']
query =pageinfo['query']
queryType =pageinfo['queryType']
@@ -127,7 +62,8 @@ class MpdlXmlTextServer(SimpleItem):
#optionToggle = pageinfo['optionToggle']
tocPN = pageinfo['tocPN']
selfurl = self.absolute_url()
- data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
+ data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
+ #data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery)))
pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
pagedom = Parse(pagexml)
@@ -175,7 +111,7 @@ class MpdlXmlTextServer(SimpleItem):
if href.startswith('../lt/lemma.xql'):
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
l.setAttributeNS(None, 'target', '_blank')
- l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
+ l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
return serializeNode(pagenode)
@@ -197,7 +133,7 @@ class MpdlXmlTextServer(SimpleItem):
if href.startswith('../lt/lemma.xql'):
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
l.setAttributeNS(None, 'target', '_blank')
- l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
+ l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;")
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
return serializeNode(pagenode)
return "no text here"
@@ -215,12 +151,12 @@ class MpdlXmlTextServer(SimpleItem):
hrefList=[]
myList= ""
text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
- dom = ET.fromstring(text)
- result = dom.findall(".//result/resultPage/place")
+ dom = Parse(text)
+ result = dom.xpath("//result/resultPage/place")
for l in result:
- href = l.get("id")
+ hrefNode= l.getAttributeNodeNS(None, u"id")
+ href= hrefNode.nodeValue
hrefList.append(href)
- # WTF: what does this do?
myList = ",".join(hrefList)
#logging.debug("getGisPlaces :%s"%(myList))
return myList
@@ -235,223 +171,271 @@ class MpdlXmlTextServer(SimpleItem):
hrefList=[]
myList=""
text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
- dom = ET.fromstring(text)
- result = dom.findall(".//result/resultPage/place")
+ dom =Parse(text)
+ result = dom.xpath("//result/resultPage/place")
for l in result:
- href = l.get("id")
+ hrefNode = l.getAttributeNodeNS(None, u"id")
+ href= hrefNode.nodeValue
hrefList.append(href)
- # WTF: what does this do?
myList = ",".join(hrefList)
#logging.debug("getALLGisPlaces :%s"%(myList))
return myList
- def processPageInfo(self, dom, docinfo, pageinfo):
- """processes page info divs from dom and stores in docinfo and pageinfo"""
- # process all toplevel divs
- alldivs = dom.findall(".//div")
- pagediv = None
- for div in alldivs:
- dc = div.get('class')
-
- # page content div
- if dc == 'pageContent':
- pagediv = div
-
- # pageNumberOrig
- elif dc == 'pageNumberOrig':
- pageinfo['pageNumberOrig'] = div.text
-
- # pageNumberOrigNorm
- elif dc == 'pageNumberOrigNorm':
- pageinfo['pageNumberOrigNorm'] = div.text
-
- # pageNumberOrigNorm
- elif dc == 'countFigureEntries':
- docinfo['countFigureEntries'] = intOr0(div.text)
-
- # pageNumberOrigNorm
- elif dc == 'countTocEntries':
- # WTF: s1 = int(s)/30+1
- docinfo['countTocEntries'] = intOr0(div.text)
-
- # numTextPages
- elif dc == 'countPages':
- np = intOr0(div.text)
- if np > 0:
- docinfo['numTextPages'] = np
- if docinfo.get('numPages', 0) == 0:
- # seems to be text-only
- docinfo['numTextPages'] = np
- pageinfo['end'] = min(pageinfo['end'], np)
- pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
- if np % pageinfo['groupsize'] > 0:
- pageinfo['numgroups'] += 1
-
- return
-
def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
"""returns single page from fulltext"""
- logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
- # check for cached text -- but this shouldn't be called twice
- if pageinfo.has_key('textPage'):
- logging.debug("getTextPage: using cached text")
- return pageinfo['textPage']
-
docpath = docinfo['textURLPath']
- # just checking
- if pageinfo['current'] != pn:
- logging.warning("getTextPage: current!=pn!")
-
- # stuff for constructing full urls
- url = docinfo['url']
- urlmode = docinfo['mode']
- sn = pageinfo.get('sn', None)
- highlightQuery = pageinfo.get('highlightQuery', None)
- tocMode = pageinfo.get('tocMode', None)
- tocPN = pageinfo.get('tocPN',None)
- characterNormalization = pageinfo.get('characterNormalization', None)
- selfurl = docinfo['viewerUrl']
+ path = docinfo['textURLPath']
+ url = docinfo.get('url',None)
+ name = docinfo.get('name',None)
+ pn =pageinfo['current']
+ #sn = pageinfo['sn']
+ s = pageinfo['s']
+ highlightElementPos =pageinfo ['highlightElementPos']
+ highlightElement = pageinfo ['highlightElement']
+ #optionToggle =pageinfo ['optionToggle']
+ highlightQuery = pageinfo['highlightQuery']
+ #mode = pageinfo ['viewMode']
+ tocMode = pageinfo['tocMode']
+ xpointer = pageinfo['xpointer']
+ characterNormalization=pageinfo['characterNormalization']
+ tocPN = pageinfo['tocPN']
+ selfurl = self.absolute_url()
if mode == "text_dict":
- # text_dict is called textPollux in the backend
textmode = "textPollux"
else:
textmode = mode
- textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
- if highlightQuery:
- textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)
+ textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s&options=withIdentifier"%(docpath,textmode,pn,characterNormalization, xpointer)
+ if highlightQuery is not None:
+ #textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)
+ textParam +="&highlightQuery=%s&s=%s&highlightElement=%s&highlightElementPos=%s"%(urllib.quote(highlightQuery),s, highlightElement, highlightElementPos)
- # fetch the page
pagexml = self.getServerData("page-fragment.xql",textParam)
- dom = ET.fromstring(pagexml)
- # extract additional info
- self.processPageInfo(dom, docinfo, pageinfo)
- # page content is in
- pagediv = None
- # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
- alldivs = dom.findall(".//div")
- for div in alldivs:
- dc = div.get('class')
- # page content div
- if dc == 'pageContent':
- pagediv = div
- break
+ dom = Parse(pagexml)
+ #dom = NonvalidatingReader.parseStream(pagexml)
+
+ #original Pages
+ pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
+
+ """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
+ if len(pagedivs)>0:
+ docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
+ logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))
+
+ #original Pages Norm
+ pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
+ if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
+ if len(pagedivs)>0:
+ docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
+ logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
+ """
+ #figureEntries
+ pagedivs = dom.xpath("//div[@class='countFigureEntries']")
+ if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
+ if len(pagedivs)>0:
+ docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
+ s = getTextFromNode(pagedivs[0])
+ if s=='0':
+ try:
+ docinfo['countFigureEntries'] = int(s)
+ except:
+ docinfo['countFigureEntries'] = 0
+ else:
+ s1 = int(s)/30+1
+ try:
+ docinfo['countFigureEntries'] = int(s1)
+ except:
+ docinfo['countFigureEntries'] = 0
+
+ #allPlaces
+ pagedivs = dom.xpath("//div[@class='countPlaces']")
+ if pagedivs == dom.xpath("//div[@class='countPlaces']"):
+ if len(pagedivs)>0:
+ docinfo['countPlaces']= getTextFromNode(pagedivs[0])
+ s = getTextFromNode(pagedivs[0])
+ try:
+ docinfo['countPlaces'] = int(s)
+ except:
+ docinfo['countPlaces'] = 0
+
+ #tocEntries
+ pagedivs = dom.xpath("//div[@class='countTocEntries']")
+ if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
+ if len(pagedivs)>0:
+ docinfo['countTocEntries'] = getTextFromNode(pagedivs[0])
+ s = getTextFromNode(pagedivs[0])
+ if s=='0':
+ try:
+ docinfo['countTocEntries'] = int(s)
+ except:
+ docinfo['countTocEntries'] = 0
+ else:
+ s1 = int(s)/30+1
+ try:
+ docinfo['countTocEntries'] = int(s1)
+ except:
+ docinfo['countTocEntries'] = 0
+
+ #numTextPages
+ pagedivs = dom.xpath("//div[@class='countPages']")
+ if pagedivs == dom.xpath("//div[@class='countPages']"):
+ if len(pagedivs)>0:
+ docinfo['numPages'] = getTextFromNode(pagedivs[0])
+ s = getTextFromNode(pagedivs[0])
+
+ try:
+ docinfo['numPages'] = int(s)
+ #logging.debug("PAGE NUMBER: %s"%(s))
+
+ np = docinfo['numPages']
+ pageinfo['end'] = min(pageinfo['end'], np)
+ pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
+ if np % pageinfo['groupsize'] > 0:
+ pageinfo['numgroups'] += 1
+ except:
+ docinfo['numPages'] = 0
+
+ else:
+ #no full text -- init to 0
+ docinfo['pageNumberOrig'] = 0
+ docinfo['countFigureEntries'] = 0
+ docinfo['countPlaces'] = 0
+ docinfo['countTocEntries'] = 0
+ docinfo['numPages'] = 0
+ docinfo['pageNumberOrigNorm'] = 0
+ #return docinfo
# plain text mode
if mode == "text":
- if pagediv:
- links = pagediv.findall(".//a")
+ # first div contains text
+ pagedivs = dom.xpath("/div")
+ if len(pagedivs) > 0:
+ pagenode = pagedivs[0]
+ links = pagenode.xpath("//a")
for l in links:
- href = l.get('href')
- if href and href.startswith('#note-'):
- href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))
- l.set('href', href)
-
- return serialize(pagediv)
-
+ hrefNode = l.getAttributeNodeNS(None, u"href")
+ if hrefNode:
+ href= hrefNode.nodeValue
+ if href.startswith('#note-'):
+ hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
+ #if href.startswith():
+ return serializeNode(pagenode)
+ if mode == "xml":
+ # first div contains text
+ pagedivs = dom.xpath("/div")
+ if len(pagedivs) > 0:
+ pagenode = pagedivs[0]
+ return serializeNode(pagenode)
+ if mode == "gis":
+ # first div contains text
+ pagedivs = dom.xpath("/div")
+ if len(pagedivs) > 0:
+ pagenode = pagedivs[0]
+ links =pagenode.xpath("//a")
+ for l in links:
+ hrefNode =l.getAttributeNodeNS(None, u"href")
+ if hrefNode:
+ href=hrefNode.nodeValue
+ if href.startswith('http://mappit.mpiwg-berlin.mpg.de'):
+ hrefNode.nodeValue =href.replace('db/REST/db/chgis/mpdl','db/RESTdb/db/mpdl/%s'%name)
+ l.setAttributeNS(None, 'target', '_blank')
+ return serializeNode(pagenode)
+
+ if mode == "pureXml":
+ # first div contains text
+ pagedivs = dom.xpath("/div")
+ if len(pagedivs) > 0:
+ pagenode = pagedivs[0]
+ return serializeNode(pagenode)
# text-with-links mode
- elif mode == "text_dict":
- if pagediv:
+ if mode == "text_dict":
+ # first div contains text
+ #mode = pageinfo ['viewMode']
+ pagedivs = dom.xpath("/div")
+ if len(pagedivs) > 0:
+ pagenode = pagedivs[0]
# check all a-tags
- links = pagediv.findall(".//a")
+ links = pagenode.xpath("//a")
+
for l in links:
- href = l.get('href')
+ hrefNode = l.getAttributeNodeNS(None, u"href")
- if href:
+ if hrefNode:
# is link with href
- if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
+ href = hrefNode.nodeValue
+ if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
# is pollux link
selfurl = self.absolute_url()
# change href
- l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl))
+ hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
# add target
- l.set('target', '_blank')
+ l.setAttributeNS(None, 'target', '_blank')
+ #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
+ #l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
+ #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")
- if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
+ if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
selfurl = self.absolute_url()
- l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl))
- l.set('target', '_blank')
- l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
- l.set('ondblclick', 'popupWin.focus();')
+ hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
+ l.setAttributeNS(None, 'target', '_blank')
+ l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=300,height=400,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
+ l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')
if href.startswith('#note-'):
- l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)))
+ hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
- return serialize(pagediv)
-
- # xml mode
- elif mode == "xml":
- if pagediv:
- return serialize(pagediv)
-
- # pureXml mode
- elif mode == "pureXml":
- if pagediv:
- return serialize(pagediv)
-
- # gis mode
- elif mode == "gis":
- name = docinfo['name']
- if pagediv:
- # check all a-tags
- links = pagediv.findall(".//a")
- for l in links:
- href = l.get('href')
- if href:
- if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
- l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name))
- l.set('target', '_blank')
-
- return serialize(pagediv)
-
+ return serializeNode(pagenode)
return "no text here"
- # WTF: is this needed?
def getOrigPages(self, docinfo=None, pageinfo=None):
- logging.debug("CALLED: getOrigPages!")
- if not pageinfo.has_key('pageNumberOrig'):
- logging.warning("getOrigPages: not in pageinfo!")
- return None
-
- return pageinfo['pageNumberOrig']
+ docpath = docinfo['textURLPath']
+ pn =pageinfo['current']
+ selfurl = self.absolute_url()
+ pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
+ dom = Parse(pagexml)
+ pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
+ if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
+ if len(pagedivs)>0:
+ docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
+ return docinfo['pageNumberOrig']
- # WTF: is this needed?
def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
- logging.debug("CALLED: getOrigPagesNorm!")
- if not pageinfo.has_key('pageNumberOrigNorm'):
- logging.warning("getOrigPagesNorm: not in pageinfo!")
- return None
-
- return pageinfo['pageNumberOrigNorm']
+ docpath = docinfo['textURLPath']
+ pn =pageinfo['current']
+ selfurl = self.absolute_url()
+ pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
+ dom = Parse(pagexml)
+ pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
+ if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
+ if len(pagedivs)>0:
+ docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
+ return docinfo['pageNumberOrigNorm']
+
- # TODO: should be getWordInfo
- def getTranslate(self, word=None, language=None):
+ def getTranslate(self, word=None, language=None, display=None):
"""translate into another languages"""
- data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
+ data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&display="+urllib.quote(display or '')+"&output=html")
+ #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
return data
- # WTF: what does this do?
def getLemma(self, lemma=None, language=None):
"""simular words lemma """
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
return data
- # WTF: what does this do?
def getLemmaQuery(self, query=None, language=None):
"""simular words lemma """
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
return data
- # WTF: what does this do?
def getLex(self, query=None, language=None):
#simular words lemma
data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
return data
-
- # WTF: what does this do?
+
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
#number of
docpath = docinfo['textURLPath']
@@ -470,11 +454,9 @@ class MpdlXmlTextServer(SimpleItem):
return tc
def getToc(self, mode="text", docinfo=None):
- """loads table of contents and stores XML in docinfo"""
- logging.debug("getToc mode=%s"%mode)
+ """loads table of contents and stores in docinfo"""
if mode == "none":
- return docinfo
-
+ return docinfo
if 'tocSize_%s'%mode in docinfo:
# cached toc
return docinfo
@@ -490,87 +472,44 @@ class MpdlXmlTextServer(SimpleItem):
# number of entries in toc
tocSize = 0
tocDiv = None
- # fetch full toc
+
pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
- dom = ET.fromstring(pagexml)
- # page content is in
- pagediv = None
- # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
- alldivs = dom.findall("div")
- for div in alldivs:
- dc = div.get('class')
- # page content div
- if dc == 'queryResultPage':
- pagediv = div
-
- elif dc == 'queryResultHits':
- docinfo['tocSize_%s'%mode] = intOr0(div.text)
-
- if pagediv:
-# # split xml in chunks
-# tocs = []
-# tocdivs = pagediv.findall('div')
-# for p in zip(tocdivs[::2], tocdivs[1::2]):
-# toc = serialize(p[0])
-# toc += serialize(p[1])
-# tocs.append(toc)
-# logging.debug("pair: %s"%(toc))
- # store XML in docinfo
- docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
-
+
+ # post-processing downloaded xml
+ pagedom = Parse(pagexml)
+ # get number of entries
+ numdivs = pagedom.xpath("//div[@class='queryResultHits']")
+ if len(numdivs) > 0:
+ tocSize = int(getTextFromNode(numdivs[0]))
+ docinfo['tocSize_%s'%mode] = tocSize
return docinfo
def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
"""returns single page from the table of contents"""
- logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
+ # TODO: this should use the cached TOC
if mode == "text":
queryType = "toc"
else:
queryType = mode
-
- # check for cached TOC
- if not docinfo.has_key('tocXML_%s'%mode):
- self.getToc(mode=mode, docinfo=docinfo)
-
- tocxml = docinfo.get('tocXML_%s'%mode, None)
- if not tocxml:
- logging.error("getTocPage: unable to find tocXML")
- return "No ToC"
-
- pagesize = int(pageinfo['tocPageSize'])
+ docpath = docinfo['textURLPath']
+ path = docinfo['textURLPath']
+ pagesize = pageinfo['tocPageSize']
+ pn = pageinfo['tocPN']
url = docinfo['url']
- urlmode = docinfo['mode']
- selfurl = docinfo['viewerUrl']
+ selfurl = self.absolute_url()
viewMode= pageinfo['viewMode']
+ characterNormalization = pageinfo ['characterNormalization']
+ #optionToggle =pageinfo ['optionToggle']
tocMode = pageinfo['tocMode']
- tocPN = int(pageinfo['tocPN'])
-
- fulltoc = ET.fromstring(tocxml)
+ tocPN = pageinfo['tocPN']
- if fulltoc:
- # paginate
- #start = (pn - 1) * pagesize * 2
- #end = start + pagesize * 2
- #tocdivs = fulltoc[start:end]
- tocdivs = fulltoc
-
- # check all a-tags
- links = tocdivs.findall(".//a")
- for l in links:
- href = l.get('href')
- if href:
- # take pn from href
- m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
- if m is not None:
- # and create new url
- l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN))
- else:
- logging.warning("getTocPage: Problem with link=%s"%href)
-
- return serialize(tocdivs)
-
+ data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
+ page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
+ text = page.replace('mode=image','mode=texttool')
+ return text
def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
+ #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
"""change settings"""
self.title=title
self.timeout = timeout
@@ -590,6 +529,4 @@ def manage_addMpdlXmlTextServer(self,id,
newObj = MpdlXmlTextServer(id,title,serverUrl,timeout)
self.Destination()._setObject(id, newObj)
if RESPONSE is not None:
- RESPONSE.redirect('manage_main')
-
-
\ No newline at end of file
+ RESPONSE.redirect('manage_main')
\ No newline at end of file