version 1.238.2.1, 2011/07/14 17:43:56
|
version 1.238.2.5, 2011/07/29 18:36:04
|
Line 1
|
Line 1
|
|
|
from OFS.SimpleItem import SimpleItem |
from OFS.SimpleItem import SimpleItem |
from Products.PageTemplates.PageTemplateFile import PageTemplateFile |
from Products.PageTemplates.PageTemplateFile import PageTemplateFile |
|
|
from Ft.Xml import EMPTY_NAMESPACE, Parse |
from Ft.Xml import EMPTY_NAMESPACE, Parse |
from Ft.Xml.Domlette import NonvalidatingReader |
from Ft.Xml.Domlette import NonvalidatingReader |
import Ft.Xml.Domlette |
import Ft.Xml.Domlette |
Line 8 import cStringIO
|
Line 9 import cStringIO
|
|
|
import xml.etree.ElementTree as ET |
import xml.etree.ElementTree as ET |
|
|
import md5 |
import re |
import sys |
|
import logging |
import logging |
import urllib |
import urllib |
import documentViewer |
|
#from documentViewer import getTextFromNode, serializeNode |
|
|
|
def getText(node):
    """Return the text (cdata) content of an ElementTree node.

    Concatenates node.text, the recursive text of all child elements,
    and each child's tail text. Returns "" for a None node.
    """
    if node is None:
        return ""
    # ET: element text, then children (recursively) with their tails
    text = node.text or ""
    for e in node:
        # BUG FIX: was gettext(e) (undefined name) -- recurse with getText
        text += getText(e)
        if e.tail:
            text += e.tail
    return text
|
|
|
def serialize(node): |
def serialize(node): |
"""returns a string containing an XML snippet of node""" |
"""returns a string containing an XML snippet of node""" |
Line 82 class MpdlXmlTextServer(SimpleItem):
|
Line 69 class MpdlXmlTextServer(SimpleItem):
|
|
|
manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals()) |
manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals()) |
|
|
def __init__(self,id,title="",serverUrl="http://mpdl-system.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40): |
def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40): |
#def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40): |
|
|
|
"""constructor""" |
"""constructor""" |
self.id=id |
self.id=id |
self.title=title |
self.title=title |
Line 96 class MpdlXmlTextServer(SimpleItem):
|
Line 81 class MpdlXmlTextServer(SimpleItem):
|
|
|
def getHttpData(self, url, data=None):
    """returns result from url+data HTTP request"""
    # delegate to the module-level getHttpData helper (SrvTxtUtils),
    # using this server's configured timeout
    return getHttpData(url, data, timeout=self.timeout)
|
def getServerData(self, method, data=None):
    """returns result from text server for method+data"""
    # build the full request URL from the configured server base URL
    url = self.serverUrl + method
    return getHttpData(url, data, timeout=self.timeout)
|
# WTF: what does this really do? can it be integrated in getPage? |
def getSearch(self, pageinfo=None, docinfo=None): |
def getSearch(self, pageinfo=None, docinfo=None): |
"""get search list""" |
"""get search list""" |
|
logging.debug("getSearch()") |
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
url = docinfo['url'] |
url = docinfo['url'] |
pagesize = pageinfo['queryPageSize'] |
pagesize = pageinfo['queryPageSize'] |
Line 207 class MpdlXmlTextServer(SimpleItem):
|
Line 194 class MpdlXmlTextServer(SimpleItem):
|
hrefList=[] |
hrefList=[] |
myList= "" |
myList= "" |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn)) |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn)) |
dom = Parse(text) |
dom = ET.fromstring(text) |
result = dom.xpath("//result/resultPage/place") |
result = dom.findall(".//result/resultPage/place") |
for l in result: |
for l in result: |
hrefNode= l.getAttributeNodeNS(None, u"id") |
href = l.get("id") |
href= hrefNode.nodeValue |
|
hrefList.append(href) |
hrefList.append(href) |
|
# WTF: what does this do? |
myList = ",".join(hrefList) |
myList = ",".join(hrefList) |
#logging.debug("getGisPlaces :%s"%(myList)) |
#logging.debug("getGisPlaces :%s"%(myList)) |
return myList |
return myList |
Line 227 class MpdlXmlTextServer(SimpleItem):
|
Line 214 class MpdlXmlTextServer(SimpleItem):
|
hrefList=[] |
hrefList=[] |
myList="" |
myList="" |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath)) |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath)) |
dom =Parse(text) |
dom = ET.fromstring(text) |
result = dom.xpath("//result/resultPage/place") |
result = dom.findall(".//result/resultPage/place") |
|
|
for l in result: |
for l in result: |
hrefNode = l.getAttributeNodeNS(None, u"id") |
href = l.get("id") |
href= hrefNode.nodeValue |
|
hrefList.append(href) |
hrefList.append(href) |
|
# WTF: what does this do? |
myList = ",".join(hrefList) |
myList = ",".join(hrefList) |
#logging.debug("getALLGisPlaces :%s"%(myList)) |
#logging.debug("getALLGisPlaces :%s"%(myList)) |
return myList |
return myList |
|
|
|
def processPageInfo(self, dom, docinfo, pageinfo):
    """processes page info divs from dom and stores in docinfo and pageinfo"""
    # walk all divs and dispatch on their class attribute
    pagediv = None
    for div in dom.findall(".//div"):
        dc = div.get('class')
        if dc == 'pageContent':
            # page content div
            pagediv = div
        elif dc == 'pageNumberOrig':
            pageinfo['pageNumberOrig'] = div.text
        elif dc == 'pageNumberOrigNorm':
            pageinfo['pageNumberOrigNorm'] = div.text
        elif dc == 'countFigureEntries':
            docinfo['countFigureEntries'] = getInt(div.text)
        elif dc == 'countTocEntries':
            # WTF: s1 = int(s)/30+1
            docinfo['countTocEntries'] = getInt(div.text)
        elif dc == 'countPages':
            # numTextPages
            np = getInt(div.text)
            if np > 0:
                docinfo['numTextPages'] = np
                if docinfo.get('numPages', 0) == 0:
                    # seems to be text-only - update page count
                    docinfo['numPages'] = np
                    pageinfo['end'] = min(pageinfo['end'], np)
                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
                    if np % pageinfo['groupsize'] > 0:
                        pageinfo['numgroups'] += 1
    return
|
|
|
def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None): |
def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None): |
"""returns single page from fulltext""" |
"""returns single page from fulltext""" |
|
logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn)) |
|
# check for cached text -- but this shouldn't be called twice |
|
if pageinfo.has_key('textPage'): |
|
logging.debug("getTextPage: using cached text") |
|
return pageinfo['textPage'] |
|
|
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
path = docinfo['textURLPath'] |
# just checking |
url = docinfo.get('url',None) |
if pageinfo['current'] != pn: |
name = docinfo.get('name',None) |
logging.warning("getTextPage: current!=pn!") |
pn =pageinfo['current'] |
|
sn = pageinfo['sn'] |
# stuff for constructing full urls |
#optionToggle =pageinfo ['optionToggle'] |
url = docinfo['url'] |
highlightQuery = pageinfo['highlightQuery'] |
urlmode = docinfo['mode'] |
#mode = pageinfo ['viewMode'] |
sn = pageinfo.get('sn', None) |
tocMode = pageinfo['tocMode'] |
highlightQuery = pageinfo.get('highlightQuery', None) |
characterNormalization=pageinfo['characterNormalization'] |
tocMode = pageinfo.get('tocMode', None) |
tocPN = pageinfo['tocPN'] |
tocPN = pageinfo.get('tocPN',None) |
selfurl = self.absolute_url() |
characterNormalization = pageinfo.get('characterNormalization', None) |
|
selfurl = docinfo['viewerUrl'] |
|
|
if mode == "text_dict": |
if mode == "text_dict": |
|
# text_dict is called textPollux in the backend |
textmode = "textPollux" |
textmode = "textPollux" |
else: |
else: |
textmode = mode |
textmode = mode |
|
|
textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization) |
textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization) |
if highlightQuery is not None: |
if highlightQuery: |
textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) |
textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) |
|
|
|
# fetch the page |
pagexml = self.getServerData("page-fragment.xql",textParam) |
pagexml = self.getServerData("page-fragment.xql",textParam) |
dom = ET.fromstring(pagexml) |
dom = ET.fromstring(pagexml) |
#dom = NonvalidatingReader.parseStream(pagexml) |
# extract additional info |
|
self.processPageInfo(dom, docinfo, pageinfo) |
#original Pages |
# page content is in <div class="pageContent"> |
#pagedivs = dom.xpath("//div[@class='pageNumberOrig']") |
|
|
|
"""if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"): |
|
if len(pagedivs)>0: |
|
docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0]) |
|
logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig'])) |
|
|
|
#original Pages Norm |
|
pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']") |
|
if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"): |
|
if len(pagedivs)>0: |
|
docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0]) |
|
logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm'])) |
|
""" |
|
#figureEntries |
|
# pagedivs = dom.xpath("//div[@class='countFigureEntries']") |
|
# if pagedivs == dom.xpath("//div[@class='countFigureEntries']"): |
|
# if len(pagedivs)>0: |
|
# docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0]) |
|
# s = getTextFromNode(pagedivs[0]) |
|
# if s=='0': |
|
# try: |
|
# docinfo['countFigureEntries'] = int(s) |
|
# except: |
|
# docinfo['countFigureEntries'] = 0 |
|
# else: |
|
# s1 = int(s)/30+1 |
|
# try: |
|
# docinfo['countFigureEntries'] = int(s1) |
|
# except: |
|
# docinfo['countFigureEntries'] = 0 |
|
# |
|
# #allPlaces |
|
# pagedivs = dom.xpath("//div[@class='countPlaces']") |
|
# if pagedivs == dom.xpath("//div[@class='countPlaces']"): |
|
# if len(pagedivs)>0: |
|
# docinfo['countPlaces']= getTextFromNode(pagedivs[0]) |
|
# s = getTextFromNode(pagedivs[0]) |
|
# try: |
|
# docinfo['countPlaces'] = int(s) |
|
# except: |
|
# docinfo['countPlaces'] = 0 |
|
# |
|
# #tocEntries |
|
# pagedivs = dom.xpath("//div[@class='countTocEntries']") |
|
# if pagedivs == dom.xpath("//div[@class='countTocEntries']"): |
|
# if len(pagedivs)>0: |
|
# docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0])) |
|
# s = getTextFromNode(pagedivs[0]) |
|
# if s=='0': |
|
# try: |
|
# docinfo['countTocEntries'] = int(s) |
|
# except: |
|
# docinfo['countTocEntries'] = 0 |
|
# else: |
|
# s1 = int(s)/30+1 |
|
# try: |
|
# docinfo['countTocEntries'] = int(s1) |
|
# except: |
|
# docinfo['countTocEntries'] = 0 |
|
|
|
#numTextPages |
|
#pagedivs = dom.xpath("//div[@class='countPages']") |
|
alldivs = dom.findall(".//div") |
|
pagediv = None |
pagediv = None |
|
# ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent'] |
|
alldivs = dom.findall(".//div") |
for div in alldivs: |
for div in alldivs: |
dc = div.get('class') |
dc = div.get('class') |
|
# page content div |
if dc == 'pageContent': |
if dc == 'pageContent': |
pagediv = div |
pagediv = div |
|
|
if dc == 'countPages': |
|
try: |
|
np = int(div.text) |
|
docinfo['numPages'] = np |
|
pageinfo['end'] = min(pageinfo['end'], np) |
|
pageinfo['numgroups'] = int(np / pageinfo['groupsize']) |
|
if np % pageinfo['groupsize'] > 0: |
|
pageinfo['numgroups'] += 1 |
|
|
|
except: |
|
docinfo['numPages'] = 0 |
|
|
|
break |
break |
|
|
# ROC: why? |
|
# else: |
|
# #no full text -- init to 0 |
|
# docinfo['pageNumberOrig'] = 0 |
|
# docinfo['countFigureEntries'] = 0 |
|
# docinfo['countPlaces'] = 0 |
|
# docinfo['countTocEntries'] = 0 |
|
# docinfo['numPages'] = 0 |
|
# docinfo['pageNumberOrigNorm'] = 0 |
|
# #return docinfo |
|
|
|
# plain text mode |
# plain text mode |
if mode == "text": |
if mode == "text": |
#pagedivs = dom.xpath("/div") |
|
if pagediv: |
if pagediv: |
links = pagediv.findall(".//a") |
links = pagediv.findall(".//a") |
for l in links: |
for l in links: |
href = l.get('href') |
href = l.get('href') |
if href and href.startswith('#note-'): |
if href and href.startswith('#note-'): |
href = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn)) |
href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)) |
l.set('href', href) |
l.set('href', href) |
logging.debug("page=%s"%ET.tostring(pagediv, 'UTF-8')) |
|
return serialize(pagediv) |
|
|
|
if mode == "xml": |
|
if pagediv: |
|
return serialize(pagediv) |
|
|
|
if mode == "pureXml": |
|
if pagediv: |
|
return serialize(pagediv) |
return serialize(pagediv) |
|
|
if mode == "gis": |
|
if pagediv: |
|
# check all a-tags |
|
links = pagediv.findall(".//a") |
|
for l in links: |
|
href = l.get('href') |
|
if href: |
|
if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'): |
|
l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)) |
|
l.set('target', '_blank') |
|
|
|
return serialize(pagenode) |
|
|
|
# text-with-links mode |
# text-with-links mode |
if mode == "text_dict": |
elif mode == "text_dict": |
if pagediv: |
if pagediv: |
# check all a-tags |
# check all a-tags |
links = pagediv.findall(".//a") |
links = pagediv.findall(".//a") |
Line 423 class MpdlXmlTextServer(SimpleItem):
|
Line 357 class MpdlXmlTextServer(SimpleItem):
|
l.set('ondblclick', 'popupWin.focus();') |
l.set('ondblclick', 'popupWin.focus();') |
|
|
if href.startswith('#note-'): |
if href.startswith('#note-'): |
l.set('href', href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))) |
l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))) |
|
|
|
return serialize(pagediv) |
|
|
|
# xml mode |
|
elif mode == "xml": |
|
if pagediv: |
|
return serialize(pagediv) |
|
|
|
# pureXml mode |
|
elif mode == "pureXml": |
|
if pagediv: |
|
return serialize(pagediv) |
|
|
|
# gis mode |
|
elif mode == "gis": |
|
name = docinfo['name'] |
|
if pagediv: |
|
# check all a-tags |
|
links = pagediv.findall(".//a") |
|
for l in links: |
|
href = l.get('href') |
|
if href: |
|
if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'): |
|
l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)) |
|
l.set('target', '_blank') |
|
|
return serialize(pagediv) |
return serialize(pagediv) |
|
|
return "no text here" |
return "no text here" |
|
|
|
# WTF: is this needed?
def getOrigPages(self, docinfo=None, pageinfo=None):
    """Return the original page number from pageinfo (set by processPageInfo).

    Returns None (and logs a warning) if pageNumberOrig is not present.
    """
    logging.debug("CALLED: getOrigPages!")
    # use the "in" operator instead of deprecated dict.has_key
    if 'pageNumberOrig' not in pageinfo:
        logging.warning("getOrigPages: not in pageinfo!")
        return None
    return pageinfo['pageNumberOrig']
|
# WTF: is this needed?
def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
    """Return the normalized original page number from pageinfo.

    Returns None (and logs a warning) if pageNumberOrigNorm is not present.
    """
    logging.debug("CALLED: getOrigPagesNorm!")
    # use the "in" operator instead of deprecated dict.has_key
    if 'pageNumberOrigNorm' not in pageinfo:
        logging.warning("getOrigPagesNorm: not in pageinfo!")
        return None
    return pageinfo['pageNumberOrigNorm']
|
|
|
# TODO: should be getWordInfo
def getTranslate(self, word=None, language=None):
    """translate into another languages"""
    # query the text server's word-info service for the given word/language
    params = "language=" + str(language) + "&word=" + urllib.quote(word) + "&output=html"
    return self.getServerData("lt/wordInfo.xql", params)
|
|
|
# WTF: what does this do?
def getLemma(self, lemma=None, language=None):
    """simular words lemma """
    # look up similar words for a lemma on the text server
    params = "language=" + str(language) + "&lemma=" + urllib.quote(lemma) + "&output=html"
    return self.getServerData("lt/lemma.xql", params)
|
|
|
# WTF: what does this do?
def getLemmaQuery(self, query=None, language=None):
    """simular words lemma """
    # same lemma service as getLemma, but keyed by a free query string
    params = "language=" + str(language) + "&query=" + urllib.quote(query) + "&output=html"
    return self.getServerData("lt/lemma.xql", params)
|
|
|
# WTF: what does this do?
def getLex(self, query=None, language=None):
    # simular words lemma
    # dictionary (lexicon) lookup on the text server
    params = "document=&language=" + str(language) + "&query=" + urllib.quote(query)
    return self.getServerData("lt/lex.xql", params)
|
|
|
# WTF: what does this do? |
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): |
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): |
#number of |
#number of |
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
Line 493 class MpdlXmlTextServer(SimpleItem):
|
Line 449 class MpdlXmlTextServer(SimpleItem):
|
return tc |
return tc |
|
|
def getToc(self, mode="text", docinfo=None): |
def getToc(self, mode="text", docinfo=None): |
"""loads table of contents and stores in docinfo""" |
"""loads table of contents and stores XML in docinfo""" |
|
logging.debug("getToc mode=%s"%mode) |
if mode == "none": |
if mode == "none": |
return docinfo |
return docinfo |
|
|
if 'tocSize_%s'%mode in docinfo: |
if 'tocSize_%s'%mode in docinfo: |
# cached toc |
# cached toc |
return docinfo |
return docinfo |
Line 511 class MpdlXmlTextServer(SimpleItem):
|
Line 469 class MpdlXmlTextServer(SimpleItem):
|
# number of entries in toc |
# number of entries in toc |
tocSize = 0 |
tocSize = 0 |
tocDiv = None |
tocDiv = None |
|
# fetch full toc |
pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn)) |
pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn)) |
|
dom = ET.fromstring(pagexml) |
|
# page content is in <div class="queryResultPage"> |
|
pagediv = None |
|
# ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage'] |
|
alldivs = dom.findall("div") |
|
for div in alldivs: |
|
dc = div.get('class') |
|
# page content div |
|
if dc == 'queryResultPage': |
|
pagediv = div |
|
|
|
elif dc == 'queryResultHits': |
|
docinfo['tocSize_%s'%mode] = getInt(div.text) |
|
|
|
if pagediv: |
|
# store XML in docinfo |
|
docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8') |
|
|
# post-processing downloaded xml |
|
pagedom = Parse(pagexml) |
|
# get number of entries |
|
numdivs = pagedom.xpath("//div[@class='queryResultHits']") |
|
if len(numdivs) > 0: |
|
tocSize = int(getTextFromNode(numdivs[0])) |
|
docinfo['tocSize_%s'%mode] = tocSize |
|
return docinfo |
return docinfo |
|
|
def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
    """returns single page from the table of contents

    Uses the full ToC XML cached in docinfo (fetching it via getToc if
    missing), paginates it and rewrites the entry links to viewer URLs.
    """
    logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn))
    if mode == "text":
        queryType = "toc"
    else:
        queryType = mode

    # check for cached TOC (use "in" instead of deprecated dict.has_key)
    if 'tocXML_%s'%mode not in docinfo:
        self.getToc(mode=mode, docinfo=docinfo)

    tocxml = docinfo.get('tocXML_%s'%mode, None)
    if not tocxml:
        logging.error("getTocPage: unable to find tocXML")
        return "No ToC"

    pagesize = int(pageinfo['tocPageSize'])
    url = docinfo['url']
    urlmode = docinfo['mode']
    selfurl = docinfo['viewerUrl']
    viewMode = pageinfo['viewMode']
    tocMode = pageinfo['tocMode']
    tocPN = int(pageinfo['tocPN'])
    pn = tocPN

    fulltoc = ET.fromstring(tocxml)
    # explicit child-count check (Element truthiness is deprecated)
    if len(fulltoc) > 0:
        # paginate: each ToC entry consists of two child elements
        start = (pn - 1) * pagesize * 2
        # renamed from "len" -- shadowed the builtin
        limit = pagesize * 2
        del fulltoc[:start]
        del fulltoc[limit:]
        tocdivs = fulltoc

        # check all a-tags
        for l in tocdivs.findall(".//a"):
            href = l.get('href')
            if href:
                # take pn from href
                m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
                if m is not None:
                    # and create new url
                    l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN))
                else:
                    logging.warning("getTocPage: Problem with link=%s"%href)

        return serialize(tocdivs)
|
|
|
def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None): |
def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None): |
#def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None): |
|
"""change settings""" |
"""change settings""" |
self.title=title |
self.title=title |
self.timeout = timeout |
self.timeout = timeout |
Line 569 def manage_addMpdlXmlTextServer(self,id,
|
Line 564 def manage_addMpdlXmlTextServer(self,id,
|
self.Destination()._setObject(id, newObj) |
self.Destination()._setObject(id, newObj) |
if RESPONSE is not None: |
if RESPONSE is not None: |
RESPONSE.redirect('manage_main') |
RESPONSE.redirect('manage_main') |
|
|
|
|
|
|
|
|