# MpdlXmlTextServer -- TextServer implementation for the MPDL-XML eXist backend.
#
# NOTE(review): this file contained unresolved CVS merge residue between
# version 1.238.2.5 (2011/07/29 18:36:04) and version 1.245 (2012/03/20 14:09:17),
# with both versions' lines interleaved and ' |' column markers appended.
# The definitions below reconcile to the newer (1.245, ElementTree-based)
# version where both sides were visible -- confirm against version control.
|
import re
import sys
import logging
import urllib
import cStringIO
import md5

import xml.etree.ElementTree as ET

from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile

from Ft.Xml import EMPTY_NAMESPACE, Parse
from Ft.Xml.Domlette import NonvalidatingReader
import Ft.Xml.Domlette

import documentViewer
from SrvTxtUtils import getInt, getText, getHttpData
# NOTE(review): getTextFromNode/serializeNode are also defined locally below;
# the local definitions shadow this import (merge leftover -- reconcile).
from documentViewer import getTextFromNode, serializeNode
|
|
def serialize(node):
    """returns a string containing an XML snippet of node

    Serializes the ElementTree *node* with UTF-8 encoding and snips off
    the leading XML declaration that ET.tostring() emits when an encoding
    is given.
    """
    s = ET.tostring(node, 'UTF-8')
    # ET.tostring() returns a byte string when an encoding is passed,
    # so compare against bytes (works on Python 2 and 3)
    if s.startswith(b'<?xml'):
        i = s.find(b'?>')
        # skip past '?>' and the newline that follows the declaration
        return s[i+3:]

    return s
|
|
|
|
|
def getTextFromNode(node):
    """get the cdata content of a node

    4Suite DOM version: concatenates the data of all *direct* text-node
    children of *node*. Returns "" for None.
    """
    if node is None:
        return ""

    # join instead of repeated string += (avoids quadratic concatenation)
    return "".join(child.data for child in node.childNodes
                   if child.nodeType == node.TEXT_NODE)
|
|
|
def serializeNode(node, encoding="utf-8"):
    """returns a string containing node as XML

    4Suite Domlette version: prints *node* into an in-memory buffer via
    Ft.Xml.Domlette.Print (the ElementTree counterpart is serialize()).
    """
    stream = cStringIO.StringIO()
    Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
    s = stream.getvalue()
    stream.close()
    return s
|
|
|
|
|
class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""

    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())

    def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
        """constructor

        serverUrl -- base URL of the eXist text server interface
        serverName -- if given, overrides serverUrl's host part
        timeout -- HTTP timeout in seconds for server requests
        """
        self.id=id
        self.title=title
        self.timeout = timeout
        # NOTE(review): tail of __init__ reconstructed from merge residue --
        # confirm serverUrl/serverName handling against version control
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpdl/interface/"%serverName
|
def getHttpData(self, url, data=None): |
def getHttpData(self, url, data=None): |
"""returns result from url+data HTTP request""" |
"""returns result from url+data HTTP request""" |
return getHttpData(url,data,timeout=self.timeout) |
return documentViewer.getHttpData(url,data,timeout=self.timeout) |
|
|
def getServerData(self, method, data=None): |
def getServerData(self, method, data=None): |
"""returns result from text server for method+data""" |
"""returns result from text server for method+data""" |
url = self.serverUrl+method |
url = self.serverUrl+method |
return getHttpData(url,data,timeout=self.timeout) |
return documentViewer.getHttpData(url,data,timeout=self.timeout) |
|
|
# WTF: what does this really do? can it be integrated in getPage? |
|
def getSearch(self, pageinfo=None, docinfo=None): |
def getSearch(self, pageinfo=None, docinfo=None): |
"""get search list""" |
"""get search list""" |
logging.debug("getSearch()") |
|
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
url = docinfo['url'] |
url = docinfo['url'] |
pagesize = pageinfo['queryPageSize'] |
pagesize = pageinfo['queryPageSize'] |
pn = pageinfo.get('searchPN',1) |
pn = pageinfo.get('searchPN',1) |
sn = pageinfo['sn'] |
#sn = pageinfo['sn'] |
|
s = pageinfo['s'] |
|
highlightElementPos =pageinfo ['highlightElementPos'] |
|
highlightElement = pageinfo ['highlightElement'] |
|
|
highlightQuery = pageinfo['highlightQuery'] |
highlightQuery = pageinfo['highlightQuery'] |
query =pageinfo['query'] |
query =pageinfo['query'] |
queryType =pageinfo['queryType'] |
queryType =pageinfo['queryType'] |
Line 106 class MpdlXmlTextServer(SimpleItem):
|
Line 62 class MpdlXmlTextServer(SimpleItem):
|
#optionToggle = pageinfo['optionToggle'] |
#optionToggle = pageinfo['optionToggle'] |
tocPN = pageinfo['tocPN'] |
tocPN = pageinfo['tocPN'] |
selfurl = self.absolute_url() |
selfurl = self.absolute_url() |
data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery))) |
data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery))) |
|
#data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode,characterNormalization, urllib.quote(highlightQuery))) |
pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url) |
pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url) |
pagedom = Parse(pagexml) |
pagedom = Parse(pagexml) |
|
|
Line 154 class MpdlXmlTextServer(SimpleItem):
|
Line 111 class MpdlXmlTextServer(SimpleItem):
|
if href.startswith('../lt/lemma.xql'): |
if href.startswith('../lt/lemma.xql'): |
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl)) |
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl)) |
l.setAttributeNS(None, 'target', '_blank') |
l.setAttributeNS(None, 'target', '_blank') |
l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;") |
l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;") |
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') |
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') |
pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']") |
pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']") |
return serializeNode(pagenode) |
return serializeNode(pagenode) |
Line 176 class MpdlXmlTextServer(SimpleItem):
|
Line 133 class MpdlXmlTextServer(SimpleItem):
|
if href.startswith('../lt/lemma.xql'): |
if href.startswith('../lt/lemma.xql'): |
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl)) |
hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl)) |
l.setAttributeNS(None, 'target', '_blank') |
l.setAttributeNS(None, 'target', '_blank') |
l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;") |
l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=300,height=400,top=180, left=400, scrollbars=1'); return false;") |
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') |
l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();') |
return serializeNode(pagenode) |
return serializeNode(pagenode) |
return "no text here" |
return "no text here" |
Line 194 class MpdlXmlTextServer(SimpleItem):
|
Line 151 class MpdlXmlTextServer(SimpleItem):
|
hrefList=[] |
hrefList=[] |
myList= "" |
myList= "" |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn)) |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn)) |
dom = ET.fromstring(text) |
dom = Parse(text) |
result = dom.findall(".//result/resultPage/place") |
result = dom.xpath("//result/resultPage/place") |
for l in result: |
for l in result: |
href = l.get("id") |
hrefNode= l.getAttributeNodeNS(None, u"id") |
|
href= hrefNode.nodeValue |
hrefList.append(href) |
hrefList.append(href) |
# WTF: what does this do? |
|
myList = ",".join(hrefList) |
myList = ",".join(hrefList) |
#logging.debug("getGisPlaces :%s"%(myList)) |
#logging.debug("getGisPlaces :%s"%(myList)) |
return myList |
return myList |
Line 214 class MpdlXmlTextServer(SimpleItem):
|
Line 171 class MpdlXmlTextServer(SimpleItem):
|
hrefList=[] |
hrefList=[] |
myList="" |
myList="" |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath)) |
text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath)) |
dom = ET.fromstring(text) |
dom =Parse(text) |
result = dom.findall(".//result/resultPage/place") |
result = dom.xpath("//result/resultPage/place") |
|
|
for l in result: |
for l in result: |
href = l.get("id") |
hrefNode = l.getAttributeNodeNS(None, u"id") |
|
href= hrefNode.nodeValue |
hrefList.append(href) |
hrefList.append(href) |
# WTF: what does this do? |
|
myList = ",".join(hrefList) |
myList = ",".join(hrefList) |
#logging.debug("getALLGisPlaces :%s"%(myList)) |
#logging.debug("getALLGisPlaces :%s"%(myList)) |
return myList |
return myList |
|
|
def processPageInfo(self, dom, docinfo, pageinfo): |
|
"""processes page info divs from dom and stores in docinfo and pageinfo""" |
|
# process all toplevel divs |
|
alldivs = dom.findall(".//div") |
|
pagediv = None |
|
for div in alldivs: |
|
dc = div.get('class') |
|
|
|
# page content div |
|
if dc == 'pageContent': |
|
pagediv = div |
|
|
|
# pageNumberOrig |
|
elif dc == 'pageNumberOrig': |
|
pageinfo['pageNumberOrig'] = div.text |
|
|
|
# pageNumberOrigNorm |
|
elif dc == 'pageNumberOrigNorm': |
|
pageinfo['pageNumberOrigNorm'] = div.text |
|
|
|
# pageNumberOrigNorm |
|
elif dc == 'countFigureEntries': |
|
docinfo['countFigureEntries'] = getInt(div.text) |
|
|
|
# pageNumberOrigNorm |
|
elif dc == 'countTocEntries': |
|
# WTF: s1 = int(s)/30+1 |
|
docinfo['countTocEntries'] = getInt(div.text) |
|
|
|
# numTextPages |
|
elif dc == 'countPages': |
|
np = getInt(div.text) |
|
if np > 0: |
|
docinfo['numTextPages'] = np |
|
if docinfo.get('numPages', 0) == 0: |
|
# seems to be text-only - update page count |
|
docinfo['numPages'] = np |
|
pageinfo['end'] = min(pageinfo['end'], np) |
|
pageinfo['numgroups'] = int(np / pageinfo['groupsize']) |
|
if np % pageinfo['groupsize'] > 0: |
|
pageinfo['numgroups'] += 1 |
|
|
|
return |
|
|
|
|
|
def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None): |
def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None): |
"""returns single page from fulltext""" |
"""returns single page from fulltext""" |
logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn)) |
|
# check for cached text -- but this shouldn't be called twice |
|
if pageinfo.has_key('textPage'): |
|
logging.debug("getTextPage: using cached text") |
|
return pageinfo['textPage'] |
|
|
|
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
# just checking |
path = docinfo['textURLPath'] |
if pageinfo['current'] != pn: |
url = docinfo.get('url',None) |
logging.warning("getTextPage: current!=pn!") |
name = docinfo.get('name',None) |
|
pn =pageinfo['current'] |
# stuff for constructing full urls |
#sn = pageinfo['sn'] |
url = docinfo['url'] |
s = pageinfo['s'] |
urlmode = docinfo['mode'] |
highlightElementPos =pageinfo ['highlightElementPos'] |
sn = pageinfo.get('sn', None) |
highlightElement = pageinfo ['highlightElement'] |
highlightQuery = pageinfo.get('highlightQuery', None) |
#optionToggle =pageinfo ['optionToggle'] |
tocMode = pageinfo.get('tocMode', None) |
highlightQuery = pageinfo['highlightQuery'] |
tocPN = pageinfo.get('tocPN',None) |
#mode = pageinfo ['viewMode'] |
characterNormalization = pageinfo.get('characterNormalization', None) |
tocMode = pageinfo['tocMode'] |
selfurl = docinfo['viewerUrl'] |
xpointer = pageinfo['xpointer'] |
|
characterNormalization=pageinfo['characterNormalization'] |
|
tocPN = pageinfo['tocPN'] |
|
selfurl = self.absolute_url() |
|
|
if mode == "text_dict": |
if mode == "text_dict": |
# text_dict is called textPollux in the backend |
|
textmode = "textPollux" |
textmode = "textPollux" |
else: |
else: |
textmode = mode |
textmode = mode |
|
|
textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization) |
textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s"%(docpath,textmode,pn,characterNormalization, xpointer) |
if highlightQuery: |
#textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s&xpointer=%s&options=withIdentifier"%(docpath,textmode,pn,characterNormalization, xpointer) |
textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) |
if highlightQuery is not None: |
|
#textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) |
|
textParam +="&highlightQuery=%s&s=%s&highlightElement=%s&highlightElementPos=%s"%(urllib.quote(highlightQuery),s, highlightElement, highlightElementPos) |
|
|
# fetch the page |
|
pagexml = self.getServerData("page-fragment.xql",textParam) |
pagexml = self.getServerData("page-fragment.xql",textParam) |
dom = ET.fromstring(pagexml) |
dom = Parse(pagexml) |
# extract additional info |
#dom = NonvalidatingReader.parseStream(pagexml) |
self.processPageInfo(dom, docinfo, pageinfo) |
|
# page content is in <div class="pageContent"> |
#original Pages |
pagediv = None |
pagedivs = dom.xpath("//div[@class='pageNumberOrig']") |
# ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent'] |
|
alldivs = dom.findall(".//div") |
"""if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"): |
for div in alldivs: |
if len(pagedivs)>0: |
dc = div.get('class') |
docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0]) |
# page content div |
logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig'])) |
if dc == 'pageContent': |
|
pagediv = div |
#original Pages Norm |
break |
pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']") |
|
if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"): |
|
if len(pagedivs)>0: |
|
docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0]) |
|
logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm'])) |
|
""" |
|
#figureEntries |
|
pagedivs = dom.xpath("//div[@class='countFigureEntries']") |
|
if pagedivs == dom.xpath("//div[@class='countFigureEntries']"): |
|
if len(pagedivs)>0: |
|
docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0]) |
|
s = getTextFromNode(pagedivs[0]) |
|
if s=='0': |
|
try: |
|
docinfo['countFigureEntries'] = int(s) |
|
except: |
|
docinfo['countFigureEntries'] = 0 |
|
else: |
|
s1 = int(s)/30+1 |
|
try: |
|
docinfo['countFigureEntries'] = int(s1) |
|
except: |
|
docinfo['countFigureEntries'] = 0 |
|
|
|
#allPlaces |
|
pagedivs = dom.xpath("//div[@class='countPlaces']") |
|
if pagedivs == dom.xpath("//div[@class='countPlaces']"): |
|
if len(pagedivs)>0: |
|
docinfo['countPlaces']= getTextFromNode(pagedivs[0]) |
|
s = getTextFromNode(pagedivs[0]) |
|
try: |
|
docinfo['countPlaces'] = int(s) |
|
except: |
|
docinfo['countPlaces'] = 0 |
|
|
|
#tocEntries |
|
pagedivs = dom.xpath("//div[@class='countTocEntries']") |
|
if pagedivs == dom.xpath("//div[@class='countTocEntries']"): |
|
if len(pagedivs)>0: |
|
docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0])) |
|
s = getTextFromNode(pagedivs[0]) |
|
if s=='0': |
|
try: |
|
docinfo['countTocEntries'] = int(s) |
|
except: |
|
docinfo['countTocEntries'] = 0 |
|
else: |
|
s1 = int(s)/30+1 |
|
try: |
|
docinfo['countTocEntries'] = int(s1) |
|
except: |
|
docinfo['countTocEntries'] = 0 |
|
|
|
#numTextPages |
|
pagedivs = dom.xpath("//div[@class='countPages']") |
|
if pagedivs == dom.xpath("//div[@class='countPages']"): |
|
if len(pagedivs)>0: |
|
docinfo['numPages'] = getTextFromNode(pagedivs[0]) |
|
s = getTextFromNode(pagedivs[0]) |
|
|
|
try: |
|
docinfo['numPages'] = int(s) |
|
#logging.debug("PAGE NUMBER: %s"%(s)) |
|
|
|
np = docinfo['numPages'] |
|
pageinfo['end'] = min(pageinfo['end'], np) |
|
pageinfo['numgroups'] = int(np / pageinfo['groupsize']) |
|
if np % pageinfo['groupsize'] > 0: |
|
pageinfo['numgroups'] += 1 |
|
except: |
|
docinfo['numPages'] = 0 |
|
|
|
else: |
|
#no full text -- init to 0 |
|
docinfo['pageNumberOrig'] = 0 |
|
docinfo['countFigureEntries'] = 0 |
|
docinfo['countPlaces'] = 0 |
|
docinfo['countTocEntries'] = 0 |
|
docinfo['numPages'] = 0 |
|
docinfo['pageNumberOrigNorm'] = 0 |
|
#return docinfo |
|
|
# plain text mode |
# plain text mode |
if mode == "text": |
if mode == "text": |
if pagediv: |
# first div contains text |
links = pagediv.findall(".//a") |
pagedivs = dom.xpath("/div") |
|
if len(pagedivs) > 0: |
|
pagenode = pagedivs[0] |
|
links = pagenode.xpath("//a") |
for l in links: |
for l in links: |
href = l.get('href') |
hrefNode = l.getAttributeNodeNS(None, u"href") |
if href and href.startswith('#note-'): |
if hrefNode: |
href = href.replace('#note-',"?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn)) |
href= hrefNode.nodeValue |
l.set('href', href) |
if href.startswith('#note-'): |
|
hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn)) |
return serialize(pagediv) |
#if href.startswith(): |
|
return serializeNode(pagenode) |
|
if mode == "xml": |
|
# first div contains text |
|
pagedivs = dom.xpath("/div") |
|
if len(pagedivs) > 0: |
|
pagenode = pagedivs[0] |
|
return serializeNode(pagenode) |
|
if mode == "gis": |
|
# first div contains text |
|
pagedivs = dom.xpath("/div") |
|
if len(pagedivs) > 0: |
|
pagenode = pagedivs[0] |
|
links =pagenode.xpath("//a") |
|
for l in links: |
|
hrefNode =l.getAttributeNodeNS(None, u"href") |
|
if hrefNode: |
|
href=hrefNode.nodeValue |
|
if href.startswith('http://mappit.mpiwg-berlin.mpg.de'): |
|
hrefNode.nodeValue =href.replace('db/REST/db/chgis/mpdl','db/RESTdb/db/mpdl/%s'%name) |
|
l.setAttributeNS(None, 'target', '_blank') |
|
return serializeNode(pagenode) |
|
|
|
if mode == "pureXml": |
|
# first div contains text |
|
pagedivs = dom.xpath("/div") |
|
if len(pagedivs) > 0: |
|
pagenode = pagedivs[0] |
|
return serializeNode(pagenode) |
# text-with-links mode |
# text-with-links mode |
elif mode == "text_dict": |
if mode == "text_dict": |
if pagediv: |
# first div contains text |
|
#mode = pageinfo ['viewMode'] |
|
pagedivs = dom.xpath("/div") |
|
if len(pagedivs) > 0: |
|
pagenode = pagedivs[0] |
# check all a-tags |
# check all a-tags |
links = pagediv.findall(".//a") |
links = pagenode.xpath("//a") |
|
|
for l in links: |
for l in links: |
href = l.get('href') |
hrefNode = l.getAttributeNodeNS(None, u"href") |
|
|
if href: |
if hrefNode: |
# is link with href |
# is link with href |
if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'): |
href = hrefNode.nodeValue |
|
if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'): |
# is pollux link |
# is pollux link |
selfurl = self.absolute_url() |
selfurl = self.absolute_url() |
# change href |
# change href |
l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)) |
hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl) |
# add target |
# add target |
l.set('target', '_blank') |
l.setAttributeNS(None, 'target', '_blank') |
|
#l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;") |
|
#l.setAttributeNS(None, "ondblclick", "popupWin.focus();") |
|
#window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;") |
|
|
if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'): |
if href.startswith('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'): |
selfurl = self.absolute_url() |
selfurl = self.absolute_url() |
l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)) |
hrefNode.nodeValue = href.replace('http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl) |
l.set('target', '_blank') |
l.setAttributeNS(None, 'target', '_blank') |
l.set('onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;") |
l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=300,height=400,top=180, left=700, toolbar=no, scrollbars=1'); return false;") |
l.set('ondblclick', 'popupWin.focus();') |
l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();') |
|
|
if href.startswith('#note-'): |
if href.startswith('#note-'): |
l.set('href', href.replace('#note-',"?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode,url,tocMode,tocPN,pn))) |
hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn)) |
|
|
return serialize(pagediv) |
|
|
|
# xml mode |
|
elif mode == "xml": |
|
if pagediv: |
|
return serialize(pagediv) |
|
|
|
# pureXml mode |
|
elif mode == "pureXml": |
|
if pagediv: |
|
return serialize(pagediv) |
|
|
|
# gis mode |
|
elif mode == "gis": |
|
name = docinfo['name'] |
|
if pagediv: |
|
# check all a-tags |
|
links = pagediv.findall(".//a") |
|
for l in links: |
|
href = l.get('href') |
|
if href: |
|
if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'): |
|
l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)) |
|
l.set('target', '_blank') |
|
|
|
return serialize(pagediv) |
|
|
|
|
return serializeNode(pagenode) |
return "no text here" |
return "no text here" |
|
|
# WTF: is this needed? |
|
def getOrigPages(self, docinfo=None, pageinfo=None): |
def getOrigPages(self, docinfo=None, pageinfo=None): |
logging.debug("CALLED: getOrigPages!") |
docpath = docinfo['textURLPath'] |
if not pageinfo.has_key('pageNumberOrig'): |
pn =pageinfo['current'] |
logging.warning("getOrigPages: not in pageinfo!") |
selfurl = self.absolute_url() |
return None |
pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn)) |
|
dom = Parse(pagexml) |
return pageinfo['pageNumberOrig'] |
pagedivs = dom.xpath("//div[@class='pageNumberOrig']") |
|
if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"): |
|
if len(pagedivs)>0: |
|
docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0]) |
|
return docinfo['pageNumberOrig'] |
|
|
# WTF: is this needed? |
|
def getOrigPagesNorm(self, docinfo=None, pageinfo=None): |
def getOrigPagesNorm(self, docinfo=None, pageinfo=None): |
logging.debug("CALLED: getOrigPagesNorm!") |
docpath = docinfo['textURLPath'] |
if not pageinfo.has_key('pageNumberOrigNorm'): |
pn =pageinfo['current'] |
logging.warning("getOrigPagesNorm: not in pageinfo!") |
selfurl = self.absolute_url() |
return None |
pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn)) |
|
dom = Parse(pagexml) |
|
pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']") |
|
if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"): |
|
if len(pagedivs)>0: |
|
docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0]) |
|
return docinfo['pageNumberOrigNorm'] |
|
|
return pageinfo['pageNumberOrigNorm'] |
|
|
|
# TODO: should be getWordInfo |
def getTranslate(self, word=None, language=None, display=None): |
def getTranslate(self, word=None, language=None): |
|
"""translate into another languages""" |
"""translate into another languages""" |
data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html") |
data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&display="+urllib.quote(display)+"&output=html") |
|
#pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query))) |
return data |
return data |
|
|
# WTF: what does this do? |
|
def getLemma(self, lemma=None, language=None): |
def getLemma(self, lemma=None, language=None): |
"""simular words lemma """ |
"""simular words lemma """ |
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html") |
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html") |
return data |
return data |
|
|
# WTF: what does this do? |
|
def getLemmaQuery(self, query=None, language=None): |
def getLemmaQuery(self, query=None, language=None): |
"""simular words lemma """ |
"""simular words lemma """ |
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html") |
data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html") |
return data |
return data |
|
|
# WTF: what does this do? |
|
def getLex(self, query=None, language=None): |
def getLex(self, query=None, language=None): |
#simular words lemma |
#simular words lemma |
data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query)) |
data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query)) |
return data |
return data |
|
|
# WTF: what does this do? |
|
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): |
def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1): |
#number of |
#number of |
docpath = docinfo['textURLPath'] |
docpath = docinfo['textURLPath'] |
Line 449 class MpdlXmlTextServer(SimpleItem):
|
Line 455 class MpdlXmlTextServer(SimpleItem):
|
return tc |
return tc |
|
|
def getToc(self, mode="text", docinfo=None): |
def getToc(self, mode="text", docinfo=None): |
"""loads table of contents and stores XML in docinfo""" |
"""loads table of contents and stores in docinfo""" |
logging.debug("getToc mode=%s"%mode) |
|
if mode == "none": |
if mode == "none": |
return docinfo |
return docinfo |
|
|
if 'tocSize_%s'%mode in docinfo: |
if 'tocSize_%s'%mode in docinfo: |
# cached toc |
# cached toc |
return docinfo |
return docinfo |
Line 469 class MpdlXmlTextServer(SimpleItem):
|
Line 473 class MpdlXmlTextServer(SimpleItem):
|
# number of entries in toc |
# number of entries in toc |
tocSize = 0 |
tocSize = 0 |
tocDiv = None |
tocDiv = None |
# fetch full toc |
|
pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn)) |
pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn)) |
dom = ET.fromstring(pagexml) |
|
# page content is in <div class="queryResultPage"> |
|
pagediv = None |
|
# ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage'] |
|
alldivs = dom.findall("div") |
|
for div in alldivs: |
|
dc = div.get('class') |
|
# page content div |
|
if dc == 'queryResultPage': |
|
pagediv = div |
|
|
|
elif dc == 'queryResultHits': |
|
docinfo['tocSize_%s'%mode] = getInt(div.text) |
|
|
|
if pagediv: |
|
# store XML in docinfo |
|
docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8') |
|
|
|
|
# post-processing downloaded xml |
|
pagedom = Parse(pagexml) |
|
# get number of entries |
|
numdivs = pagedom.xpath("//div[@class='queryResultHits']") |
|
if len(numdivs) > 0: |
|
tocSize = int(getTextFromNode(numdivs[0])) |
|
docinfo['tocSize_%s'%mode] = tocSize |
return docinfo |
return docinfo |
|
|
def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None): |
def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None): |
"""returns single page from the table of contents""" |
"""returns single page from the table of contents""" |
logging.debug("getTocPage mode=%s, pn=%s"%(mode,pn)) |
# TODO: this should use the cached TOC |
if mode == "text": |
if mode == "text": |
queryType = "toc" |
queryType = "toc" |
else: |
else: |
queryType = mode |
queryType = mode |
|
docpath = docinfo['textURLPath'] |
# check for cached TOC |
path = docinfo['textURLPath'] |
if not docinfo.has_key('tocXML_%s'%mode): |
pagesize = pageinfo['tocPageSize'] |
self.getToc(mode=mode, docinfo=docinfo) |
pn = pageinfo['tocPN'] |
|
|
tocxml = docinfo.get('tocXML_%s'%mode, None) |
|
if not tocxml: |
|
logging.error("getTocPage: unable to find tocXML") |
|
return "No ToC" |
|
|
|
pagesize = int(pageinfo['tocPageSize']) |
|
url = docinfo['url'] |
url = docinfo['url'] |
urlmode = docinfo['mode'] |
selfurl = self.absolute_url() |
selfurl = docinfo['viewerUrl'] |
|
viewMode= pageinfo['viewMode'] |
viewMode= pageinfo['viewMode'] |
|
characterNormalization = pageinfo ['characterNormalization'] |
|
#optionToggle =pageinfo ['optionToggle'] |
tocMode = pageinfo['tocMode'] |
tocMode = pageinfo['tocMode'] |
tocPN = int(pageinfo['tocPN']) |
tocPN = pageinfo['tocPN'] |
pn = tocPN |
|
|
|
fulltoc = ET.fromstring(tocxml) |
|
|
|
if fulltoc: |
|
# paginate |
|
start = (pn - 1) * pagesize * 2 |
|
len = pagesize * 2 |
|
del fulltoc[:start] |
|
del fulltoc[len:] |
|
tocdivs = fulltoc |
|
|
|
# check all a-tags |
|
links = tocdivs.findall(".//a") |
|
for l in links: |
|
href = l.get('href') |
|
if href: |
|
# take pn from href |
|
m = re.match(r'page-fragment\.xql.*pn=(\d+)', href) |
|
if m is not None: |
|
# and create new url |
|
l.set('href', '%s?mode=%s&url=%s&viewMode=%s&pn=%s&tocMode=%s&tocPN=%s'%(selfurl, urlmode, url, viewMode, m.group(1), tocMode, tocPN)) |
|
else: |
|
logging.warning("getTocPage: Problem with link=%s"%href) |
|
|
|
return serialize(tocdivs) |
|
|
|
|
data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn)) |
|
page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN)) |
|
text = page.replace('mode=image','mode=texttool') |
|
return text |
|
|
def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None): |
def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None): |
|
#def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None): |
"""change settings""" |
"""change settings""" |
self.title=title |
self.title=title |
self.timeout = timeout |
self.timeout = timeout |
Line 564 def manage_addMpdlXmlTextServer(self,id,
|
Line 531 def manage_addMpdlXmlTextServer(self,id,
|
self.Destination()._setObject(id, newObj) |
self.Destination()._setObject(id, newObj) |
if RESPONSE is not None: |
if RESPONSE is not None: |
RESPONSE.redirect('manage_main') |
RESPONSE.redirect('manage_main') |
|
|
|
|
|
|