from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile

from Ft.Xml import EMPTY_NAMESPACE, Parse
from Ft.Xml.Domlette import NonvalidatingReader
import Ft.Xml.Domlette
import cStringIO

import xml.etree.ElementTree as ET

import re
import logging
import urllib

from SrvTxtUtils import getInt, getText, getHttpData


def serialize(node):
    """returns a string containing an XML snippet of node"""
    s = ET.tostring(node, 'UTF-8')
    # snip off XML declaration
    if s.startswith('<?xml'):
        i = s.find('?>')
        return s[i+3:]

    return s


def getTextFromNode(node):
    """get the cdata content of a node"""
    if node is None:
        return ""
    # 4Suite:
    nodelist = node.childNodes
    text = ""
    for n in nodelist:
        if n.nodeType == node.TEXT_NODE:
            text = text + n.data
    return text


def serializeNode(node, encoding="utf-8"):
    """returns a string containing node as XML"""
    #s = ET.tostring(node)
    # 4Suite:
    stream = cStringIO.StringIO()
    Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
    s = stream.getvalue()
    stream.close()
    return s


class MpdlXmlTextServer(SimpleItem):
    """TextServer implementation for MPDL-XML eXist server"""
    meta_type = "MPDL-XML TextServer"

    manage_options = (
        {'label':'Config', 'action':'manage_changeMpdlXmlTextServerForm'},
        ) + SimpleItem.manage_options

    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())

    def __init__(self, id, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
        """constructor"""
        self.id = id
        self.title = title
        self.timeout = timeout
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpdl/interface/" % serverName

    def getHttpData(self, url, data=None):
        """returns result from url+data HTTP request"""
        return getHttpData(url, data, timeout=self.timeout)

    def getServerData(self, method, data=None):
        """returns result from text server for method+data"""
        url = self.serverUrl + method
        return getHttpData(url, data, timeout=self.timeout)
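    # Illustrative example for getServerData above (values are hypothetical,
    # not taken from the original code): with serverUrl left at its default,
    # a call like
    #     self.getServerData("page-fragment.xql", "document=/foo.xml&mode=text&pn=3")
    # builds the URL
    #     http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/page-fragment.xql
    # and passes the parameter string to SrvTxtUtils.getHttpData.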
    # WTF: what does this really do? can it be integrated in getPage?
    def getSearch(self, pageinfo=None, docinfo=None):
        """get search list"""
        logging.debug("getSearch()")
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo.get('searchPN', 1)
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query = pageinfo['query']
        queryType = pageinfo['queryType']
        viewMode = pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        characterNormalization = pageinfo['characterNormalization']
        #optionToggle = pageinfo['optionToggle']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()

        data = self.getServerData("doc-query.xql", "document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode, characterNormalization, urllib.quote(highlightQuery)))
        pagexml = data.replace('?document=%s'%str(docpath), '?url=%s'%url)
        pagedom = Parse(pagexml)

        """
        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
            if len(pagedivs)>0:
                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
                s = getTextFromNode(pagedivs[0])
                s1 = int(s)/10+1
                try:
                    docinfo['queryResultHits'] = int(s1)
                    logging.debug("SEARCH ENTRIES: %s"%(s1))
                except:
                    docinfo['queryResultHits'] = 0
        """

        if (queryType == "fulltext") or (queryType == "xpath") or (queryType == "xquery") or (queryType == "fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml = href.replace('mode=text', 'mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode, queryType, urllib.quote(query), pagesize, pn, tocMode, pn, tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql', '%s'%selfurl)

                #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
                return serializeNode(pagenode)

        if (queryType == "fulltextMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            selfurl = self.absolute_url()
                            pagexml = href.replace('mode=text', 'mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode, queryType, urllib.quote(query), pagesize, pn, tocMode, pn, tocPN, characterNormalization))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql', '%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql', '%s/template/head_main_query'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick', "popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')

                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)

        if (queryType == "ftIndex") or (queryType == "ftIndexMorph"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue = href.replace('mode=text', 'mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode, tocMode, tocPN, pn, characterNormalization))
                        if href.startswith('../lt/lex.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lex.xql', '%s/template/head_main_lex'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick', "popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql', '%s/template/head_main_lemma'%(selfurl))
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick', "popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')

                return serializeNode(pagenode)

        return "no text here"

    def getGisPlaces(self, docinfo=None, pageinfo=None):
        """Show all Gis Places of whole Page"""
        xpath = '//place'
        docpath = docinfo.get('textURLPath', None)
        if not docpath:
            return None

        pn = pageinfo['current']
        hrefList = []
        myList = ""
        text = self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'], xpath, pn))
        dom = ET.fromstring(text)
        result = dom.findall(".//result/resultPage/place")
        for l in result:
            href = l.get("id")
            hrefList.append(href)
            # WTF: what does this do?
            myList = ",".join(hrefList)

        #logging.debug("getGisPlaces :%s"%(myList))
        return myList

    def getAllGisPlaces(self, docinfo=None, pageinfo=None):
        """Show all Gis Places of whole Book"""
        xpath = '//echo:place'
        hrefList = []
        myList = ""
        text = self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'], xpath))
        dom = ET.fromstring(text)
        result = dom.findall(".//result/resultPage/place")
        for l in result:
            href = l.get("id")
            hrefList.append(href)
            # WTF: what does this do?
myList = ",".join(hrefList) #logging.debug("getALLGisPlaces :%s"%(myList)) return myList def processPageInfo(self, dom, docinfo, pageinfo): """processes page info divs from dom and stores in docinfo and pageinfo""" # assume first second level div is pageMeta alldivs = dom.find("div") if alldivs is None or alldivs.get('class', '') != 'pageMeta': logging.error("processPageInfo: pageMeta div not found!") return for div in alldivs: dc = div.get('class') # pageNumberOrig if dc == 'pageNumberOrig': pageinfo['pageNumberOrig'] = div.text # pageNumberOrigNorm elif dc == 'pageNumberOrigNorm': pageinfo['pageNumberOrigNorm'] = div.text # pageHeaderTitle elif dc == 'pageHeaderTitle': pageinfo['pageHeaderTitle'] = div.text # numFigureEntries elif dc == 'countFigureEntries': docinfo['numFigureEntries'] = getInt(div.text) # numTocEntries elif dc == 'countTocEntries': # WTF: s1 = int(s)/30+1 docinfo['numTocEntries'] = getInt(div.text) # numPlaces elif dc == 'countPlaces': docinfo['numPlaces'] = getInt(div.text) # numTextPages elif dc == 'countPages': np = getInt(div.text) if np > 0: docinfo['numTextPages'] = np if docinfo.get('numPages', 0) == 0: # seems to be text-only - update page count docinfo['numPages'] = np #pageinfo['end'] = min(pageinfo['end'], np) pageinfo['numgroups'] = int(np / pageinfo['groupsize']) if np % pageinfo['groupsize'] > 0: pageinfo['numgroups'] += 1 #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo)) return def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None): """returns single page from fulltext""" logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn)) # check for cached text -- but this shouldn't be called twice if pageinfo.has_key('textPage'): logging.debug("getTextPage: using cached text") return pageinfo['textPage'] docpath = docinfo['textURLPath'] # just checking if pageinfo['current'] != pn: logging.warning("getTextPage: current!=pn!") # stuff for constructing full urls url = docinfo['url'] urlmode = docinfo['mode'] sn = pageinfo.get('sn', None) highlightQuery = pageinfo.get('highlightQuery', None) tocMode = pageinfo.get('tocMode', None) tocPN = pageinfo.get('tocPN',None) characterNormalization = pageinfo.get('characterNormalization', None) selfurl = docinfo['viewerUrl'] if mode == "dict" or mode == "text_dict": # dict is called textPollux in the backend textmode = "textPollux" elif not mode: # default is text mode = "text" textmode = "text" else: textmode = mode textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization) if highlightQuery: textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn) # fetch the page pagexml = self.getServerData("page-fragment.xql",textParam) dom = ET.fromstring(pagexml) # extract additional info self.processPageInfo(dom, docinfo, pageinfo) # page content is in
        pagediv = None
        # ElementTree 1.2 in Python 2.6 can't do div[@class='pageContent']
        # so we look at the second level divs
        alldivs = dom.findall("div")
        for div in alldivs:
            dc = div.get('class')
            # page content div
            if dc == 'pageContent':
                pagediv = div
                break

        # plain text mode
        if mode == "text":
            if pagediv is not None:
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href and href.startswith('#note-'):
                        href = href.replace('#note-', "?mode=%s&url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode, url, tocMode, tocPN, pn))
                        l.set('href', href)

                return serialize(pagediv)

        # text-with-links mode
        elif mode == "dict":
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')

                    if href:
                        # is link with href
                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql', '%s/head_main_voc'%selfurl))
                            # add target
                            l.set('target', '_blank')

                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
                            selfurl = self.absolute_url()
                            l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql', '%s/head_main_lemma'%selfurl))
                            l.set('target', '_blank')
                            l.set('onclick', "popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
                            l.set('ondblclick', 'popupWin.focus();')

                        if href.startswith('#note-'):
                            l.set('href', href.replace('#note-', "?mode=%s&url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(urlmode, url, tocMode, tocPN, pn)))

                return serialize(pagediv)

        # xml mode
        elif mode == "xml":
            if pagediv is not None:
                return serialize(pagediv)

        # pureXml mode
        elif mode == "pureXml":
            if pagediv is not None:
                return serialize(pagediv)

        # gis mode
        elif mode == "gis":
            name = docinfo['name']
            if pagediv is not None:
                # check all a-tags
                links = pagediv.findall(".//a")
                for l in links:
                    href = l.get('href')
                    if href:
                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
                            l.set('href', href.replace('chinagis_REST/REST/db/chgis/mpdl', 'chinagis/REST/db/mpdl/%s'%name))
                            l.set('target', '_blank')

                return serialize(pagediv)

        return "no text here"

    # WTF: is this needed?
    def getOrigPages(self, docinfo=None, pageinfo=None):
        logging.debug("CALLED: getOrigPages!")
        if not pageinfo.has_key('pageNumberOrig'):
            logging.warning("getOrigPages: not in pageinfo!")
            return None

        return pageinfo['pageNumberOrig']

    # WTF: is this needed?
    def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
        logging.debug("CALLED: getOrigPagesNorm!")
        if not pageinfo.has_key('pageNumberOrigNorm'):
            logging.warning("getOrigPagesNorm: not in pageinfo!")
            return None

        return pageinfo['pageNumberOrigNorm']

    # TODO: should be getWordInfo
    def getTranslate(self, word=None, language=None):
        """translate into another language"""
        data = self.getServerData("lt/wordInfo.xql", "language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
        return data

    # WTF: what does this do?
    def getLemma(self, lemma=None, language=None):
        """similar words lemma"""
        data = self.getServerData("lt/lemma.xql", "language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
        return data
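    # Illustrative example for getLemma above (parameter values are
    # hypothetical): a call like
    #     getLemma(lemma='anima', language='lat')
    # passes language=lat&lemma=anima&output=html to the text server's
    # lt/lemma.xql and returns the raw HTML response unchanged.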
    # WTF: what does this do?
    def getLemmaQuery(self, query=None, language=None):
        """similar words lemma"""
        data = self.getServerData("lt/lemma.xql", "language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
        return data

    # WTF: what does this do?
    def getLex(self, query=None, language=None):
        # similar words lemma
        data = self.getServerData("lt/lex.xql", "document=&language="+str(language)+"&query="+urllib.quote(query))
        return data

    # WTF: what does this do?
    def getQuery(self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
        # number of
        docpath = docinfo['textURLPath']
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo['searchPN']
        query = pageinfo['query']
        queryType = pageinfo['queryType']
        tocSearch = 0
        tocDiv = None

        pagexml = self.getServerData("doc-query.xql", "document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
        pagedom = Parse(pagexml)
        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
        tocSearch = int(getTextFromNode(numdivs[0]))
        tc = int((tocSearch/10)+1)
        return tc

    def getToc(self, mode="text", docinfo=None):
        """loads table of contents and stores XML in docinfo"""
        logging.debug("getToc mode=%s"%mode)
        if mode == "none":
            return docinfo

        if 'tocSize_%s'%mode in docinfo:
            # cached toc
            return docinfo

        docpath = docinfo['textURLPath']
        # we need to set a result set size
        pagesize = 1000
        pn = 1
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode

        # number of entries in toc
        tocSize = 0
        tocDiv = None
        # fetch full toc
        pagexml = self.getServerData("doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, queryType, pagesize, pn))
        dom = ET.fromstring(pagexml)
        # page content is in <div class="queryResultPage">
        pagediv = None
        # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
        alldivs = dom.findall("div")
        for div in alldivs:
            dc = div.get('class')
            # page content div
            if dc == 'queryResultPage':
                pagediv = div

            elif dc == 'queryResultHits':
                docinfo['tocSize_%s'%mode] = getInt(div.text)

        if pagediv:
            # store XML in docinfo
            docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')

        return docinfo

    def getTocPage(self, mode="text", pn=0, pageinfo=None, docinfo=None):
        """returns single page from the table of contents"""
        logging.debug("getTocPage mode=%s, pn=%s"%(mode, pn))
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode

        # check for cached TOC
        if not docinfo.has_key('tocXML_%s'%mode):
            self.getToc(mode=mode, docinfo=docinfo)

        tocxml = docinfo.get('tocXML_%s'%mode, None)
        if not tocxml:
            logging.error("getTocPage: unable to find tocXML")
            return "No ToC"

        pagesize = pageinfo['tocPageSize']
        tocPN = pageinfo['tocPN']
        if not pn:
            pn = tocPN

        fulltoc = ET.fromstring(tocxml)

        if fulltoc:
            # paginate
            start = (pn - 1) * pagesize * 2
            len = pagesize * 2
            del fulltoc[:start]
            del fulltoc[len:]
            tocdivs = fulltoc

            # check all a-tags
            links = tocdivs.findall(".//a")
            for l in links:
                href = l.get('href')
                if href:
                    # take pn from href
                    m = re.match(r'page-fragment\.xql.*pn=(\d+)', href)
                    if m is not None:
                        # and create new url (assuming parent is documentViewer)
                        url = self.getLink('pn', m.group(1))
                        l.set('href', url)
                    else:
                        logging.warning("getTocPage: Problem with link=%s"%href)

            return serialize(tocdivs)

    def manage_changeMpdlXmlTextServer(self, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", timeout=40, RESPONSE=None):
        """change settings"""
        self.title = title
        self.timeout = timeout
        self.serverUrl = serverUrl
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')


# management methods
def manage_addMpdlXmlTextServerForm(self):
    """Form for adding"""
    pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
    return pt()


def manage_addMpdlXmlTextServer(self, id, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", timeout=40, RESPONSE=None):
#def manage_addMpdlXmlTextServer(self, id, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", timeout=40, RESPONSE=None):
    """add a MpdlXmlTextServer"""
    newObj = MpdlXmlTextServer(id, title, serverUrl, timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
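
# ----------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the product code): the
# identifiers, paths, and parameter values below are hypothetical; in
# practice the documentViewer parent object creates the instance and
# fills in the docinfo/pageinfo dictionaries before calling these methods.
#
#   server = MpdlXmlTextServer('textserver', title='MPDL text server')
#   docinfo = {'textURLPath': '/permanent/example/document.xml',
#              'url': 'http://example.org/doc',
#              'mode': 'texttool',
#              'viewerUrl': 'http://example.org/docuviewer'}
#   pageinfo = {'current': 1, 'characterNormalization': 'reg'}
#   html = server.getTextPage(mode="text", pn=1,
#                             docinfo=docinfo, pageinfo=pageinfo)
# ----------------------------------------------------------------------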