--- documentViewer/Attic/MpdlXmlTextServer_old.py	2011/07/15 19:34:41	1.1
+++ documentViewer/Attic/MpdlXmlTextServer_old.py	2011/07/15 19:34:41	1.1.2.1
@@ -0,0 +1,520 @@
+
+from OFS.SimpleItem import SimpleItem
+from Products.PageTemplates.PageTemplateFile import PageTemplateFile
+from Ft.Xml import EMPTY_NAMESPACE, Parse
+from Ft.Xml.Domlette import NonvalidatingReader
+
+import md5
+import sys
+import logging
+import urllib
+import documentViewer
+from documentViewer import getTextFromNode, serializeNode
+
+class MpdlXmlTextServer(SimpleItem):
+    """TextServer implementation for MPDL-XML eXist server"""
+    meta_type="MPDL-XML TextServer"
+
+    manage_options=(
+        {'label':'Config','action':'manage_changeMpdlXmlTextServerForm'},
+        )+SimpleItem.manage_options
+
+    manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
+
+    def __init__(self,id,title="",serverUrl="http://mpdl-system.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
+    #def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/", serverName=None, timeout=40):
+
+        """constructor"""
+        self.id=id
+        self.title=title
+        self.timeout = timeout
+        if serverName is None:
+            self.serverUrl = serverUrl
+        else:
+            self.serverUrl = "http://%s/mpdl/interface/"%serverName
+
+    def getHttpData(self, url, data=None):
+        """returns result from url+data HTTP request"""
+        return documentViewer.getHttpData(url,data,timeout=self.timeout)
+
+    def getServerData(self, method, data=None):
+        """returns result from text server for method+data"""
+        url = self.serverUrl+method
+        return documentViewer.getHttpData(url,data,timeout=self.timeout)
+
+    def getSearch(self, pageinfo=None, docinfo=None):
+        """get search list"""
+        docpath = docinfo['textURLPath']
+        url = docinfo['url']
+        pagesize = pageinfo['queryPageSize']
+        pn = pageinfo.get('searchPN',1)
+        sn = pageinfo['sn']
+        highlightQuery = pageinfo['highlightQuery']
+        query = pageinfo['query']
+        queryType = pageinfo['queryType']
+        viewMode = pageinfo['viewMode']
+        tocMode = pageinfo['tocMode']
+        characterNormalization = pageinfo['characterNormalization']
+        #optionToggle = pageinfo['optionToggle']
+        tocPN = pageinfo['tocPN']
+        selfurl = self.absolute_url()
+        data = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&characterNormalization=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, sn, viewMode, characterNormalization, urllib.quote(highlightQuery)))
+        pagexml = data.replace('?document=%s'%str(docpath),'?url=%s'%url)
+        pagedom = Parse(pagexml)
+
+        """
+        pagedivs = pagedom.xpath("//div[@class='queryResultHits']")
+        if (pagedivs == pagedom.xpath("//div[@class='queryResultHits']")):
+            if len(pagedivs)>0:
+                docinfo['queryResultHits'] = int(getTextFromNode(pagedivs[0]))
+                s = getTextFromNode(pagedivs[0])
+                s1 = int(s)/10+1
+                try:
+                    docinfo['queryResultHits'] = int(s1)
+                    logging.debug("SEARCH ENTRIES: %s"%(s1))
+                except:
+                    docinfo['queryResultHits'] = 0
+        """
+        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
+            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
+            if len(pagedivs)>0:
+                pagenode=pagedivs[0]
+                links=pagenode.xpath("//a")
+                for l in links:
+                    hrefNode = l.getAttributeNodeNS(None, u"href")
+                    if hrefNode:
+                        href = hrefNode.nodeValue
+                        if href.startswith('page-fragment.xql'):
+                            selfurl = self.absolute_url()
+                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
+                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
+            #logging.debug("PUREXML :%s"%(serializeNode(pagenode)))
+            return serializeNode(pagenode)
+        if (queryType=="fulltextMorph"):
+            pagedivs = pagedom.xpath("//div[@class='queryResult']")
+            if len(pagedivs)>0:
+                pagenode=pagedivs[0]
+                links=pagenode.xpath("//a")
+                for l in links:
+                    hrefNode = l.getAttributeNodeNS(None, u"href")
+                    if hrefNode:
+                        href = hrefNode.nodeValue
+                        if href.startswith('page-fragment.xql'):
+                            selfurl = self.absolute_url()
+                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s&characterNormalization=%s'%(viewMode,queryType,urllib.quote(query),pagesize,pn,tocMode,pn,tocPN,characterNormalization))
+                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
+                        if href.startswith('../lt/lemma.xql'):
+                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_query'%(selfurl))
+                            l.setAttributeNS(None, 'target', '_blank')
+                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
+                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
+                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
+            return serializeNode(pagenode)
+        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
+            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
+            if len(pagedivs)>0:
+                pagenode=pagedivs[0]
+                links=pagenode.xpath("//a")
+                for l in links:
+                    hrefNode = l.getAttributeNodeNS(None, u"href")
+                    if hrefNode:
+                        href = hrefNode.nodeValue
+                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s&characterNormalization=%s'%(viewMode,tocMode,tocPN,pn,characterNormalization))
+                        if href.startswith('../lt/lex.xql'):
+                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_lex'%selfurl)
+                            l.setAttributeNS(None, 'target', '_blank')
+                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
+                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
+                        if href.startswith('../lt/lemma.xql'):
+                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%(selfurl))
+                            l.setAttributeNS(None, 'target', '_blank')
+                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
+                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
+            return serializeNode(pagenode)
+        return "no text here"
+
+    def getGisPlaces(self, docinfo=None, pageinfo=None):
+        """show all GIS places of the current page"""
+        xpath='//place'
+        docpath = docinfo.get('textURLPath',None)
+        if not docpath:
+            return None
+
+        url = docinfo['url']
+        selfurl = self.absolute_url()
+        pn = pageinfo['current']
+        hrefList=[]
+        myList= ""
+        text=self.getServerData("xpath.xql", "document=%s&xpath=%s&pn=%s"%(docinfo['textURLPath'],xpath,pn))
+        dom = Parse(text)
+        result = dom.xpath("//result/resultPage/place")
+        for l in result:
+            hrefNode= l.getAttributeNodeNS(None, u"id")
+            href= hrefNode.nodeValue
+            hrefList.append(href)
+            myList = ",".join(hrefList)
+        #logging.debug("getGisPlaces :%s"%(myList))
+        return myList
+
+    def getAllGisPlaces(self, docinfo=None, pageinfo=None):
+        """show all GIS places of the whole book"""
+        xpath ='//echo:place'
+        docpath =docinfo['textURLPath']
+        url = docinfo['url']
+        selfurl =self.absolute_url()
+        pn =pageinfo['current']
+        hrefList=[]
+        myList=""
+        text=self.getServerData("xpath.xql", "document=%s&xpath=%s"%(docinfo['textURLPath'],xpath))
+        dom =Parse(text)
+        result = dom.xpath("//result/resultPage/place")
+
+        for l in result:
+            hrefNode = l.getAttributeNodeNS(None, u"id")
+            href= hrefNode.nodeValue
+            hrefList.append(href)
+            myList = ",".join(hrefList)
+            #logging.debug("getALLGisPlaces :%s"%(myList))
+        return myList
+
+
+    def getTextPage(self, mode="text_dict", pn=1, docinfo=None, pageinfo=None):
+        """returns single page from fulltext"""
+        docpath = docinfo['textURLPath']
+        path = docinfo['textURLPath']
+        url = docinfo.get('url',None)
+        name = docinfo.get('name',None)
+        pn =pageinfo['current']
+        sn = pageinfo['sn']
+        #optionToggle = pageinfo['optionToggle']
+        highlightQuery = pageinfo['highlightQuery']
+        #mode = pageinfo['viewMode']
+        tocMode = pageinfo['tocMode']
+        characterNormalization=pageinfo['characterNormalization']
+        tocPN = pageinfo['tocPN']
+        selfurl = self.absolute_url()
+        if mode == "text_dict":
+            textmode = "textPollux"
+        else:
+            textmode = mode
+
+        textParam = "document=%s&mode=%s&pn=%s&characterNormalization=%s"%(docpath,textmode,pn,characterNormalization)
+        if highlightQuery is not None:
+            textParam +="&highlightQuery=%s&sn=%s"%(urllib.quote(highlightQuery),sn)
+
+        pagexml = self.getServerData("page-fragment.xql",textParam)
+        dom = Parse(pagexml)
+        #dom = NonvalidatingReader.parseStream(pagexml)
+
+        #original Pages
+        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
+
+        """if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
+            if len(pagedivs)>0:
+                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
+                logging.debug("ORIGINAL PAGE: %s"%(docinfo['pageNumberOrig']))
+
+        #original Pages Norm
+        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
+        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
+            if len(pagedivs)>0:
+                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
+                logging.debug("ORIGINAL PAGE NORM: %s"%(docinfo['pageNumberOrigNorm']))
+        """
+        #figureEntries
+        pagedivs = dom.xpath("//div[@class='countFigureEntries']")
+        if pagedivs == dom.xpath("//div[@class='countFigureEntries']"):
+            if len(pagedivs)>0:
+                docinfo['countFigureEntries'] = getTextFromNode(pagedivs[0])
+                s = getTextFromNode(pagedivs[0])
+                if s=='0':
+                    try:
+                        docinfo['countFigureEntries'] = int(s)
+                    except:
+                        docinfo['countFigureEntries'] = 0
+                else:
+                    s1 = int(s)/30+1
+                    try:
+                        docinfo['countFigureEntries'] = int(s1)
+                    except:
+                        docinfo['countFigureEntries'] = 0
+
+        #allPlaces
+        pagedivs = dom.xpath("//div[@class='countPlaces']")
+        if pagedivs == dom.xpath("//div[@class='countPlaces']"):
+            if len(pagedivs)>0:
+                docinfo['countPlaces']= getTextFromNode(pagedivs[0])
+                s = getTextFromNode(pagedivs[0])
+                try:
+                    docinfo['countPlaces'] = int(s)
+                except:
+                    docinfo['countPlaces'] = 0
+
+        #tocEntries
+        pagedivs = dom.xpath("//div[@class='countTocEntries']")
+        if pagedivs == dom.xpath("//div[@class='countTocEntries']"):
+            if len(pagedivs)>0:
+                docinfo['countTocEntries'] = int(getTextFromNode(pagedivs[0]))
+                s = getTextFromNode(pagedivs[0])
+                if s=='0':
+                    try:
+                        docinfo['countTocEntries'] = int(s)
+                    except:
+                        docinfo['countTocEntries'] = 0
+                else:
+                    s1 = int(s)/30+1
+                    try:
+                        docinfo['countTocEntries'] = int(s1)
+                    except:
+                        docinfo['countTocEntries'] = 0
+
+        #numTextPages
+        pagedivs = dom.xpath("//div[@class='countPages']")
+        if pagedivs == dom.xpath("//div[@class='countPages']"):
+            if len(pagedivs)>0:
+                docinfo['numPages'] = getTextFromNode(pagedivs[0])
+                s = getTextFromNode(pagedivs[0])
+
+                try:
+                    docinfo['numPages'] = int(s)
+                    #logging.debug("PAGE NUMBER: %s"%(s))
+
+                    np = docinfo['numPages']
+                    pageinfo['end'] = min(pageinfo['end'], np)
+                    pageinfo['numgroups'] = int(np / pageinfo['groupsize'])
+                    if np % pageinfo['groupsize'] > 0:
+                        pageinfo['numgroups'] += 1
+                except:
+                    docinfo['numPages'] = 0
+
+            else:
+                #no full text -- init to 0
+                docinfo['pageNumberOrig'] = 0
+                docinfo['countFigureEntries'] = 0
+                docinfo['countPlaces'] = 0
+                docinfo['countTocEntries'] = 0
+                docinfo['numPages'] = 0
+                docinfo['pageNumberOrigNorm'] = 0
+                #return docinfo
+
+        # plain text mode
+        if mode == "text":
+            # first div contains text
+            pagedivs = dom.xpath("/div")
+            if len(pagedivs) > 0:
+                pagenode = pagedivs[0]
+                links = pagenode.xpath("//a")
+                for l in links:
+                    hrefNode = l.getAttributeNodeNS(None, u"href")
+                    if hrefNode:
+                        href= hrefNode.nodeValue
+                        if href.startswith('#note-'):
+                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
+                return serializeNode(pagenode)
+        if mode == "xml":
+            # first div contains text
+            pagedivs = dom.xpath("/div")
+            if len(pagedivs) > 0:
+                pagenode = pagedivs[0]
+                return serializeNode(pagenode)
+        if mode == "gis":
+            # first div contains text
+            pagedivs = dom.xpath("/div")
+            if len(pagedivs) > 0:
+                pagenode = pagedivs[0]
+                links =pagenode.xpath("//a")
+                for l in links:
+                    hrefNode =l.getAttributeNodeNS(None, u"href")
+                    if hrefNode:
+                        href=hrefNode.nodeValue
+                        if href.startswith('http://chinagis.mpiwg-berlin.mpg.de'):
+                            hrefNode.nodeValue =href.replace('chinagis_REST/REST/db/chgis/mpdl','chinagis/REST/db/mpdl/%s'%name)
+                            l.setAttributeNS(None, 'target', '_blank')
+                return serializeNode(pagenode)
+
+        if mode == "pureXml":
+            # first div contains text
+            pagedivs = dom.xpath("/div")
+            if len(pagedivs) > 0:
+                pagenode = pagedivs[0]
+                return serializeNode(pagenode)
+        # text-with-links mode
+        if mode == "text_dict":
+            # first div contains text
+            #mode = pageinfo['viewMode']
+            pagedivs = dom.xpath("/div")
+            if len(pagedivs) > 0:
+                pagenode = pagedivs[0]
+                # check all a-tags
+                links = pagenode.xpath("//a")
+
+                for l in links:
+                    hrefNode = l.getAttributeNodeNS(None, u"href")
+
+                    if hrefNode:
+                        # is link with href
+                        href = hrefNode.nodeValue
+                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql'):
+                            # is pollux link
+                            selfurl = self.absolute_url()
+                            # change href
+                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/head_main_voc'%selfurl)
+                            # add target
+                            l.setAttributeNS(None, 'target', '_blank')
+                            #l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
+                            #l.setAttributeNS(None, "ondblclick", "popupWin.focus();")
+                            #window.open("this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=yes, scrollbars=1'"); return false;")
+
+                        if href.startswith('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql'):
+                            selfurl = self.absolute_url()
+                            hrefNode.nodeValue = href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/lt/lemma.xql','%s/head_main_lemma'%selfurl)
+                            l.setAttributeNS(None, 'target', '_blank')
+                            l.setAttributeNS(None, 'onclick',"popupWin = window.open(this.href, 'InfoWindow', 'menubar=no, location,width=500,height=600,top=180, left=700, toolbar=no, scrollbars=1'); return false;")
+                            l.setAttributeNS(None, 'ondblclick', 'popupWin.focus();')
+
+                        if href.startswith('#note-'):
+                            hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=text_dict&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,tocMode,tocPN,pn))
+
+                return serializeNode(pagenode)
+        return "no text here"
+
+    def getOrigPages(self, docinfo=None, pageinfo=None):
+        docpath = docinfo['textURLPath']
+        pn =pageinfo['current']
+        selfurl = self.absolute_url()
+        pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
+        dom = Parse(pagexml)
+        pagedivs = dom.xpath("//div[@class='pageNumberOrig']")
+        if pagedivs == dom.xpath("//div[@class='pageNumberOrig']"):
+            if len(pagedivs)>0:
+                docinfo['pageNumberOrig']= getTextFromNode(pagedivs[0])
+            return docinfo['pageNumberOrig']
+
+    def getOrigPagesNorm(self, docinfo=None, pageinfo=None):
+        docpath = docinfo['textURLPath']
+        pn =pageinfo['current']
+        selfurl = self.absolute_url()
+        pagexml = self.getServerData("page-fragment.xql","document=%s&pn=%s"%(docpath, pn))
+        dom = Parse(pagexml)
+        pagedivs = dom.xpath("//div[@class='pageNumberOrigNorm']")
+        if pagedivs == dom.xpath("//div[@class='pageNumberOrigNorm']"):
+            if len(pagedivs)>0:
+                docinfo['pageNumberOrigNorm']= getTextFromNode(pagedivs[0])
+            return docinfo['pageNumberOrigNorm']
+
+
+    def getTranslate(self, word=None, language=None):
+        """translate word into another language"""
+        data = self.getServerData("lt/wordInfo.xql","language="+str(language)+"&word="+urllib.quote(word)+"&output=html")
+        #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
+        return data
+
+    def getLemma(self, lemma=None, language=None):
+        """similar words for lemma"""
+        data = self.getServerData("lt/lemma.xql","language="+str(language)+"&lemma="+urllib.quote(lemma)+"&output=html")
+        return data
+
+    def getLemmaQuery(self, query=None, language=None):
+        """similar words for lemma query"""
+        data = self.getServerData("lt/lemma.xql","language="+str(language)+"&query="+urllib.quote(query)+"&output=html")
+        return data
+
+    def getLex(self, query=None, language=None):
+        # lexicon query (lex.xql)
+        data = self.getServerData("lt/lex.xql","document=&language="+str(language)+"&query="+urllib.quote(query))
+        return data
+
+    def getQuery(self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
+        # number of search result pages
+        docpath = docinfo['textURLPath']
+        pagesize = pageinfo['queryPageSize']
+        pn = pageinfo['searchPN']
+        query = pageinfo['query']
+        queryType = pageinfo['queryType']
+        tocSearch = 0
+        tocDiv = None
+
+        pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn))
+        pagedom = Parse(pagexml)
+        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
+        tocSearch = int(getTextFromNode(numdivs[0]))
+        tc=int((tocSearch/10)+1)
+        return tc
+
+    def getToc(self, mode="text", docinfo=None):
+        """loads table of contents and stores in docinfo"""
+        if mode == "none":
+            return docinfo
+        if 'tocSize_%s'%mode in docinfo:
+            # cached toc
+            return docinfo
+
+        docpath = docinfo['textURLPath']
+        # we need to set a result set size
+        pagesize = 1000
+        pn = 1
+        if mode == "text":
+            queryType = "toc"
+        else:
+            queryType = mode
+        # number of entries in toc
+        tocSize = 0
+        tocDiv = None
+
+        pagexml = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn))
+
+        # post-processing downloaded xml
+        pagedom = Parse(pagexml)
+        # get number of entries
+        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
+        if len(numdivs) > 0:
+            tocSize = int(getTextFromNode(numdivs[0]))
+        docinfo['tocSize_%s'%mode] = tocSize
+        return docinfo
+
+    def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
+        """returns single page from the table of contents"""
+        # TODO: this should use the cached TOC
+        if mode == "text":
+            queryType = "toc"
+        else:
+            queryType = mode
+        docpath = docinfo['textURLPath']
+        path = docinfo['textURLPath']
+        pagesize = pageinfo['tocPageSize']
+        pn = pageinfo['tocPN']
+        url = docinfo['url']
+        selfurl = self.absolute_url()
+        viewMode = pageinfo['viewMode']
+        characterNormalization = pageinfo['characterNormalization']
+        #optionToggle = pageinfo['optionToggle']
+        tocMode = pageinfo['tocMode']
+        tocPN = pageinfo['tocPN']
+
+        data = self.getServerData("doc-query.xql","document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s&characterNormalization=regPlusNorm"%(docpath,queryType, pagesize, pn))
+        page = data.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
+        text = page.replace('mode=image','mode=texttool')
+        return text
+
+    def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
+    #def manage_changeMpdlXmlTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
+        """change settings"""
+        self.title=title
+        self.timeout = timeout
+        self.serverUrl = serverUrl
+        if RESPONSE is not None:
+            RESPONSE.redirect('manage_main')
+
+# management methods
+def manage_addMpdlXmlTextServerForm(self):
+    """Form for adding"""
+    pt = PageTemplateFile("zpt/manage_addMpdlXmlTextServer", globals()).__of__(self)
+    return pt()
+
+def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
+#def manage_addMpdlXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
+    """add MpdlXmlTextServer"""
+    newObj = MpdlXmlTextServer(id,title,serverUrl,timeout)
+    self.Destination()._setObject(id, newObj)
+    if RESPONSE is not None:
+        RESPONSE.redirect('manage_main')
\ No newline at end of file