from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile

import xml.etree.ElementTree as ET

import re
import logging
import urllib
import urlparse
import base64

from SrvTxtUtils import getInt, getText, getHttpData


def serialize(node):
    """returns a string containing an XML snippet of node"""
    s = ET.tostring(node, 'UTF-8')
    # snip off XML declaration
    if s.startswith('<?xml'):
        i = s.find('?>')
        return s[i+3:]

    return s


class MpiwgXmlTextServer(SimpleItem):
    """TextServer implementation for MPIWG-XML server"""
    meta_type = "MPIWG-XML TextServer"

    manage_options = (
        {'label':'Config','action':'manage_changeMpiwgXmlTextServerForm'},
        )+SimpleItem.manage_options

    manage_changeMpiwgXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpiwgXmlTextServer", globals())

    def __init__(self, id, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpiwg-mpdl-cms-web/", timeout=40, serverName=None, repositoryType='production'):
        """constructor"""
        self.id = id
        self.title = title
        self.timeout = timeout
        self.repositoryType = repositoryType
        if serverName is None:
            self.serverUrl = serverUrl
        else:
            self.serverUrl = "http://%s/mpiwg-mpdl-cms-web/"%serverName

    def getHttpData(self, url, data=None):
        """returns result from url+data HTTP request"""
        return getHttpData(url, data, timeout=self.timeout)

    def getServerData(self, method, data=None):
        """returns result from text server for method+data"""
        url = self.serverUrl+method
        return getHttpData(url, data, timeout=self.timeout)

    def getRepositoryType(self):
        """returns the repository type, e.g. 'production'"""
        return self.repositoryType

    def getTextDownloadUrl(self, type='xml', docinfo=None):
        """returns a URL to download the current text"""
        docpath = docinfo.get('textURLPath', None)
        if not docpath:
            return None

        docpath = docpath.replace('.xml', '.'+type)
        url = '%sdoc/GetDocument?id=%s'%(self.serverUrl.replace('interface/',''), docpath)
        return url

    def getPlacesOnPage(self, docinfo=None, pn=None):
        """Returns list of GIS places of page pn"""
        #FIXME!
        docpath = docinfo.get('textURLPath', None)
        if not docpath:
            return None

        places = []
        text = self.getServerData("xpath.xql", "document=%s&xpath=//place&pn=%s"%(docpath,pn))
        dom = ET.fromstring(text)
        result = dom.findall(".//resultPage/place")
        for l in result:
            id = l.get("id")
            name = l.text
            place = {'id': id, 'name': name}
            places.append(place)

        return places

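    # getTextInfo() below queries "query/GetDocInfo" with docId=<textURLPath>
    # and an optional field=pages|toc|figures parameter. The response it expects
    # looks roughly like this (sketch inferred from the parsing code below;
    # element details may vary):
    #
    #   <doc>
    #     <system><countPages>...</countPages></system>
    #     <list type="pages">
    #       <item n="..." o="..." o-norm="..."/>
    #       ...
    #     </list>
    #   </doc>
    #
    # where n is the scan number, o the original page number and o-norm the
    # normalized original page number.
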
    def getTextInfo(self, mode=None, docinfo=None):
        """reads document info, including page concordance, from text server"""
        logging.debug("getTextInfo mode=%s"%mode)

        field = ''
        if mode in ['pages', 'toc', 'figures']:
            # translate mode to field param
            field = '&field=%s'%mode
        else:
            mode = None

        # check cached info
        if mode:
            # cached toc-request?
            if 'full_%s'%mode in docinfo:
                return docinfo

        else:
            # cached but no toc-request?
            if 'numTextPages' in docinfo:
                return docinfo

        docpath = docinfo.get('textURLPath', None)
        if docpath is None:
            logging.error("getTextInfo: no textURLPath!")
            return docinfo

        # fetch docinfo
        pagexml = self.getServerData("query/GetDocInfo", "docId=%s%s"%(docpath,field))
        dom = ET.fromstring(pagexml)
        # all info is in the <doc> root tag
        doc = dom
        if doc is None:
            logging.error("getTextInfo: unable to find document-tag!")
        else:
            if mode is None:
                # get general info from system-tag
                cp = doc.find('system/countPages')
                if cp is not None:
                    docinfo['numTextPages'] = getInt(cp.text)

            else:
                # result is in list-tag
                l = doc.find('list')
                if l is not None:
                    lt = l.get('type')
                    # pageNumbers
                    if lt == 'pages':
                        # list of <item> tags with page numbers
                        # n=scan number, o=original page no, on=normalized original page no
                        # pageNumbers is a dict indexed by scan number
                        pages = {}
                        for i in l:
                            page = {}
                            pn = getInt(i.get('n'))
                            page['pn'] = pn
                            no = getInt(i.get('o'))
                            page['no'] = no
                            non = getInt(i.get('o-norm'))
                            page['non'] = non

                            if pn > 0:
                                pages[pn] = page

                        docinfo['pageNumbers'] = pages
                        logging.debug("got pageNumbers=%s"%repr(pages))

                    # toc and figures
                    elif lt in ('toc', 'figures'):
                        # list of <item> tags with table of contents/figures info,
                        # e.g. page=133, content="Chapter I", level-string=1.1
                        tocs = []
                        for te in l:
                            toc = {}
                            for t in te:
                                if t.tag == 'page':
                                    toc['pn'] = getInt(t.text)
                                elif t.tag == 'level':
                                    toc['level'] = t.text
                                elif t.tag == 'content':
                                    toc['content'] = t.text
                                elif t.tag == 'level-string':
                                    toc['level-string'] = t.text
                                elif t.tag == 'real-level':
                                    toc['real-level'] = t.text

                            tocs.append(toc)

                        # save as full_toc/full_figures
                        docinfo['full_%s'%mode] = tocs

        return docinfo

    def processPageInfo(self, dom, docinfo, pageinfo):
        """processes page info divs from dom and stores in docinfo and pageinfo"""
        # assume the first second-level div is pageMeta
        alldivs = dom.find("div")

        if alldivs is None or alldivs.get('class', '') != 'pageMeta':
            logging.error("processPageInfo: pageMeta div not found!")
            return

        for div in alldivs:
            dc = div.get('class')

            # pageNumberOrig
            if dc == 'pageNumberOrig':
                pageinfo['pageNumberOrig'] = div.text

            # pageNumberOrigNorm
            elif dc == 'pageNumberOrigNorm':
                pageinfo['pageNumberOrigNorm'] = div.text

            # pageHeaderTitle
            elif dc == 'pageHeaderTitle':
                pageinfo['pageHeaderTitle'] = div.text

        #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
        return

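    # getTextPage() below builds a "query/GetPage" request; depending on the
    # requested layer the parameters look roughly like this (sketch derived
    # from the code, not an authoritative API description):
    #
    #   dict: docId=<path>&page=<pn>&mode=tokenized&outputFormat=html
    #   text: docId=<path>&page=<pn>&mode=untokenized&outputFormat=html
    #   xml:  docId=<path>&page=<pn>&mode=untokenized&outputFormat=xmlDisplay&normalization=orig
    #
    # plus optional normalization and highlightQuery/highlightElem/highlightElemPos
    # parameters when the 'search' mode is requested.
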
    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
        """returns single page from fulltext"""
        logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
        # check for cached text -- but ideally this shouldn't be called twice
        if pageinfo.has_key('textPage'):
            logging.debug("getTextPage: using cached text")
            return pageinfo['textPage']

        docpath = docinfo.get('textURLPath', None)
        if not docpath:
            return None

        # just checking
        if pageinfo['current'] != pn:
            logging.warning("getTextPage: current!=pn!")

        # stuff for constructing full urls
        selfurl = docinfo['viewerUrl']
        textParams = {'docId': docpath,
                      'page': pn}

        if 'characterNormalization' in pageinfo:
            cn = pageinfo['characterNormalization']
            # TODO: change values in form
            if cn == 'regPlusNorm':
                cn = 'norm'

            textParams['normalization'] = cn

        if not mode:
            # default is dict
            mode = 'text'

        modes = mode.split(',')
        # check for multiple layers
        if len(modes) > 1:
            logging.debug("getTextPage: more than one mode=%s"%mode)

        # search mode
        if 'search' in modes:
            # add highlighting
            highlightQuery = pageinfo.get('highlightQuery', None)
            if highlightQuery:
                textParams['highlightQuery'] = highlightQuery
                textParams['highlightElem'] = pageinfo.get('highlightElement', '')
                textParams['highlightElemPos'] = pageinfo.get('highlightElementPos', '')

            # ignore mode in the following
            modes.remove('search')

        # pundit mode
        punditMode = False
        if 'pundit' in modes:
            punditMode = True
            # ignore mode in the following
            modes.remove('pundit')

        # other modes don't combine
        if 'dict' in modes:
            textmode = 'dict'
            textParams['mode'] = 'tokenized'
            textParams['outputFormat'] = 'html'
        elif 'xml' in modes:
            textmode = 'xml'
            textParams['mode'] = 'untokenized'
            textParams['outputFormat'] = 'xmlDisplay'
            textParams['normalization'] = 'orig'
        elif 'gis' in modes:
            #FIXME!
            textmode = 'gis'
        else:
            # text is default mode
            textmode = 'text'
            textParams['mode'] = 'untokenized'
            textParams['outputFormat'] = 'html'

        # fetch the page
        pagexml = self.getServerData("query/GetPage", urllib.urlencode(textParams))
        try:
            dom = ET.fromstring(pagexml)
        except Exception, e:
            logging.error("Error parsing page: %s"%e)
            return None

        pagediv = None
        body = dom.find('.//body')
        if body is None:
            logging.error("getTextPage: no body!")
            return None

        # the text is in div@class=text
        pagediv = body.find(".//div[@class='text']")
        logging.debug("pagediv: %s"%repr(pagediv))

        # plain text or text-with-links mode
        if textmode == "text" or textmode == "dict":
            if pagediv is not None:
                #self._processPbTag(pagediv, pageinfo)
                self._processFigures(pagediv, docinfo)
                #self._fixEmptyDivs(pagediv)
                # get full url assuming documentViewer is parent
                selfurl = self.getLink()
                # check all a-tags
                links = pagediv.findall('.//a')
                for l in links:
                    href = l.get('href')
                    if href:
                        # is link with href
                        linkurl = urlparse.urlparse(href)
                        if linkurl.path.endswith('GetDictionaryEntries'):
                            #TODO: replace wordInfo page
                            # is dictionary link - change href (keeping parameters)
                            #l.set('href', href.replace('http://mpdl-proto.mpiwg-berlin.mpg.de/mpdl/interface/lt/wordInfo.xql','%s/template/viewer_wordinfo'%viewerurl))
                            # add target to open new page
                            l.set('target', '_blank')

                        elif href.startswith('#note-'):
                            # note link FIXME!
                            l.set('href', href.replace('#note-', "%s#note-"%selfurl))

                if punditMode:
                    self._addPunditAttributes(pagediv, pageinfo, docinfo)

                return serialize(pagediv)

        # xml mode
        elif textmode == "xml":
            if pagediv is not None:
                return serialize(pagediv)

        # pureXml mode WTF?
        elif textmode == "pureXml":
            if pagediv is not None:
                return serialize(pagediv)

        # gis mode FIXME!
        elif textmode == "gis":
            if pagediv is not None:
                # fix empty div tags
                self._fixEmptyDivs(pagediv)
                # check all a-tags
                links = pagediv.findall(".//a")
                # add our URL as backlink
                selfurl = self.getLink()
                doc = base64.b64encode(selfurl)
                for l in links:
                    href = l.get('href')
                    if href:
                        if href.startswith('http://mappit.mpiwg-berlin.mpg.de'):
                            l.set('href', re.sub(r'doc=[\w+/=]+', 'doc=%s'%doc, href))
                            l.set('target', '_blank')

                return serialize(pagediv)

        return None

    def _processPbTag(self, pagediv, pageinfo):
        """extracts information from pb-tag and removes it from pagediv"""
        pbdiv = pagediv.find(".//span[@class='pb']")
        if pbdiv is None:
            logging.warning("getTextPage: no pb-span!")
            return pagediv

        # extract running head
        rh = pbdiv.find(".//span[@class='rhead']")
        if rh is not None:
            pageinfo['pageHeaderTitle'] = getText(rh)

        # remove pb-div from parent
        ppdiv = pagediv.find(".//span[@class='pb']/..")
        ppdiv.remove(pbdiv)
        return pagediv

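    # _addPunditAttributes() below rewrites text divs for the Pundit annotation
    # tool, e.g. (illustrative id and page number):
    #
    #   <div id="p3s2">          becomes
    #   <div id="p3s2" class="... pundit-content"
    #        about="http://echo.mpiwg-berlin.mpg.de/<textid>/pn=3/#p3s2">
    #
    # where <textid> is the document's DRI or "fn=<documentPath>".
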
    def _addPunditAttributes(self, pagediv, pageinfo, docinfo):
        """add about attributes for pundit annotation tool"""
        textid = docinfo.get('DRI', "fn=%s"%docinfo.get('documentPath', '???'))
        pn = pageinfo.get('pn', '1')
        # TODO: use pn as well?
        # check all div-tags
        divs = pagediv.findall(".//div")
        for d in divs:
            id = d.get('id')
            if id:
                # TODO: check path (cf RFC2396)
                d.set('about', "http://echo.mpiwg-berlin.mpg.de/%s/pn=%s/#%s"%(textid,pn,id))
                cls = d.get('class', '')
                cls += ' pundit-content'
                d.set('class', cls.strip())

        return pagediv

    def _processFigures(self, pagediv, docinfo):
        """processes figure-tags"""
        divs = pagediv.findall(".//span[@class='figure']")
        scalerUrl = docinfo['digilibScalerUrl']
        viewerUrl = docinfo['digilibViewerUrl']
        for d in divs:
            try:
                a = d.find('a')
                img = a.find('img')
                imgsrc = img.get('src')
                imgurl = urlparse.urlparse(imgsrc)
                imgq = imgurl.query
                imgparams = urlparse.parse_qs(imgq)
                fn = imgparams.get('fn', None)
                if fn is not None:
                    # parse_qs puts parameters in lists
                    fn = fn[0]
                    # TODO: check valid path
                    # fix img@src
                    newsrc = '%s?fn=%s&dw=200&dh=200'%(scalerUrl,fn)
                    img.set('src', newsrc)
                    # fix a@href
                    newlink = '%s?fn=%s'%(viewerUrl,fn)
                    a.set('href', newlink)
                    a.set('target', '_blank')

            except:
                logging.warn("processFigures: strange figure!")

    def _fixEmptyDivs(self, pagediv):
        """fixes empty div-tags by inserting a space"""
        divs = pagediv.findall('.//div')
        for d in divs:
            if len(d) == 0 and not d.text:
                # make empty divs non-empty
                d.text = ' '

        return pagediv

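    # getSearchResults() below caches one search result per document in docinfo:
    # docinfo['cachedQuery'] holds the "<mode>_<query>" key of the last search,
    # docinfo['resultXML'] the result page and docinfo['resultSize'] the hit
    # count; an identical repeated query is answered from this cache.
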
    def getSearchResults(self, mode, query=None, pageinfo=None, docinfo=None):
        """loads list of search results and stores XML in docinfo"""
        logging.debug("getSearchResults mode=%s query=%s"%(mode, query))
        if mode == "none":
            return docinfo

        cachedQuery = docinfo.get('cachedQuery', None)
        if cachedQuery is not None:
            # cached search result
            if cachedQuery == '%s_%s'%(mode,query):
                # same query
                return docinfo

            else:
                # different query
                del docinfo['resultSize']
                del docinfo['resultXML']

        # cache query
        docinfo['cachedQuery'] = '%s_%s'%(mode,query)

        # fetch full results
        docpath = docinfo['textURLPath']
        params = {'document': docpath,
                  'mode': 'text',
                  'queryType': mode,
                  'query': query,
                  'queryResultPageSize': 1000,
                  'queryResultPN': 1,
                  'characterNormalization': pageinfo.get('characterNormalization', 'reg')}
        pagexml = self.getServerData("doc-query.xql", urllib.urlencode(params))
        #pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
        dom = ET.fromstring(pagexml)
        # page content is in <div class="queryResultPage">
        pagediv = None
        # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
        alldivs = dom.findall("div")
        for div in alldivs:
            dc = div.get('class')
            # page content div
            if dc == 'queryResultPage':
                pagediv = div

            elif dc == 'queryResultHits':
                docinfo['resultSize'] = getInt(div.text)

        if pagediv is not None:
            # store XML in docinfo
            docinfo['resultXML'] = ET.tostring(pagediv, 'UTF-8')

        return docinfo

    def getResultsPage(self, mode="text", query=None, pn=None, start=None, size=None, pageinfo=None, docinfo=None):
        """returns single page from the list of search results"""
        logging.debug("getResultsPage mode=%s, pn=%s"%(mode,pn))
        # get (cached) result
        self.getSearchResults(mode=mode, query=query, pageinfo=pageinfo, docinfo=docinfo)

        resultxml = docinfo.get('resultXML', None)
        if not resultxml:
            logging.error("getResultPage: unable to find resultXML")
            return "Error: no result!"

        if size is None:
            size = pageinfo.get('resultPageSize', 10)

        if start is None:
            start = (pn - 1) * size

        fullresult = ET.fromstring(resultxml)

        if fullresult is not None:
            # paginate
            first = start-1
            len = size
            del fullresult[:first]
            del fullresult[len:]
            tocdivs = fullresult

            # check all a-tags
            links = tocdivs.findall(".//a")
            for l in links:
                href = l.get('href')
                if href:
                    # assume all links go to pages
                    linkUrl = urlparse.urlparse(href)
                    linkParams = urlparse.parse_qs(linkUrl.query)
                    # take some parameters
                    params = {'pn': linkParams['pn'],
                              'highlightQuery': linkParams.get('highlightQuery', ''),
                              'highlightElement': linkParams.get('highlightElement', ''),
                              'highlightElementPos': linkParams.get('highlightElementPos', '')
                              }
                    url = self.getLink(params=params)
                    l.set('href', url)

            return serialize(tocdivs)

        return "ERROR: no results!"

    def getToc(self, mode='text', docinfo=None):
        """returns list of table of contents from docinfo"""
        logging.debug("getToc mode=%s"%mode)
        if mode == 'text':
            queryType = 'toc'
        else:
            queryType = mode

        if not 'full_%s'%queryType in docinfo:
            # get new toc
            docinfo = self.getTextInfo(queryType, docinfo)

        return docinfo.get('full_%s'%queryType, [])

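    # Each toc/figures entry consumed by getTocPage() below is a dict as built
    # in getTextInfo(), e.g. (illustrative values):
    #   {'pn': 133, 'level': '2', 'level-string': '1.1', 'content': 'Chapter I'}
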
    def getTocPage(self, mode='text', pn=None, start=None, size=None, pageinfo=None, docinfo=None):
        """returns single page from the table of contents"""
        logging.debug("getTocPage mode=%s, pn=%s start=%s size=%s"%(mode,repr(pn),repr(start),repr(size)))
        fulltoc = self.getToc(mode=mode, docinfo=docinfo)
        if len(fulltoc) < 1:
            logging.error("getTocPage: unable to find toc!")
            return "Error: no table of contents!"

        if size is None:
            size = pageinfo.get('tocPageSize', 30)

        if start is None:
            start = (pn - 1) * size

        # paginate
        first = (start - 1)
        last = first + size
        tocs = fulltoc[first:last]

        # build a simple HTML list of toc lines, each with a link to its page
        tp = '<div>'
        for toc in tocs:
            pageurl = self.getLink('pn', toc['pn'])
            tp += '<div class="tocline">'
            tp += '<div class="toc name">[%s %s]</div>'%(toc['level-string'], toc['content'])
            tp += '<div class="toc page"><a href="%s">Page: %s</a></div>'%(pageurl, toc['pn'])
            tp += '</div>\n'

        tp += '</div>\n'
        return tp

    def manage_changeMpiwgXmlTextServer(self, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", timeout=40, repositoryType=None, RESPONSE=None):
        """change settings"""
        self.title = title
        self.timeout = timeout
        self.serverUrl = serverUrl
        if repositoryType:
            self.repositoryType = repositoryType
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')


# management methods
def manage_addMpiwgXmlTextServerForm(self):
    """Form for adding"""
    pt = PageTemplateFile("zpt/manage_addMpiwgXmlTextServer", globals()).__of__(self)
    return pt()

def manage_addMpiwgXmlTextServer(self, id, title="", serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", timeout=40, RESPONSE=None):
#def manage_addMpiwgXmlTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):
    """add MpiwgXmlTextServer"""
    newObj = MpiwgXmlTextServer(id=id, title=title, serverUrl=serverUrl, timeout=timeout)
    self.Destination()._setObject(id, newObj)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
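
# Usage sketch (hypothetical document path; assumes the object is created inside
# a documentViewer folder that provides getLink() and fills the docinfo/pageinfo
# dicts used above):
#
#   ts = MpiwgXmlTextServer('textserver')
#   docinfo = ts.getTextInfo(mode=None, docinfo={'textURLPath': 'some/doc.xml'})
#   docinfo = ts.getTextInfo(mode='toc', docinfo=docinfo)
#   toc = ts.getToc(mode='text', docinfo=docinfo)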