changeset 617:7aefbddddaf9

alpha of hocr server support
author dwinter
date Wed, 23 Jul 2014 17:36:04 +0200
parents 3f9b42840901
children 54d3498a6e78
files HocrTextServer.py HocrTxtUtils.py __init__.py css/docuviewer.css documentViewer.py zpt/viewer/layer_text_annotator.zpt
diffstat 6 files changed, 743 insertions(+), 3 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/HocrTextServer.py	Wed Jul 23 17:36:04 2014 +0200
@@ -0,0 +1,500 @@
+from OFS.SimpleItem import SimpleItem
+from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
+
+import xml.etree.ElementTree as ET
+
+import re
+import logging
+import urllib
+import urlparse
+import base64
+
+from HocrTxtUtils import getInt, getText, getHttpData
+
+def serialize(node):
+    """returns a string containing an XML snippet of node"""
+    s = ET.tostring(node, 'UTF-8')
+    # snip off XML declaration
+    if s.startswith('<?xml'):
+        i = s.find('?>')
+        return s[i+3:]
+
+    return s
+
+
+class HocrTextServer(SimpleItem):
+    """TextServer implementation for a HOCR text server"""
+    meta_type="Hocr TextServer"
+
+    manage_options=(
+        {'label':'Config','action':'manage_changeHocrTextServerForm'},
+       )+SimpleItem.manage_options
+    
+    manage_changeHocrTextServerForm = PageTemplateFile("zpt/manage_changeHocrTextServer", globals())
+        
+    def __init__(self,id,title="",serverUrl="http://localhost:8080/hocr", timeout=40, repositoryType='production'):
+        """constructor"""
+        self.id=id
+        self.title=title
+        self.timeout = timeout
+        self.repositoryType = repositoryType
+       
+        self.serverUrl = serverUrl
+      
+    def getHttpData(self, url, data=None):
+        """returns result from url+data HTTP request"""
+        return getHttpData(url,data,timeout=self.timeout)
+    
+    def getServerData(self, pn, data=None):
+        """returns result from text server for method+data"""
+        url = self.serverUrl
+        return getHttpData(url,pn,data=data,timeout=self.timeout)
+
+
+    def getRepositoryType(self):
+        """returns the repository type, e.g. 'production'"""
+        return getattr(self, 'repositoryType', None)
+
+    def getTextDownloadUrl(self, type='xml', docinfo=None):
+        """returns a URL to download the current text"""
+        docpath = docinfo.get('textURLPath', None)
+        if not docpath:
+            return None
+
+        docpath = docpath.replace('.xml','.'+type)
+        url = '%sgetDoc?doc=%s'%(self.serverUrl.replace('interface/',''), docpath)
+        return url
+
+
+    def getPlacesOnPage(self, docinfo=None, pn=None):
+        """Returns list of GIS places of page pn"""
+        docpath = docinfo.get('textURLPath',None)
+        if not docpath:
+            return None
+
+        places=[]
+        text=self.getServerData("xpath.xql", "document=%s&xpath=//place&pn=%s"%(docpath,pn))
+        dom = ET.fromstring(text)
+        result = dom.findall(".//resultPage/place")
+        for l in result:
+            id = l.get("id")
+            name = l.text
+            place = {'id': id, 'name': name}
+            places.append(place)
+
+        return places
+    
+          
+    def getTextInfo(self, mode='', docinfo=None):
+        """reads document info, including page concordance, from text server"""
+        logging.debug("getTextInfo mode=%s"%mode)
+        if mode not in ['toc', 'figures', '']:
+            mode = ''
+        # check cached info
+        if mode:
+            # cached toc-request?
+            if 'full_%s'%mode in docinfo:
+                return docinfo
+            
+        else:
+            # no toc-request
+            if 'numTextPages' in docinfo:
+                return docinfo
+                
+        docpath = docinfo.get('textURLPath', None)
+        if docpath is None:
+            logging.error("getTextInfo: no textURLPath!")
+            return docinfo
+              
+        try:
+            # we need to set a result set size
+            pagesize = 10000
+            pn = 1
+            # fetch docinfo            
+            pagexml = self.getServerData("doc-info.xql","document=%s&info=%s&pageSize=%s&pn=%s"%(docpath,mode,pagesize,pn))
+            dom = ET.fromstring(pagexml)
+            # all info in tag <document>
+            doc = dom.find("document")
+        except Exception, e:
+            logging.error("getTextInfo: Error reading doc info: %s"%e)
+            return docinfo
+            
+        if doc is None:
+            logging.error("getTextInfo: unable to find document-tag!")
+        else:
+            # go through all child elements
+            for tag in doc:
+                name = tag.tag
+                # numTextPages
+                if name == 'countPages':
+                    np = getInt(tag.text)                    
+                    if np > 0:
+                        docinfo['numTextPages'] = np
+                   
+                # numFigureEntries
+                elif name == 'countFigureEntries':
+                    docinfo['numFigureEntries'] = getInt(tag.text)
+                    
+                # numTocEntries
+                elif name == 'countTocEntries':
+                    # WTF: s1 = int(s)/30+1
+                    docinfo['numTocEntries'] = getInt(tag.text)
+                    
+                # numPlaces
+                elif name == 'countPlaces':
+                    docinfo['numPlaces'] = getInt(tag.text)
+                    
+                # pageNumbers
+                elif name == 'pageNumbers':
+                    # contains tags with page numbers
+                    # <pn><n>4</n><no>4</no><non/></pn>
+                    # n=scan number, no=original page no, non=normalized original page no
+                    # pageNumbers is a dict indexed by scan number
+                    pages = {}
+                    for pn in tag:
+                        page = {}
+                        n = 0
+                        for p in pn:
+                            if p.tag == 'n':
+                                n = getInt(p.text)
+                                page['pn'] = n
+                            elif p.tag == 'no':
+                                page['no'] = p.text
+                            elif p.tag == 'non':
+                                page['non'] = p.text
+                                
+                        if n > 0:
+                            pages[n] = page
+                        
+                    docinfo['pageNumbers'] = pages
+                    #logging.debug("got pageNumbers=%s"%repr(pages))
+                                
+                # toc
+                elif name == 'toc':
+                    # contains tags with table of contents/figures
+                    # <toc-entry><page>13</page><level>3</level><content>Chapter I</content><level-string>1.</level-string><real-level>1</real-level></toc-entry>
+                    tocs = []
+                    for te in tag:
+                        toc = {}
+                        for t in te:
+                            if t.tag == 'page':
+                                toc['pn'] = getInt(t.text)
+                            elif t.tag == 'level':
+                                toc['level'] = t.text
+                            elif t.tag == 'content':
+                                toc['content'] = t.text
+                            elif t.tag == 'level-string':
+                                toc['level-string'] = t.text
+                            elif t.tag == 'real-level':
+                                toc['real-level'] = t.text
+                                
+                        tocs.append(toc)
+                    
+                    # save as full_toc/full_figures
+                    docinfo['full_%s'%mode] = tocs
+
+        return docinfo
+        
+          
+    def processPageInfo(self, dom, docinfo, pageinfo):
+        """processes page info divs from dom and stores in docinfo and pageinfo"""
+        # assume the first second-level div is pageMeta
+        alldivs = dom.find("div")
+        
+        if alldivs is None or alldivs.get('class', '') != 'pageMeta':
+            logging.error("processPageInfo: pageMeta div not found!")
+            return
+        
+        for div in alldivs:
+            dc = div.get('class')
+            
+            # pageNumberOrig  
+            if dc == 'pageNumberOrig':
+                pageinfo['pageNumberOrig'] = div.text
+                
+            # pageNumberOrigNorm
+            elif dc == 'pageNumberOrigNorm':
+                pageinfo['pageNumberOrigNorm'] = div.text
+                
+            # pageHeaderTitle
+            elif dc == 'pageHeaderTitle':
+                pageinfo['pageHeaderTitle'] = div.text
+                        
+        #logging.debug("processPageInfo: pageinfo=%s"%repr(pageinfo))
+        return
+         
+           
+    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None):
+        """returns single page from fulltext"""
+        
+        
+        logging.debug("getTextPage Hocr mode=%s, pn=%s"%(mode,pn))
+        # check for cached text -- but ideally this shouldn't be called twice
+        if pageinfo.has_key('textPage'):
+            logging.debug("getTextPage: using cached text")
+            return pageinfo['textPage']
+        
+        docpath = docinfo.get('textURLPath', None)
+        
+        docpath=docpath.replace("pages","hocr")
+        
+        logging.debug("getTextPage docpath= %s"%docpath)
+        if not docpath:
+            return None
+        
+        # stuff for constructing full urls
+        selfurl = docinfo['viewerUrl']
+        textParams = {'document': docpath,
+                      'pn': pn}
+        if 'characterNormalization' in pageinfo:
+            textParams['characterNormalization'] = pageinfo['characterNormalization']
+        
+        if not mode:
+            # default is dict
+            mode = 'text'
+
+        logging.debug("getTextPage mode=%s, pn=%s"%(mode,pn))
+        modes = mode.split(',')
+        # check for multiple layers
+        if len(modes) > 1:
+            logging.debug("getTextPage: more than one mode=%s"%mode)
+                        
+        # search mode
+        if 'search' in modes:
+            # add highlighting
+            highlightQuery = pageinfo.get('highlightQuery', None)
+            if highlightQuery:
+                textParams['highlightQuery'] = highlightQuery
+                textParams['highlightElement'] = pageinfo.get('highlightElement', '')
+                textParams['highlightElementPos'] = pageinfo.get('highlightElementPos', '')
+                
+            # ignore mode in the following
+            modes.remove('search')
+                            
+        # pundit mode
+        punditMode = False
+        if 'pundit' in modes:
+            punditMode = True
+            # ignore mode in the following
+            modes.remove('pundit')
+                            
+        # other modes don't combine
+        if 'dict' in modes:
+            # dict is called textPollux in the backend
+            textmode = 'textPollux'
+        elif 'xml' in modes:
+            # xml mode
+            textmode = 'xml'
+            textParams['characterNormalization'] = 'orig'
+        elif 'gis' in modes:
+            textmode = 'gis'
+        else:
+            # text is default mode
+            textmode = 'text'
+        
+        textParams['mode'] = textmode
+        
+        logging.debug("getTextPage (textparams: %s"%textParams)
+          
+        try:
+            # fetch the page
+            pagexml = self.getServerData(pn,urllib.urlencode(textParams))
+            return pagexml
+        except Exception, e:
+            logging.error("getTextPage: Error reading page: %s"%e)
+            return None
+            
+
+
+        return None
+    
+    def addPunditAttributes(self, pagediv, pageinfo, docinfo):
+        """add about attributes for pundit annotation tool"""
+        textid = docinfo.get('DRI', "fn=%s"%docinfo.get('documentPath', '???'))
+        pn = pageinfo.get('pn', '1')
+        #  TODO: use pn as well?
+        # check all div-tags
+        divs = pagediv.findall(".//div")
+        for d in divs:
+            id = d.get('id')
+            if id:
+                d.set('about', "http://echo.mpiwg-berlin.mpg.de/%s/pn=%s/#%s"%(textid,pn,id))
+                cls = d.get('class','')
+                cls += ' pundit-content'
+                d.set('class', cls.strip())
+
+        return pagediv
+
+    def getSearchResults(self, mode, query=None, pageinfo=None, docinfo=None):
+        """loads list of search results and stores XML in docinfo"""
+        
+        logging.debug("getSearchResults mode=%s query=%s"%(mode, query))
+        if mode == "none":
+            return docinfo
+              
+        cachedQuery = docinfo.get('cachedQuery', None)
+        if cachedQuery is not None:
+            # cached search result
+            if cachedQuery == '%s_%s'%(mode,query):
+                # same query
+                return docinfo
+            
+            else:
+                # different query
+                del docinfo['resultSize']
+                del docinfo['resultXML']
+        
+        # cache query
+        docinfo['cachedQuery'] = '%s_%s'%(mode,query)
+        
+        # fetch full results
+        docpath = docinfo['textURLPath']
+        params = {'document': docpath,
+                  'mode': 'text',
+                  'queryType': mode,
+                  'query': query,
+                  'queryResultPageSize': 1000,
+                  'queryResultPN': 1,
+                  'characterNormalization': pageinfo.get('characterNormalization', 'reg')}
+        pagexml = self.getServerData("doc-query.xql",urllib.urlencode(params))
+        #pagexml = self.getServerData("doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&s=%s&viewMode=%s&characterNormalization=%s&highlightElementPos=%s&highlightElement=%s&highlightQuery=%s"%(docpath, 'text', queryType, urllib.quote(query), pagesize, pn, s, viewMode,characterNormalization, highlightElementPos, highlightElement, urllib.quote(highlightQuery)))
+        dom = ET.fromstring(pagexml)
+        # page content is in <div class="queryResultPage">
+        pagediv = None
+        # ElementTree 1.2 in Python 2.6 can't do div[@class='queryResultPage']
+        alldivs = dom.findall("div")
+        for div in alldivs:
+            dc = div.get('class')
+            # page content div
+            if dc == 'queryResultPage':
+                pagediv = div
+                
+            elif dc == 'queryResultHits':
+                docinfo['resultSize'] = getInt(div.text)
+
+        if pagediv is not None:
+            # store XML in docinfo
+            docinfo['resultXML'] = ET.tostring(pagediv, 'UTF-8')
+
+        return docinfo
+    
+
+    def getResultsPage(self, mode="text", query=None, pn=None, start=None, size=None, pageinfo=None, docinfo=None):
+        """returns single page from the table of contents"""
+        logging.debug("getResultsPage mode=%s, pn=%s"%(mode,pn))
+        # get (cached) result
+        self.getSearchResults(mode=mode, query=query, pageinfo=pageinfo, docinfo=docinfo)
+            
+        resultxml = docinfo.get('resultXML', None)
+        if not resultxml:
+            logging.error("getResultPage: unable to find resultXML")
+            return "Error: no result!"
+        
+        if size is None:
+            size = pageinfo.get('resultPageSize', 10)
+            
+        if start is None:
+            start = (pn - 1) * size
+
+        fullresult = ET.fromstring(resultxml)
+        
+        if fullresult is not None:
+            # paginate
+            first = start-1
+            len = size
+            del fullresult[:first]
+            del fullresult[len:]
+            tocdivs = fullresult
+            
+            # check all a-tags
+            links = tocdivs.findall(".//a")
+            for l in links:
+                href = l.get('href')
+                if href:
+                    # assume all links go to pages
+                    linkUrl = urlparse.urlparse(href)
+                    linkParams = urlparse.parse_qs(linkUrl.query)
+                    # take some parameters
+                    params = {'pn': linkParams['pn'],
+                              'highlightQuery': linkParams.get('highlightQuery',''),
+                              'highlightElement': linkParams.get('highlightElement',''),
+                              'highlightElementPos': linkParams.get('highlightElementPos','')
+                              }
+                    url = self.getLink(params=params)
+                    l.set('href', url)
+                        
+            return serialize(tocdivs)
+        
+        return "ERROR: no results!"
+
+
+    def getToc(self, mode='text', docinfo=None):
+        """returns list of table of contents from docinfo"""
+        logging.debug("getToc mode=%s"%mode)
+        if mode == 'text':
+            queryType = 'toc'
+        else:
+            queryType = mode
+            
+        if not 'full_%s'%queryType in docinfo:
+            # get new toc
+            docinfo = self.getTextInfo(queryType, docinfo)
+            
+        return docinfo.get('full_%s'%queryType, [])
+
+    def getTocPage(self, mode='text', pn=None, start=None, size=None, pageinfo=None, docinfo=None):
+        """returns single page from the table of contents"""
+        logging.debug("getTocPage mode=%s, pn=%s start=%s size=%s"%(mode,repr(pn),repr(start),repr(size)))
+        fulltoc = self.getToc(mode=mode, docinfo=docinfo)
+        if len(fulltoc) < 1:
+            logging.error("getTocPage: unable to find toc!")
+            return "Error: no table of contents!"        
+        
+        if size is None:
+            size = pageinfo.get('tocPageSize', 30)
+            
+        if start is None:
+            start = (pn - 1) * size
+
+        # paginate
+        first = (start - 1)
+        last = first + size
+        tocs = fulltoc[first:last]
+        tp = '<div>'
+        for toc in tocs:
+            pageurl = self.getLink('pn', toc['pn'])
+            tp += '<div class="tocline">'
+            tp += '<div class="toc name">[%s %s]</div>'%(toc['level-string'], toc['content'])
+            tp += '<div class="toc float right page"><a href="%s">Page: %s</a></div>'%(pageurl, toc['pn'])
+            tp += '</div>\n'
+            
+        tp += '</div>\n'
+        
+        return tp
+           
+    
+    def manage_changeHocrTextServer(self,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,repositoryType=None,RESPONSE=None):
+        """change settings"""
+        self.title=title
+        self.timeout = timeout
+        self.serverUrl = serverUrl
+        if repositoryType:
+            self.repositoryType = repositoryType
+        if RESPONSE is not None:
+            RESPONSE.redirect('manage_main')
+        
+# management methods
+def manage_addHocrTextServerForm(self):
+    """Form for adding"""
+    pt = PageTemplateFile("zpt/manage_addHocrTextServer", globals()).__of__(self)
+    return pt()
+
+def manage_addHocrTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",timeout=40,RESPONSE=None):
+#def manage_addHocrTextServer(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de:30030/mpdl/interface/",timeout=40,RESPONSE=None):    
+    """add HocrTextServer"""
+    newObj = HocrTextServer(id,title,serverUrl,timeout)
+    self.Destination()._setObject(id, newObj)
+    if RESPONSE is not None:
+        RESPONSE.redirect('manage_main')
+        
+        
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/HocrTxtUtils.py	Wed Jul 23 17:36:04 2014 +0200
@@ -0,0 +1,163 @@
+"""Utility methods for handling XML, reading HTTP, etc"""
+
+from App.ImageFile import ImageFile
+from App.Common import rfc1123_date
+
+import sys
+import os
+import stat
+import urllib
+import urllib2
+import logging
+
+
+HocrTxtUtilsVersion = "0.1"
+
+def getInt(number, default=0):
+    """returns always an int (0 in case of problems)"""
+    try:
+        return int(number)
+    except:
+        return int(default)
+
+def getAt(array, idx, default=None):
+    """returns element idx from array or default (in case of problems)"""
+    try:
+        return array[idx]
+    except:
+        return default
+
+def unicodify(s):
+    """decode str (utf-8 or latin-1 representation) into unicode object"""
+    if not s:
+        return u""
+    if isinstance(s, str):
+        try:
+            return s.decode('utf-8')
+        except:
+            return s.decode('latin-1')
+    else:
+        return unicode(s)
+
+def utf8ify(s):
+    """encode unicode object or string into byte string in utf-8 representation.
+       assumes string objects to be utf-8"""
+    if not s:
+        return ""
+    if isinstance(s, unicode):
+        return s.encode('utf-8')
+    else:
+        return str(s)
+
+def getText(node, recursive=0):
+    """returns all text content of a node and its subnodes"""
+    if node is None:
+        return ''
+    
+    # ElementTree:
+    text = node.text or ''
+    for e in node:
+        if recursive:
+            text += getText(e)
+        else:
+            text += e.text or ''
+        if e.tail:
+            text += e.tail
+
+    # 4Suite:
+    #nodelist=node.childNodes
+    #text = ""
+    #for n in nodelist:
+    #    if n.nodeType == node.TEXT_NODE:
+    #       text = text + n.data
+    
+    return text
+
+
+
+def getHttpData(url, pn=1,data=None, num_tries=3, timeout=10, noExceptions=False):
+    """returns result from url+data HTTP request"""
+    # we do GET (by appending data to url)
+    if isinstance(data, str) or isinstance(data, unicode):
+        # if data is string then append
+        url = "%s?pn=%s&%s"%(url,pn,data)
+    elif isinstance(data, dict) or isinstance(data, list) or isinstance(data, tuple):
+        # urlencode
+        url = "%s?pn=%s&%s"%(url,pn,urllib.urlencode(data))
+    
+    response = None
+    errmsg = None
+    for cnt in range(num_tries):
+        try:
+            logging.debug("getHttpData(#%s %ss) url=%s"%(cnt+1,timeout,url))
+            if sys.version_info < (2, 6):
+                # set timeout on socket -- ugly :-(
+                import socket
+                socket.setdefaulttimeout(float(timeout))
+                response = urllib2.urlopen(url)
+            else:
+                # timeout as parameter
+                response = urllib2.urlopen(url,timeout=float(timeout))
+            # check result?
+            break
+        except urllib2.HTTPError, e:
+            logging.error("getHttpData: HTTP error(%s): %s"%(e.code,e))
+            errmsg = str(e)
+            # stop trying
+            break
+        except urllib2.URLError, e:
+            logging.error("getHttpData: URLLIB error(%s): %s"%(e.reason,e))
+            errmsg = str(e)
+            # stop trying
+            #break
+
+    if response is not None:
+        data = response.read()
+        response.close()
+        return data
+    
+    if noExceptions:
+        return None
+    
+    raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg))
+    #return None
+
+
+def refreshingImageFileIndexHtml(self, REQUEST, RESPONSE):
+    """index_html method for App.ImageFile that updates the file info for each request."""
+    stat_info = os.stat(self.path)
+    self.size = stat_info[stat.ST_SIZE]
+    self.lmt = float(stat_info[stat.ST_MTIME]) or time.time()
+    self.lmh = rfc1123_date(self.lmt)
+    # call original method
+    return ImageFile.index_html(self, REQUEST, RESPONSE)
+
+
+def getBrowserType(self):
+    """check the browsers request to find out the browser type"""
+    bt = {}
+    ua = self.REQUEST.get_header("HTTP_USER_AGENT")
+    bt['ua'] = ua
+    bt['isIE'] = False
+    bt['isN4'] = False
+    if string.find(ua, 'MSIE') > -1:
+        bt['isIE'] = True
+    else:
+        bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1)
+        
+    try:
+        nav = ua[string.find(ua, '('):]
+        ie = string.split(nav, "; ")[1]
+        if string.find(ie, "MSIE") > -1:
+            bt['versIE'] = string.split(ie, " ")[1]
+    except: pass
+    
+    bt['isMac'] = string.find(ua, 'Macintosh') > -1
+    bt['isWin'] = string.find(ua, 'Windows') > -1
+    bt['isIEWin'] = bt['isIE'] and bt['isWin']
+    bt['isIEMac'] = bt['isIE'] and bt['isMac']
+    bt['staticHTML'] = False
+
+    return bt
+
+
--- a/__init__.py	Wed Jul 23 17:20:34 2014 +0200
+++ b/__init__.py	Wed Jul 23 17:36:04 2014 +0200
@@ -1,6 +1,7 @@
 import documentViewer
 import MpdlXmlTextServer
 import MpiwgXmlTextServer
+import HocrTextServer
 
 def initialize(context):
     """initialize ImageCollection"""
@@ -27,4 +28,14 @@
           MpiwgXmlTextServer.manage_addMpiwgXmlTextServer
           )
         )
+    
+    
+    context.registerClass(
+        HocrTextServer.HocrTextServer,
+        constructors = (
+          HocrTextServer.manage_addHocrTextServerForm,
+          HocrTextServer.manage_addHocrTextServer
+          )
+        )
+
     
\ No newline at end of file
--- a/css/docuviewer.css	Wed Jul 23 17:20:34 2014 +0200
+++ b/css/docuviewer.css	Wed Jul 23 17:36:04 2014 +0200
@@ -472,4 +472,8 @@
 div.footer div.content a:link,
 div.footer div.content a:visited {
 	color: gray;
+}
+
+span.ocr_line {
+display:block;
 }
\ No newline at end of file
--- a/documentViewer.py	Wed Jul 23 17:20:34 2014 +0200
+++ b/documentViewer.py	Wed Jul 23 17:36:04 2014 +0200
@@ -21,9 +21,21 @@
 
 def getMDText(node):
     """returns the @text content from the MetaDataProvider metadata node"""
+
+    
+
     if isinstance(node, dict):
         return node.get('@text', None)
     
+    if isinstance(node,list): # more than one text entry: prefer one without an attribute (NOTE(review): loop body returns node.get — likely should be nodeInList.get; verify)
+        for nodeInList in node:
+            attr = nodeInList.get("@attr",None)
+            if attr is None:
+                return node.get('@text',None)
+        return None
+
+
+
     return node
 
 def getParentPath(path, cnt=1):
@@ -82,6 +94,7 @@
     #
     # viewMode templates
     viewer_text = PageTemplateFile('zpt/viewer/viewer_text', globals())
+    viewer_hocr = PageTemplateFile('zpt/viewer/viewer_hocr', globals())
     viewer_xml = PageTemplateFile('zpt/viewer/viewer_xml', globals())
     viewer_image = PageTemplateFile('zpt/viewer/viewer_image', globals())
     viewer_index = PageTemplateFile('zpt/viewer/viewer_index', globals())
@@ -164,7 +177,11 @@
     # proxy text server methods to fulltextclient
     def getTextPage(self, **args):
         """returns full text content of page"""
+       
         return self.template.fulltextclient.getTextPage(**args)
+    
+   
+   
 
     def getSearchResults(self, **args):
         """loads list of search results and stores XML in docinfo"""
@@ -239,7 +256,7 @@
         show page
         @param url: url which contains display information
         @param mode: defines how to access the document behind url 
-        @param viewMode: 'image': display images, 'text': display text, 'xml': display xml, default is 'auto'
+        @param viewMode: 'image': display images, 'text': display text, 'xml': display xml, default is 'auto', 'hocr' : hocr format
         @param viewLayer: sub-type of viewMode, e.g. layer 'dict' for viewMode='text'
         @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
         """
@@ -250,7 +267,9 @@
             # this won't work
             logging.error("template folder missing!")
             return "ERROR: template folder missing!"
-            
+        
+        
+
         if not getattr(self, 'digilibBaseUrl', None):
             self.digilibBaseUrl = self.findDigilibUrl() or "http://digilib.mpiwg-berlin.mpg.de/digitallibrary"
             
@@ -287,6 +306,9 @@
             # legacy fix
             viewMode = 'image'
             self.REQUEST['viewMode'] = 'image'
+            
+        
+            
 
         # safe viewLayer in userinfo
         userinfo['viewLayer'] = viewLayer
@@ -532,6 +554,23 @@
             docUrl = getParentPath(url)
             metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
             docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)
+            
+        elif mode=="hocr":
+            # url points to folder with images, index.meta optional
+            # asssume index.meta in parent dir
+            docUrl = getParentPath(url)
+            metaDom = self.metadataService.getDomFromPathOrUrl(docUrl)
+            docinfo['imagePath'] = url.replace('/mpiwg/online', '', 1)
+            docinfo['textURLPath'] = url.replace('/mpiwg/online', '', 1)
+            if docinfo.get("creator", None) is None:
+                docinfo['creator'] = "" 
+            
+            if docinfo.get("title", None) is None:
+                docinfo['title'] = "" 
+
+            if docinfo.get("documentPath", None) is None:
+                docinfo['documentPath'] = url.replace('/mpiwg/online', '', 1)
+                docinfo['documentPath'] = url.replace('/pages', '', 1)
 
         elif mode=="filepath":
             # url points to image file, index.meta optional
@@ -725,6 +764,10 @@
         
         # old style text URL
         textUrl = getMDText(texttool.get('text', None))
+
+        
+
+
         if textUrl and docPath:
             if urlparse.urlparse(textUrl)[0] == "": #keine url
                 textUrl = os.path.join(docPath, textUrl) 
--- a/zpt/viewer/layer_text_annotator.zpt	Wed Jul 23 17:20:34 2014 +0200
+++ b/zpt/viewer/layer_text_annotator.zpt	Wed Jul 23 17:36:04 2014 +0200
@@ -4,9 +4,13 @@
 <head>
 <metal:block metal:define-macro="html_head" tal:condition="python:'annotator' in viewLayers">
   <!--  annotator -->
+ <link rel="stylesheet" type="text/css"
+    tal:attributes="href string:$rootUrl/template/annotator_files/css/style.css" />
+
   <link rel="stylesheet" type="text/css"
     tal:attributes="href string:$rootUrl/template/annotator_files/css/annotator.css" />
 
+
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/vendor/json2.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/vendor/showdown.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/extensions.js"></script>
@@ -14,6 +18,9 @@
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/class.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/range.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/annotator.js"></script>
+  <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/locale/en/annotator.js"></script>
+  
+  
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/widget.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/editor.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/viewer.js"></script>
@@ -24,6 +31,13 @@
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/plugin/tags.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/plugin/markdown.js"></script>
   <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/plugin/unsupported.js"></script>
+  
+  <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/jquery.dateFormat.js"></script>
+  <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/jquery.slimscroll.js"></script>
+  <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/jquery-i18n-master/jquery.i18n.min.js"></script>
+  <script type="text/javascript" tal:attributes="src string:$rootUrl/template/annotator_files/lib/plugin/view_annotator.js"></script>
+   
+  
   <!-- <script tal:attributes="src string:$rootUrl/template/annotator_files/lib/plugin/filter.js"></script> -->
 
   <script type="text/javascript" 
@@ -44,6 +58,9 @@
             // <!--
             $(document).ready(function() {
                 // annotator
+              
+
+                jQuery.i18n.load(i18n_dict);
                 var elem = $('div.pageContent').get(0);
                 var uri = annotatorPageUrl;
                 var devAnnotator = new Annotator(elem).addPlugin('Auth', {
@@ -78,7 +95,9 @@
                         'limit' : 20,
                         'uri' : uri
                     }
-                }).addPlugin('Tags').addPlugin('Markdown');
+                }).addPlugin('Tags').addPlugin('Markdown').addPlugin('AnnotatorViewer');
+                
+                $('#anotacions-uoc-panel').slimscroll({height: '100%'});
             });
         // -->
         </script>