diff documentViewer.py @ 453:beb7ccb92564 elementtree

first version using elementtree instead of 4suite xml
author casties
date Thu, 14 Jul 2011 19:43:56 +0200
parents b8fb4c750d74
children 73e3273c7624
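For orientation, a minimal sketch of the ElementTree idioms this changeset adopts in place of the
4Suite calls (the XML snippet and element names below are illustrative only, loosely modelled on an
index.meta file; they are not taken from the repository):

    import xml.etree.ElementTree as ET

    # illustrative input, not an actual index.meta file
    xml = "<resource><name>doc-1</name><bib type='book'><title>A Title</title></bib></resource>"

    dom = ET.fromstring(xml)              # replaces Ft.Xml.Parse(txt)

    def getText(node):
        """collect the text of a node and all its descendants"""
        if node is None:
            return ""
        text = node.text or ""
        for e in node:
            text += getText(e)
            if e.tail:
                text += e.tail
        return text

    print getText(dom.find("name"))       # 'doc-1'  -- find() replaces xpath(...)[0]
    print dom.find(".//bib").get("type")  # 'book'   -- get() replaces xpath("//bib/@type")
    print ET.tostring(dom)                # replaces Ft.Xml.Domlette.Print()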
--- a/documentViewer.py	Wed Nov 24 17:01:26 2010 +0100
+++ b/documentViewer.py	Thu Jul 14 19:43:56 2011 +0200
@@ -7,8 +7,11 @@
 from Globals import package_home
 from Products.zogiLib.zogiLib import browserCheck
 
-from Ft.Xml import EMPTY_NAMESPACE, Parse
-import Ft.Xml.Domlette
+#from Ft.Xml import EMPTY_NAMESPACE, Parse 
+#import Ft.Xml.Domlette
+
+import xml.etree.ElementTree as ET
+
 import os.path
 import sys
 import urllib
@@ -16,7 +19,6 @@
 import logging
 import math
 import urlparse 
-import cStringIO
 import re
 import string
 
@@ -32,23 +34,37 @@
     except:
         return int(default)
 
-def getTextFromNode(nodename):
+def getText(node):
     """get the cdata content of a node"""
-    if nodename is None:
+    if node is None:
         return ""
-    nodelist=nodename.childNodes
-    rc = ""
-    for node in nodelist:
-        if node.nodeType == node.TEXT_NODE:
-           rc = rc + node.data
-    return rc
+    # ET:
+    text = node.text or ""
+    for e in node:
+        text += getText(e)
+        if e.tail:
+            text += e.tail
 
-def serializeNode(node, encoding='utf-8'):
+    # 4Suite:
+    #nodelist=node.childNodes
+    #text = ""
+    #for n in nodelist:
+    #    if n.nodeType == node.TEXT_NODE:
+    #       text = text + n.data
+    
+    return text
+
+getTextFromNode = getText
+
+def serializeNode(node, encoding="utf-8"):
     """returns a string containing node as XML"""
-    buf = cStringIO.StringIO()
-    Ft.Xml.Domlette.Print(node, stream=buf, encoding=encoding)
-    s = buf.getvalue()
-    buf.close()
+    s = ET.tostring(node)  # NB: the encoding parameter is not passed to ET.tostring here
+    
+    # 4Suite:
+    #    stream = cStringIO.StringIO()
+    #    Ft.Xml.Domlette.Print(node, stream=stream, encoding=encoding)
+    #    s = stream.getvalue()
+    #    stream.close()
     return s
 
 def browserCheck(self):
@@ -58,17 +74,51 @@
     bt['ua'] = ua
     bt['isIE'] = False
     bt['isN4'] = False
+    bt['versFirefox']=""
+    bt['versIE']=""
+    bt['versSafariChrome']=""
+    bt['versOpera']=""
+    
     if string.find(ua, 'MSIE') > -1:
         bt['isIE'] = True
     else:
         bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1)
-        
+    # Safari or Chrome identification
+    try:
+        nav = ua[string.find(ua, '('):]
+        nav1=ua[string.find(ua,')'):]
+        nav2=nav1[string.find(nav1,'('):]
+        nav3=nav2[string.find(nav2,')'):]
+        ie = string.split(nav, "; ")[1]
+        ie1 =string.split(nav1, " ")[2]
+        ie2 =string.split(nav3, " ")[1]
+        ie3 =string.split(nav3, " ")[2]
+        if string.find(ie3, "Safari") >-1:
+            bt['versSafariChrome']=string.split(ie2, "/")[1]
+    except: pass
+    # IE identification
     try:
         nav = ua[string.find(ua, '('):]
         ie = string.split(nav, "; ")[1]
         if string.find(ie, "MSIE") > -1:
             bt['versIE'] = string.split(ie, " ")[1]
-    except: pass
+    except: pass
+    # Firefox identification
+    try:
+        nav = ua[string.find(ua, '('):]
+        nav1=ua[string.find(ua,')'):]
+        if string.find(ie1, "Firefox") >-1:
+            nav5= string.split(ie1, "/")[1]
+            logging.debug("FIREFOX: %s"%(nav5))
+            bt['versFirefox']=nav5[0:3]                   
+    except: pass
+    # Opera identification
+    try:
+        if string.find(ua,"Opera") >-1:
+            nav = ua[string.find(ua, '('):]
+            nav1=nav[string.find(nav,')'):]
+            bt['versOpera']=string.split(nav1,"/")[2]
+    except: pass
     
     bt['isMac'] = string.find(ua, 'Macintosh') > -1
     bt['isWin'] = string.find(ua, 'Windows') > -1
@@ -127,8 +177,6 @@
     raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg))
     #return None
 
-
-
 ##
 ## documentViewer class
 ##
@@ -147,6 +195,7 @@
     toc_text = PageTemplateFile('zpt/toc_text', globals())
     toc_figures = PageTemplateFile('zpt/toc_figures', globals())
     page_main_images = PageTemplateFile('zpt/page_main_images', globals())
+    page_main_double = PageTemplateFile('zpt/page_main_double', globals())
     page_main_text = PageTemplateFile('zpt/page_main_text', globals())
     page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals())
     page_main_gis =PageTemplateFile ('zpt/page_main_gis', globals())
@@ -196,26 +245,18 @@
         """get page"""
         return self.template.fulltextclient.getTextPage(**args)
 
-    def getQuery(self, **args):
-        """get query"""
-        return self.template.fulltextclient.getQuery(**args)
-    
-    def getQueryResultHits(self, **args):
-        """get query"""
-        return self.template.fulltextclient.getQueryResultHits(**args)
+    def getOrigPages(self, **args):
+        """get page"""
+        return self.template.fulltextclient.getOrigPages(**args)
     
-    def getQueryResultHitsText(self, **args):
-        """get query"""
-        return self.template.fulltextclient.getQueryResultHitsText(**args)
-    
-    def getQueryResultHitsFigures(self, **args):
-        """get query"""
-        return self.template.fulltextclient.getQueryResultHitsFigures(**args)
-    
-    def getPDF(self, **args):
-        """get query"""
-        return self.template.fulltextclient.getPDF(**args)
+    def getOrigPagesNorm(self, **args):
+        """get page"""
+        return self.template.fulltextclient.getOrigPagesNorm(**args)
 
+    def getQuery(self, **args):
+        """get query in search"""
+        return self.template.fulltextclient.getQuery(**args)
+     
     def getSearch(self, **args):
         """get search"""
         return self.template.fulltextclient.getSearch(**args)
@@ -227,19 +268,7 @@
     def getAllGisPlaces(self, **args):
         """get all gis places """
         return self.template.fulltextclient.getAllGisPlaces(**args)
-    
-    def getOrigPages(self, **args):
-        """get original page number """
-        return self.template.fulltextclient.getOrigPages(**args)
-    
-    def getNumPages(self, docinfo):
-        """get numpages"""
-        return self.template.fulltextclient.getNumPages(docinfo)
-   
-    def getNumTextPages(self, docinfo):
-        """get numpages text"""
-        return self.template.fulltextclient.getNumTextPages(docinfo)
-   
+       
     def getTranslate(self, **args):
         """get translate"""
         return self.template.fulltextclient.getTranslate(**args)
@@ -248,6 +277,14 @@
         """get lemma"""
         return self.template.fulltextclient.getLemma(**args)
 
+    def getLemmaQuery(self, **args):
+        """get query"""
+        return self.template.fulltextclient.getLemmaQuery(**args)
+
+    def getLex(self, **args):
+        """get lex"""
+        return self.template.fulltextclient.getLex(**args)
+
     def getToc(self, **args):
         """get toc"""
         return self.template.fulltextclient.getToc(**args)
@@ -283,7 +320,7 @@
         pt = getattr(self.template, 'thumbs_main_rss')
         
         if viewMode=="auto": # automodus gewaehlt
-            if docinfo.has_key("textURL") or docinfo.has_key('textURLPath'): #texturl gesetzt und textViewer konfiguriert
+            if docinfo.has_key("textURL") or docinfo.get('textURLPath',None): #texturl gesetzt und textViewer konfiguriert
                 viewMode="text"
             else:
                 viewMode="images"
@@ -319,14 +356,18 @@
             docinfo = self.getToc(mode=tocMode, docinfo=docinfo)
             
         if viewMode=="auto": # automodus gewaehlt
-            if docinfo.has_key('textURL') or docinfo.has_key('textURLPath'): #texturl gesetzt und textViewer konfiguriert
+            if docinfo.has_key('textURL') or docinfo.get('textURLPath',None): #textURL set and textViewer configured
                 viewMode="text_dict"
             else:
                 viewMode="images"
                 
         pageinfo = self.getPageinfo(start=start,current=pn, docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)
         
-        pt = getattr(self.template, 'viewer_main')               
+        if (docinfo.get('textURLPath',None)):
+            page = self.getTextPage(mode=viewMode, docinfo=docinfo, pageinfo=pageinfo)
+            pageinfo['textPage'] = page
+        tt = getattr(self, 'template')   
+        pt = getattr(tt, 'viewer_main')               
         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
   
     def generateMarks(self,mk):
@@ -342,10 +383,9 @@
     
     def getBrowser(self):
         """getBrowser the version of browser """
-        names=""
-        names = browserCheck(self)
-        #logging.debug("XXXXXXXXXXXXXXXX: %s"%names)
-        return names
+        bt = browserCheck(self)
+        logging.debug("BROWSER VERSION: %s"%(bt))
+        return bt
         
     def findDigilibUrl(self):
         """try to get the digilib URL from zogilib"""
@@ -364,41 +404,48 @@
         else:
             return style
     
-    def getLink(self,param=None,val=None):
-        """link to documentviewer with parameter param set to val"""
-        params=self.REQUEST.form.copy()
+    def getLink(self, param=None, val=None, params=None, baseUrl=None, paramSep='&'):
+        """returns URL to documentviewer with parameter param set to val or from dict params"""
+        # copy existing request params
+        urlParams=self.REQUEST.form.copy()
+        # change single param
         if param is not None:
             if val is None:
-                if params.has_key(param):
-                    del params[param]
+                if urlParams.has_key(param):
+                    del urlParams[param]
             else:
-                params[param] = str(val)
+                urlParams[param] = str(val)
                 
-        if params.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
-                params["mode"] = "imagepath"
-                params["url"] = getParentDir(params["url"])
+        # change more params
+        if params is not None:
+            for k in params.keys():
+                v = params[k]
+                if v is None:
+                    # val=None removes param
+                    if urlParams.has_key(k):
+                        del urlParams[k]
+                        
+                else:
+                    urlParams[k] = v
+
+        # FIXME: does this belong here?
+        if urlParams.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
+                urlParams["mode"] = "imagepath"
+                urlParams["url"] = getParentDir(urlParams["url"])
                 
-        # quote values and assemble into query string
-        #ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
-        ps = urllib.urlencode(params)
-        url=self.REQUEST['URL1']+"?"+ps
+        # quote values and assemble into query string (not escaping '/')
+        ps = paramSep.join(["%s=%s"%(k,urllib.quote_plus(v,'/')) for (k, v) in urlParams.items()])
+        #ps = urllib.urlencode(urlParams)
+        if baseUrl is None:
+            baseUrl = self.REQUEST['URL1']
+            
+        url = "%s?%s"%(baseUrl, ps)
         return url
 
-    def getLinkAmp(self,param=None,val=None):
+
+    def getLinkAmp(self, param=None, val=None, params=None, baseUrl=None):
         """link to documentviewer with parameter param set to val"""
-        params=self.REQUEST.form.copy()
-        if param is not None:
-            if val is None:
-                if params.has_key(param):
-                    del params[param]
-            else:
-                params[param] = str(val)
-                
-        # quote values and assemble into query string
-        logging.debug("XYXXXXX: %s"%repr(params.items()))
-        ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
-        url=self.REQUEST['URL1']+"?"+ps
-        return url
+        return self.getLink(param, val, params, baseUrl, '&')
     
     def getInfo_xml(self,url,mode):
         """returns info about the document as XML"""
@@ -463,12 +510,14 @@
         if txt is None:
             raise IOError("Unable to get dir-info from %s"%(infoUrl))
 
-        dom = Parse(txt)
-        sizes=dom.xpath("//dir/size")
-        logging.debug("documentViewer (getparamfromdigilib) dirInfo:size"%sizes)
+        dom = ET.fromstring(txt)
+        #dom = Parse(txt)
+        size=getText(dom.find("size"))
+        #sizes=dom.xpath("//dir/size")
+        logging.debug("documentViewer (getparamfromdigilib) dirInfo:size=%s"%size)
         
-        if sizes:
-            docinfo['numPages'] = int(getTextFromNode(sizes[0]))
+        if size:
+            docinfo['numPages'] = int(size)
         else:
             docinfo['numPages'] = 0
             
@@ -513,7 +562,8 @@
         if txt is None:
             raise IOError("Unable to read index meta from %s"%(url))
         
-        dom = Parse(txt)
+        dom = ET.fromstring(txt)
+        #dom = Parse(txt)
         return dom
     
     def getPresentationInfoXML(self, url):
@@ -532,7 +582,8 @@
         if txt is None:
             raise IOError("Unable to read infoXMLfrom %s"%(url))
             
-        dom = Parse(txt)
+        dom = ET.fromstring(txt)
+        #dom = Parse(txt)
         return dom
                         
         
@@ -550,11 +601,14 @@
                 path=getParentDir(path)
             dom = self.getDomFromIndexMeta(path)
        
-        acctype = dom.xpath("//access-conditions/access/@type")
-        if acctype and (len(acctype)>0):
-            access=acctype[0].value
-            if access in ['group', 'institution']:
-                access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
+        acc = dom.find(".//access-conditions/access")
+        if acc is not None:
+            acctype = acc.get('type')
+            #acctype = dom.xpath("//access-conditions/access/@type")
+            if acctype:
+                access=acctype
+                if access in ['group', 'institution']:
+                    access = getText(dom.find(".//access-conditions/access/name")).lower()
             
         docinfo['accessType'] = access
         return docinfo
@@ -576,7 +630,8 @@
         
         logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
         # put in all raw bib fields as dict "bib"
-        bib = dom.xpath("//bib/*")
+        bib = dom.find(".//bib/*")
+        #bib = dom.xpath("//bib/*")
         if bib and len(bib)>0:
             bibinfo = {}
             for e in bib:
@@ -585,10 +640,10 @@
         
         # extract some fields (author, title, year) according to their mapping
         metaData=self.metadata.main.meta.bib
-        bibtype=dom.xpath("//bib/@type")
-        if bibtype and (len(bibtype)>0):
-            bibtype=bibtype[0].value
-        else:
+        bib = dom.find(".//bib")
+        bibtype=bib.get("type")
+        #bibtype=dom.xpath("//bib/@type")
+        if not bibtype:
             bibtype="generic"
             
         bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC)
@@ -597,22 +652,63 @@
         logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap))
         logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype))
         # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
-        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
+        if len(bibmap) > 0 and (len(bibmap['author'][0]) > 0 or len(bibmap['title'][0]) > 0):
             try:
-                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
+                docinfo['author']=getText(bib.find(bibmap['author'][0]))
             except: pass
             try:
-                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
+                docinfo['title']=getText(bib.find(bibmap['title'][0]))
             except: pass
             try:
-                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
+                docinfo['year']=getText(bib.find(bibmap['year'][0]))
             except: pass
-            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
-            try:
-                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
-            except:
-                docinfo['lang']=''
-
+            
+            # ROC: why is this here?
+            #            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
+            #            try:
+            #                docinfo['lang']=getTextFromNode(dom.find(".//bib/lang")[0])
+            #            except:
+            #                docinfo['lang']=''
+            #            try:
+            #                docinfo['city']=getTextFromNode(dom.find(".//bib/city")[0])
+            #            except:
+            #                docinfo['city']=''
+            #            try:
+            #                docinfo['number_of_pages']=getTextFromNode(dom.find(".//bib/number_of_pages")[0])
+            #            except:
+            #                docinfo['number_of_pages']=''
+            #            try:
+            #                docinfo['series_volume']=getTextFromNode(dom.find(".//bib/series_volume")[0])
+            #            except:
+            #                docinfo['series_volume']=''
+            #            try:
+            #                docinfo['number_of_volumes']=getTextFromNode(dom.find(".//bib/number_of_volumes")[0])
+            #            except:
+            #                docinfo['number_of_volumes']=''
+            #            try:
+            #                docinfo['translator']=getTextFromNode(dom.find(".//bib/translator")[0])
+            #            except:
+            #                docinfo['translator']=''
+            #            try:
+            #                docinfo['edition']=getTextFromNode(dom.find(".//bib/edition")[0])
+            #            except:
+            #                docinfo['edition']=''
+            #            try:
+            #                docinfo['series_author']=getTextFromNode(dom.find(".//bib/series_author")[0])
+            #            except:
+            #                docinfo['series_author']=''
+            #            try:
+            #                docinfo['publisher']=getTextFromNode(dom.find(".//bib/publisher")[0])
+            #            except:
+            #                docinfo['publisher']=''
+            #            try:
+            #                docinfo['series_title']=getTextFromNode(dom.find(".//bib/series_title")[0])
+            #            except:
+            #                docinfo['series_title']=''
+            #            try:
+            #                docinfo['isbn_issn']=getTextFromNode(dom.find(".//bib/isbn_issn")[0])
+            #            except:
+            #                docinfo['isbn_issn']=''           
         return docinfo
     
      
@@ -626,7 +722,7 @@
                 path=getParentDir(path)
             dom = self.getDomFromIndexMeta(path)
 
-        docinfo['name']=getTextFromNode(dom.xpath("/resource/name")[0])
+        docinfo['name']=getText(dom.find("name"))
         logging.debug("documentViewer docinfo[name] %s"%docinfo['name'])
         return docinfo
     
@@ -643,15 +739,12 @@
         archivePath = None
         archiveName = None
     
-        archiveNames = dom.xpath("//resource/name")
-        if archiveNames and (len(archiveNames) > 0):
-            archiveName = getTextFromNode(archiveNames[0])
-        else:
+        archiveName = getTextFromNode(dom.find("name"))
+        if not archiveName:
             logging.warning("documentViewer (getdocinfofromtexttool) resource/name missing in: %s" % (url))
         
-        archivePaths = dom.xpath("//resource/archive-path")
-        if archivePaths and (len(archivePaths) > 0):
-            archivePath = getTextFromNode(archivePaths[0])
+        archivePath = getTextFromNode(dom.find("archive-path"))
+        if archivePath:
             # clean up archive path
             if archivePath[0] != '/':
                 archivePath = '/' + archivePath
@@ -667,11 +760,9 @@
             # we balk without archive-path
             raise IOError("Missing archive-path (for text-tool) in %s" % (url))
         
-        imageDirs = dom.xpath("//texttool/image")
-        if imageDirs and (len(imageDirs) > 0):
-            imageDir = getTextFromNode(imageDirs[0])
+        imageDir = getText(dom.find(".//texttool/image"))
             
-        else:
+        if not imageDir:
             # we balk with no image tag / not necessary anymore because textmode is now standard
             #raise IOError("No text-tool info in %s"%(url))
             imageDir = ""
@@ -688,15 +779,13 @@
             
             docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir
             
-        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
-        if viewerUrls and (len(viewerUrls) > 0):
-            viewerUrl = getTextFromNode(viewerUrls[0])
+        viewerUrl = getText(dom.find(".//texttool/digiliburlprefix"))
+        if viewerUrl:
             docinfo['viewerURL'] = viewerUrl
         
         # old style text URL
-        textUrls = dom.xpath("//texttool/text")
-        if textUrls and (len(textUrls) > 0):
-            textUrl = getTextFromNode(textUrls[0])
+        textUrl = getText(dom.find(".//texttool/text"))
+        if textUrl:
             if urlparse.urlparse(textUrl)[0] == "": #keine url
                 textUrl = os.path.join(archivePath, textUrl) 
             # fix URLs starting with /mpiwg/online
@@ -706,23 +795,25 @@
             docinfo['textURL'] = textUrl
     
         # new style text-url-path
-        textUrls = dom.xpath("//texttool/text-url-path")
-        if textUrls and (len(textUrls) > 0):
-            textUrl = getTextFromNode(textUrls[0])
+        textUrl = getText(dom.find(".//texttool/text-url-path"))
+        if textUrl:
             docinfo['textURLPath'] = textUrl
-            if not docinfo['imagePath']:
+            textUrlkurz = string.split(textUrl, ".")[0]
+            docinfo['textURLPathkurz'] = textUrlkurz
+            #if not docinfo['imagePath']:
                 # text-only, no page images
-                docinfo = self.getNumTextPages(docinfo)
+                #docinfo = self.getNumTextPages(docinfo)
+                  
          
-        presentationUrls = dom.xpath("//texttool/presentation")
+        presentationUrl = getText(dom.find(".//texttool/presentation"))
         docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom)   # get info von bib tag
         docinfo = self.getNameFromIndexMeta(url, docinfo=docinfo, dom=dom)
         
         
-        if presentationUrls and (len(presentationUrls) > 0): # ueberschreibe diese durch presentation informationen 
+        if presentationUrl: # override these with the presentation information
              # presentation url ergiebt sich ersetzen von index.meta in der url der fuer die Metadaten
              # durch den relativen Pfad auf die presentation infos
-            presentationPath = getTextFromNode(presentationUrls[0])
+            presentationPath = presentationUrl
             if url.endswith("index.meta"): 
                 presentationUrl = url.replace('index.meta', presentationPath)
             else:
@@ -740,15 +831,15 @@
         """
         dom=self.getPresentationInfoXML(url)
         try:
-            docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
+            docinfo['author']=getText(dom.find(".//author"))
         except:
             pass
         try:
-            docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
+            docinfo['title']=getText(dom.find(".//title"))
         except:
             pass
         try:
-            docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
+            docinfo['year']=getText(dom.find(".//date"))
         except:
             pass
         return docinfo
@@ -796,7 +887,11 @@
         else:
             logging.error("documentViewer (getdocinfo) unknown mode: %s!"%mode)
             raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
-                        
+                
+        # FIXME: fake texturlpath 
+        if not docinfo.has_key('textURLPath'):
+            docinfo['textURLPath'] = None
+        
         logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
         #logging.debug("documentViewer (getdocinfo) docinfo: %s"%)
         self.REQUEST.SESSION['docinfo'] = docinfo
@@ -826,8 +921,8 @@
                 pageinfo['numgroups'] += 1        
         pageinfo['viewMode'] = viewMode
         pageinfo['tocMode'] = tocMode
-        pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','regPlusNorm')
-        pageinfo['optionToggle'] = self.REQUEST.get('optionToggle','')
+        pageinfo['characterNormalization'] = self.REQUEST.get('characterNormalization','reg')
+        #pageinfo['optionToggle'] = self.REQUEST.get('optionToggle','1')
         pageinfo['query'] = self.REQUEST.get('query','') 
         pageinfo['queryType'] = self.REQUEST.get('queryType','')
         pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
@@ -835,12 +930,10 @@
         pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','')
         pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
         pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10')
-        pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
+        pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')     
         toc = int (pageinfo['tocPN'])
         pageinfo['textPages'] =int (toc)
         
-        
-        
         if 'tocSize_%s'%tocMode in docinfo:
             tocSize = int(docinfo['tocSize_%s'%tocMode])
             tocPageSize = int(pageinfo['tocPageSize'])