changeset 458:48b135b089c8 elementtree

more renovation
author casties
date Tue, 19 Jul 2011 20:46:35 +0200
parents 9fb231ad0bd2
children aabfa6124cfb
files MpdlXmlTextServer.py SrvTxtUtils.py documentViewer.py
diffstat 3 files changed, 115 insertions(+), 129 deletions(-)
--- a/MpdlXmlTextServer.py	Tue Jul 19 11:54:06 2011 +0200
+++ b/MpdlXmlTextServer.py	Tue Jul 19 20:46:35 2011 +0200
@@ -12,28 +12,8 @@
 import re
 import logging
 import urllib
-import documentViewer
-#from documentViewer import getTextFromNode, serializeNode
 
-def intOr0(s, default=0):
-    """convert s to int or return default"""
-    try:
-        return int(s)
-    except:
-        return default
-
-def getText(node):
-    """get the cdata content of a node"""
-    if node is None:
-        return ""
-    # ET:
-    text = node.text or ""
-    for e in node:
-        text += gettext(e)
-        if e.tail:
-            text += e.tail
-
-    return text
+from SrvTxtUtils import getInt, getText, getHttpData
 
 def serialize(node):
     """returns a string containing an XML snippet of node"""
@@ -90,7 +70,6 @@
     manage_changeMpdlXmlTextServerForm = PageTemplateFile("zpt/manage_changeMpdlXmlTextServer", globals())
         
     def __init__(self,id,title="",serverUrl="http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/", serverName=None, timeout=40):
-        
         """constructor"""
         self.id=id
         self.title=title
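
A minimal sketch of instantiating the server with the defaults from the constructor above (the id and title are placeholders; in practice the object is created through the Zope management interface):

    ts = MpdlXmlTextServer(id="mpdl-text", title="MPDL text server")
    # serverUrl defaults to "http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/",
    # timeout to 40 seconds
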
@@ -102,12 +81,12 @@
         
     def getHttpData(self, url, data=None):
         """returns result from url+data HTTP request"""
-        return documentViewer.getHttpData(url,data,timeout=self.timeout)
+        return getHttpData(url,data,timeout=self.timeout)
     
     def getServerData(self, method, data=None):
         """returns result from text server for method+data"""
         url = self.serverUrl+method
-        return documentViewer.getHttpData(url,data,timeout=self.timeout)
+        return getHttpData(url,data,timeout=self.timeout)
 
     # WTF: what does this really do? can it be integrated in getPage?
     def getSearch(self, pageinfo=None,  docinfo=None):
@@ -268,16 +247,16 @@
                 
             # countFigureEntries
             elif dc == 'countFigureEntries':
-                docinfo['countFigureEntries'] = intOr0(div.text)
+                docinfo['countFigureEntries'] = getInt(div.text)
                 
             # countTocEntries
             elif dc == 'countTocEntries':
                 # WTF: s1 = int(s)/30+1
-                docinfo['countTocEntries'] = intOr0(div.text)
+                docinfo['countTocEntries'] = getInt(div.text)
                 
             # numTextPages
             elif dc == 'countPages':
-                np = intOr0(div.text)                    
+                np = getInt(div.text)                    
                 if np > 0:
                     docinfo['numTextPages'] = np
                     if docinfo.get('numPages', 0) == 0:
@@ -504,17 +483,9 @@
                 pagediv = div
                 
             elif dc == 'queryResultHits':
-                docinfo['tocSize_%s'%mode] = intOr0(div.text)
+                docinfo['tocSize_%s'%mode] = getInt(div.text)
 
         if pagediv:
-#            # split xml in chunks
-#            tocs = []
-#            tocdivs = pagediv.findall('div')
-#            for p in zip(tocdivs[::2], tocdivs[1::2]):
-#                toc = serialize(p[0])
-#                toc += serialize(p[1])
-#                tocs.append(toc)
-#                logging.debug("pair: %s"%(toc))
             # store XML in docinfo
             docinfo['tocXML_%s'%mode] = ET.tostring(pagediv, 'UTF-8')
 
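
With this change MpdlXmlTextServer no longer reaches back into documentViewer for HTTP access; getHttpData() and getServerData() both delegate to the shared helper from SrvTxtUtils. A minimal sketch of the resulting call path ("someMethod" and the query parameter are placeholders, not actual text-server methods):

    from SrvTxtUtils import getHttpData

    serverUrl = "http://mpdl-text.mpiwg-berlin.mpg.de/mpdl/interface/"
    # getServerData(method, data) appends the method name to the base URL
    # and passes the query parameters along with the configured timeout:
    result = getHttpData(serverUrl + "someMethod", data={"param": "value"}, timeout=40)
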
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/SrvTxtUtils.py	Tue Jul 19 20:46:35 2011 +0200
@@ -0,0 +1,83 @@
+"""Utility methods for handling XML, reading HTTP, etc"""
+
+import sys
+import urllib
+import urllib2
+import logging
+
+
+srvTxtUtilsVersion = "1.0"
+
+def getInt(number, default=0):
+    """returns always an int (0 in case of problems)"""
+    try:
+        return int(number)
+    except:
+        return int(default)
+
+def getText(node):
+    """returns all text content of a node and its subnodes"""
+    if node is None:
+        return ""
+    # ElementTree:
+    text = node.text or ""
+    for e in node:
+    text += getText(e)
+        if e.tail:
+            text += e.tail
+
+    # 4Suite:
+    #nodelist=node.childNodes
+    #text = ""
+    #for n in nodelist:
+    #    if n.nodeType == node.TEXT_NODE:
+    #       text = text + n.data
+    
+    return text
+
+
+
+def getHttpData(url, data=None, num_tries=3, timeout=10):
+    """returns result from url+data HTTP request"""
+    # we do GET (by appending data to url)
+    if isinstance(data, str) or isinstance(data, unicode):
+        # if data is string then append
+        url = "%s?%s"%(url,data)
+    elif isinstance(data, dict) or isinstance(data, list) or isinstance(data, tuple):
+        # urlencode
+        url = "%s?%s"%(url,urllib.urlencode(data))
+    
+    response = None
+    errmsg = None
+    for cnt in range(num_tries):
+        try:
+            logging.debug("getHttpData(#%s %ss) url=%s"%(cnt+1,timeout,url))
+            if sys.version_info < (2, 6):
+                # set timeout on socket -- ugly :-(
+                import socket
+                socket.setdefaulttimeout(float(timeout))
+                response = urllib2.urlopen(url)
+            else:
+                # timeout as parameter
+                response = urllib2.urlopen(url,timeout=float(timeout))
+            # check result?
+            break
+        except urllib2.HTTPError, e:
+            logging.error("getHttpData: HTTP error(%s): %s"%(e.code,e))
+            errmsg = str(e)
+            # stop trying
+            break
+        except urllib2.URLError, e:
+            logging.error("getHttpData: URLLIB error(%s): %s"%(e.reason,e))
+            errmsg = str(e)
+            # keep trying (break disabled, so URL errors are retried)
+            #break
+
+    if response is not None:
+        data = response.read()
+        response.close()
+        return data
+    
+    raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg))
+    #return None
+
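
The new module is self-contained. A quick sketch of how the three helpers behave (the URL is a placeholder):

    import xml.etree.ElementTree as ET
    from SrvTxtUtils import getInt, getText, getHttpData

    getInt("42")      # 42
    getInt("n/a")     # 0 -- default on parse errors
    getInt(None, 10)  # 10 -- explicit default

    node = ET.fromstring("<p>Hello <b>world</b>!</p>")
    getText(node)     # "Hello world!" -- element text, child text and tails

    # HTTP GET with up to 3 retries and a 10 second timeout;
    # raises IOError if no response could be fetched:
    body = getHttpData("http://example.com/api", data={"q": "test"})
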
--- a/documentViewer.py	Tue Jul 19 11:54:06 2011 +0200
+++ b/documentViewer.py	Tue Jul 19 20:46:35 2011 +0200
@@ -1,11 +1,9 @@
-
 from OFS.Folder import Folder
 from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
 from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
 from AccessControl import ClassSecurityInfo
 from AccessControl import getSecurityManager
 from Globals import package_home
-from Products.zogiLib.zogiLib import browserCheck
 
 #from Ft.Xml import EMPTY_NAMESPACE, Parse 
 #import Ft.Xml.Domlette
@@ -15,47 +13,19 @@
 import os.path
 import sys
 import urllib
-import urllib2
 import logging
 import math
 import urlparse 
 import re
 import string
 
+from SrvTxtUtils import getInt, getText, getHttpData
+
 def logger(txt,method,txt2):
     """logging"""
     logging.info(txt+ txt2)
     
     
-def getInt(number, default=0):
-    """returns always an int (0 in case of problems)"""
-    try:
-        return int(number)
-    except:
-        return int(default)
-
-def getText(node):
-    """get the cdata content of a node"""
-    if node is None:
-        return ""
-    # ET:
-    text = node.text or ""
-    for e in node:
-        text += gettext(e)
-        if e.tail:
-            text += e.tail
-
-    # 4Suite:
-    #nodelist=node.childNodes
-    #text = ""
-    #for n in nodelist:
-    #    if n.nodeType == node.TEXT_NODE:
-    #       text = text + n.data
-    
-    return text
-
-getTextFromNode = getText
-
 def serializeNode(node, encoding="utf-8"):
     """returns a string containing node as XML"""
     s = ET.tostring(node)
@@ -128,54 +98,23 @@
 
     return bt
 
-       
 def getParentDir(path):
     """returns pathname shortened by one"""
     return '/'.join(path.split('/')[0:-1])
         
-
-def getHttpData(url, data=None, num_tries=3, timeout=10):
-    """returns result from url+data HTTP request"""
-    # we do GET (by appending data to url)
-    if isinstance(data, str) or isinstance(data, unicode):
-        # if data is string then append
-        url = "%s?%s"%(url,data)
-    elif isinstance(data, dict) or isinstance(data, list) or isinstance(data, tuple):
-        # urlencode
-        url = "%s?%s"%(url,urllib.urlencode(data))
-    
-    response = None
-    errmsg = None
-    for cnt in range(num_tries):
-        try:
-            logging.debug("getHttpData(#%s %ss) url=%s"%(cnt+1,timeout,url))
-            if sys.version_info < (2, 6):
-                # set timeout on socket -- ugly :-(
-                import socket
-                socket.setdefaulttimeout(float(timeout))
-                response = urllib2.urlopen(url)
-            else:
-                response = urllib2.urlopen(url,timeout=float(timeout))
-            # check result?
-            break
-        except urllib2.HTTPError, e:
-            logging.error("getHttpData: HTTP error(%s): %s"%(e.code,e))
-            errmsg = str(e)
-            # stop trying
-            break
-        except urllib2.URLError, e:
-            logging.error("getHttpData: URLLIB error(%s): %s"%(e.reason,e))
-            errmsg = str(e)
-            # stop trying
-            #break
-
-    if response is not None:
-        data = response.read()
-        response.close()
-        return data
-    
-    raise IOError("ERROR fetching HTTP data from %s: %s"%(url,errmsg))
-    #return None
+def getBibdataFromDom(dom):
+    """returns dict with all elements from bib-tag"""
+    bibinfo = {}
+    bib = dom.find(".//meta/bib")
+    if bib is not None:
+        # put the type attribute in '@type'
+        bibinfo['@type'] = bib.get('type')
+        # put all subelements in dict
+        for e in bib:
+            bibinfo[e.tag] = getText(e)
+            
+    return bibinfo
 
 ##
 ## documentViewer class
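
For reference, a sketch of the dict that the new getBibdataFromDom() produces for a typical index-meta fragment (the field values are made up; the author/title/year tags match the mapping used further down):

    import xml.etree.ElementTree as ET

    dom = ET.fromstring("<resource><meta><bib type='book'>"
                        "<author>Galilei, Galileo</author>"
                        "<title>Discorsi</title>"
                        "<year>1638</year>"
                        "</bib></meta></resource>")
    getBibdataFromDom(dom)
    # {'@type': 'book', 'author': 'Galilei, Galileo',
    #  'title': 'Discorsi', 'year': '1638'}
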
@@ -304,7 +243,7 @@
         
         '''
         logging.debug("HHHHHHHHHHHHHH:load the rss")
-        logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
+        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
         
         if not hasattr(self, 'template'):
             # create template folder if it doesn't exist
@@ -634,38 +573,31 @@
         
         logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
         # put in all raw bib fields as dict "bib"
-        bib = dom.find(".//bib")
-        #bib = dom.xpath("//bib/*")
-        if bib is not None:
-            bibinfo = {}
-            for e in bib:
-                bibinfo[e.tag] = getText(e)
-                
-            docinfo['bib'] = bibinfo
+        bib = getBibdataFromDom(dom)
+        docinfo['bib'] = bib
         
         # extract some fields (author, title, year) according to their mapping
         metaData=self.metadata.main.meta.bib
-        bibtype=bib.get("type")
+        bibtype=bib.get("@type")
         #bibtype=dom.xpath("//bib/@type")
         if not bibtype:
             bibtype="generic"
             
-        bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC)
+        bibtype=bibtype.replace("-"," ") # wrong types in index meta "-" instead of " " (not wrong! ROC)
         docinfo['bib_type'] = bibtype
         bibmap=metaData.generateMappingForType(bibtype)
         logging.debug("documentViewer (getbibinfofromindexmeta) bibmap:"+repr(bibmap))
         logging.debug("documentViewer (getbibinfofromindexmeta) bibtype:"+repr(bibtype))
         # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
-        logging.debug("bibmap: %s"%repr(bibmap))
         if len(bibmap) > 0 and bibmap.get('author',None) or bibmap.get('title',None):
             try:
-                docinfo['author']=getText(bib.find(bibmap['author'][0]))
+                docinfo['author']=bib.get(bibmap['author'][0])
             except: pass
             try:
-                docinfo['title']=getText(bib.find(bibmap['title'][0]))
+                docinfo['title']=bib.get(bibmap['title'][0])
             except: pass
             try:
-                docinfo['year']=getText(bib.find(bibmap['year'][0]))
+                docinfo['year']=bib.get(bibmap['year'][0])
             except: pass
             
             # ROC: why is this here?
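
The field extraction above now reads from the bib dict instead of re-querying the DOM. The bibmap values are assumed (as the [0]-indexing implies) to be lists whose first entry is the tag name; a worked example with a hypothetical mapping:

    bibmap = {'author': ['author'], 'title': ['title'], 'year': ['year']}
    bib = {'@type': 'book', 'author': 'Galilei, Galileo',
           'title': 'Discorsi', 'year': '1638'}
    docinfo['author'] = bib.get(bibmap['author'][0])  # 'Galilei, Galileo'
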
@@ -896,8 +828,8 @@
         if not docinfo.has_key('textURLPath'):
             docinfo['textURLPath'] = None
         
-        logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
-        #logging.debug("documentViewer (getdocinfo) docinfo: %s"%)
+        logging.debug("documentViewer (getdocinfo) docinfo: keys=%s"%docinfo.keys())
+        #logging.debug("documentViewer (getdocinfo) docinfo: %s"%docinfo)
         self.REQUEST.SESSION['docinfo'] = docinfo
         return docinfo