--- ECHO_content/ECHO_xslt.py	2005/10/26 08:35:53	1.5
+++ ECHO_content/ECHO_xslt.py	2006/09/14 14:31:53	1.11
@@ -7,14 +7,15 @@
 from ECHO_Nav import ECHO_pageTemplate
 from threading import Thread,Timer
 import threading
 from ECHO_helpers import *
+import ECHO_language
 import sys
 import urllib
 import urlparse
-
-
-
+from Ft.Xml.Domlette import Print, PrettyPrint
+from StringIO import StringIO
+from types import *
 from Globals import package_home
-
+import transaction
 import os.path
 
@@ -22,8 +23,8 @@
 import urllib
 try:
     from Ft.Xml.Xslt.Processor import Processor
-    from Ft.Xml import InputSource
-    
+    from Ft.Xml import InputSource, EMPTY_NAMESPACE,Parse
+    from Ft.Xml.Domlette import NonvalidatingReader
 except:
     print "4suite has to be installed"
 
@@ -61,7 +62,7 @@ class getXML(Implicit):
     def run(self):
         """call it"""
         xml=""
-
+        
         try:
             urlH=urllib.urlopen(self._v_qs)
 
@@ -94,13 +95,64 @@ class getXML(Implicit):
 
         return self.result
-    
+from ZODB import DB
+from ZODB.FileStorage import FileStorage
 
+class ECHO_cache:
+    def __init__(self):
+        """init the storage"""
+        self.storage=FileStorage("/var/tmp/echo_cache.fs")
+        self.db=DB(self.storage)
+        self.connection=self.db.open()
+        self.root=self.connection.root()
 
-class ECHO_xslt(ECHO_pageTemplate):
+    def deleteObject(self,name,pn=None):
+        """delete an object from cache; without pn the whole entry goes"""
+        fileStore=self.root.get(name,None)
+        if fileStore:
+            if not pn:
+                del(self.root[name])
+            else:
+                if self.root[name].get(pn,None):
+                    del(self.root[name][pn])
+
+    def storeObject(self,name,pn,object):
+        """store an object under (name, pn) and commit"""
+
+        if not self.root.get(name,None):
+            self.root[name]={}
+
+        #following is necessary to make clear that object has really changed for ZODB
+        tmp=self.root[name]
+        tmp[pn]=object
+        self.root[name]=tmp
+        transaction.get().commit()
+        return True
+
+    def retrieveObject(self,name,pn):
+        """retrieve the object stored under (name, pn), or None"""
+
+        fileStore=self.root.get(name,None)
+        if not fileStore:
+            return None
+        else:
+            return self.root[name].get(pn,None)
+
+class ECHO_xslt(ECHO_pageTemplate,ECHO_language.ECHO_language):
     """ECHO_xslt classe"""
     meta_type="ECHO_xslt"
+    cache=ECHO_cache() # cache for analysed pages
+    caching="yes"
+
+    appendQueryString=True # add query string to the cgiUrl; can be changed with addChanges
+
+    passURL=False #use url from querystring parameter fn to retrieve the text and not the url in cgi-url; can be changed with addChanges
+
+    results={}
 
     manage_options=ECHO_pageTemplate.manage_options+(
         {'label':'Change xml-ressource','action':'change_ECHO_xsltForm'},)
@@ -119,13 +171,30 @@ class ECHO_xslt(ECHO_pageTemplate):
         pt=zptFile(self, 'zpt/ChangeECHO_xsltForm.zpt')
         return pt()
 
-    def addChanges(self,cgiUrl,RESPONSE=None):
+    def addChanges(self,cgiUrl,appendQueryString=False,passURL=False,caching=False,RESPONSE=None):
         """change the xslt, ueberschriebt addChanges in ECHO_PageTemplate"""
+
         if urlparse.urlparse(cgiUrl)[0]=="":#relative url in absolute
             self.cgiUrl=urlparse.urljoin(self.absolute_url(), cgiUrl)
         else:
             self.cgiUrl=cgiUrl
-
+
+        if appendQueryString:
+            self.appendQueryString=True
+        else:
+            self.appendQueryString=False
+
+        if passURL:
+            self.passURL=True
+        else:
+            self.passURL=False
+
+        if caching:
+            self.caching="yes"
+        else:
+            self.caching="No"
+
         if RESPONSE:
             RESPONSE.redirect("manage_main")
 
@@ -141,7 +210,7 @@ class ECHO_xslt(ECHO_pageTemplate):
 
         if getattr(self,'cgiUrl','')=='':
             self.cgiUrl="http://medea.mpiwg-berlin.mpg.de/cgi-bin/search/q1"
-        qs="%s?%s"%(self.cgiUrl,self.REQUEST['QUERY_STRING'])
+        qs="%s%s"%(self.cgiUrl,self.REQUEST['QUERY_STRING'])
         xsl=self.absolute_url()+"/xslt"
         self._v_xmltrans=getXML().__of__(self)
         #self._xmltrans.start()
@@ -173,6 +242,160 @@ class ECHO_xslt(ECHO_pageTemplate):
 
         return self._v_xmltrans.getResult()
 
+    def getText(self):
+        """redirect to the text source url only"""
+        qs,baseUri=self.getTextInput()
+        self.REQUEST.RESPONSE.redirect(qs)
+
+    def deleteCache(self):
+        """delete the entry for request parameter fn from the cache"""
+        fn=self.REQUEST['fn']
+        self.cache.deleteObject(fn)
+
+    def getPageLex(self,_pn="1",_id=None,_caching=None):
+        """get page with lexical analysis and xslt transform
+        if _caching=yes the lexically analysed page is stored in a cache
+        """
+
+        if not _caching:
+            _caching=self.caching
+
+        fn=self.REQUEST['fn']
+
+        if not _id:
+            fromCache=self.cache.retrieveObject(fn,_pn)
+
+            if fromCache and _caching=="yes":
+
+                txt = fromCache
+            else:
+                txt=self.tagLex(nr=_pn)
+
+                self.cache.storeObject(fn,_pn,txt[0:])
+
+        else:
+            txt=self.tagLex(id=_id)
+
+        xsl=self.xslt()
+
+        xsltproc=Processor()
+        if type(txt)==UnicodeType:
+            document = InputSource.DefaultFactory.fromString(txt.encode('utf-8'))
+        else:
+            document = InputSource.DefaultFactory.fromString(txt)
+        stylesheet = InputSource.DefaultFactory.fromString(xsl)
+        xsltproc.appendStylesheet(stylesheet)
+        tmp=xsltproc.run(document)
+        #bugfix for digilib images which doesn't accept &amp;
+        tmp=tmp.replace("&amp;","&")
+        return tmp[0:]
+
+    def getTextInput(self):
+        """get the text
+        how the text is retrieved depends on the configuration:
+        if appendQueryString is set, the query string is appended to the configured url; fn= is expected
+        for the path; if passURL is set and fn= contains a complete url, that url is used instead of the one defined in cgiUrl.
+        """
+
+        if getattr(self,'passURL',False) and self.REQUEST.has_key('fn') and (urlparse.urlparse(self.REQUEST['fn'])[0]=='http'):
+            qs=self.REQUEST['fn']
+            baseUri=qs
+        elif getattr(self,'appendQueryString',True):
+            qs="%s%s"%(self.cgiUrl,self.REQUEST['QUERY_STRING'])
+            baseUri=self.cgiUrl
+        else:
+            qs="%s"%(self.cgiUrl)
+            baseUri=self.cgiUrl
+
+        #fact= InputSource.DefaultFactory.fromUri(qs)
+        return qs,baseUri
+        #return InputSource.InputSource(fact)
+        #xmlt=urllib.urlopen(qs).read()
+
+    def getPage(self,_pn="-1",_id=None,REQUEST=None,_caching=None):
+        """get a page from an xml"""
+
+        if not _caching:
+            _caching=self.caching
+
+        pn=int(_pn)-1
+        if pn<0 and (not _id):
+            if REQUEST:
+                return "Sorry, pagenumbers have to be greater than 0"
+            else:
+                return None
+
+        xmlt,self.baseUri=self.getTextInput()
+
+        #get the text from cache, if existing
+        fromCache=self.cache.retrieveObject(self.baseUri,"-1")
+        if fromCache and _caching=="yes":
+
+            txt = fromCache
+        else:
+
+            txt=urllib.urlopen(xmlt).read()
+
+            self.cache.storeObject(self.baseUri,"-1",txt)
+
+        dom=NonvalidatingReader.parseString(txt,self.baseUri)
+
+        #pb should have a namespace
+
+        pbs=dom.xpath("//mpiwg:pb",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})
+
+        if len(pbs)==0: # retry without the namespace
+            pbs=dom.xpath("//pb")
+
+        if _id:
+            #count how many pb come before the id
+
+            idpb=dom.xpath("//*[@id='%s']/preceding::node()/mpiwg:pb"%_id,explicitNss={'html':'http://test.de','mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})
+            if len(idpb)==0:
+                idpb=dom.xpath("//*[@id='%s']/preceding::node()/pb"%_id)
+
+            if len(idpb)==0:
+                k=0
+                for node in dom.xpath("//*[@id='%s']//preceding::node()"%_id,explicitNss={'html':'http://test.de','mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                    if getattr(node,'tagName',"")=="mpiwg:pb":
+                        k+=1
+            else:
+                k=len(idpb)
+            pn=k-1 #-1 because page numbering starts at 0
+
+        if pn > len(pbs):
+            if REQUEST:
+                return "Sorry, pagenumber %s does not exist"%(pn+1)
+            else:
+                return None
+
+        beginNode=pbs[pn] #take the n'th pb
+
+        if not (pn==len(pbs)-1): # not the last page
+            endNode=pbs[pn+1]
+        else:
+            endNode=None
+
+        deleteNodes=beginNode.xpath('preceding::node()')
+        if endNode:
+            deleteNodes+=endNode.xpath('following::node()')
+        for node in deleteNodes:
+            try:
+                parent=node.xpath("..")
+
+                if parent:
+                    parent[0].removeChild(node)
+            except:
+                zLOG.LOG("ECHO_Resource (getAccessRightMD)", zLOG.INFO,"%s (%s)"%sys.exc_info()[0:2])
+        strio = StringIO()
+        PrettyPrint(dom,strio)
+        xmlstr = strio.getvalue()
+
+        return xmlstr
+
 def manage_addECHO_xsltForm(self):
     """Form for adding"""
     pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','AddECHO_xslt.zpt')).__of__(self)