--- ECHO_content/ECHO_xslt.py	2006/09/11 14:43:23	1.10
+++ ECHO_content/ECHO_xslt.py	2007/01/31 14:12:39	1.19
@@ -7,7 +7,13 @@ from ECHO_Nav import ECHO_pageTemplate
 from threading import Thread,Timer
 import threading
 from ECHO_helpers import *
-import ECHO_language
+try:
+    from ECHO_language import *
+except:
+    print "no echo language"
+    class ECHO_language:
+        """empty fallback class"""
+        pass
 import sys
 import urllib
 import urlparse
@@ -19,7 +25,7 @@ import transaction
 import os.path
 
-import urllib
+import urllib,cgi
 
 try:
     from Ft.Xml.Xslt.Processor import Processor
@@ -77,7 +83,7 @@ class getXML(Implicit):
         #print self.xsl
-        #print xsltproc.run(document)
+        #< xsltproc.run(document)
         tmp=xsltproc.run(document)
 
         self.result=tmp[0:]
@@ -100,7 +106,7 @@ from ZODB.FileStorage import FileStorage
 class ECHO_cache:
     def __init__(self):
         """init the storage"""
-        self.storage=FileStorage("/var/tmp/echo_cache.fs")
+        self.storage=FileStorage(os.path.join(INSTANCE_HOME,"var/echo_cache.fs"))
         self.db=DB(self.storage)
         self.connection=self.db.open()
         self.root=self.connection.root()
@@ -137,10 +143,11 @@ class ECHO_cache:
         if not fileStore:
             return None
         else:
+            
             return self.root[name].get(pn,None)
 
 
-class ECHO_xslt(ECHO_pageTemplate,ECHO_language.ECHO_language):
+class ECHO_xslt(ECHO_pageTemplate,ECHO_language):
     """ECHO_xslt class"""
 
     meta_type="ECHO_xslt"
@@ -252,28 +259,202 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l
         fn=self.REQUEST['fn']
         self.cache.deleteObject(fn)
 
-    def getPageLex(self,_pn="1",_caching=None):
+
+    def createLinkNode(self,url,dom):
+        """create a link node"""
+        txt=dom.createTextNode("")
+        node=dom.createElementNS("http://test.de","a")
+        node.setAttributeNS("http://test.de","href",url)
+        node.appendChild(txt)
+        return node
+
+    def forwardLink(self,linkid,url,type="target",RESPONSE=None):
+        """forward to link"""
+        if RESPONSE:
+            RESPONSE.redirect(self.getLink(linkid,url,type=type))
+
+        else:
+            return self.getLink(linkid,url,type=type)
+    def getLink(self,linkid,url,type="target"):
+        """get target for linkid"""
+        dom=NonvalidatingReader.parseUri(url)
+
+        masterurl=dom.xpath("//mpiwg:masterurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+        slaveurl=dom.xpath("//mpiwg:slaveurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+        #check now whether they are in the link file
+
+        xp="//mpiwg:link[@id='%s']"%linkid
+
+        if type=="target":
+            for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                fn=link.xpath("mpiwg:target/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn is a URL
+                    return urllib.unquote(fn) # then return it as is
+
+                ref=link.xpath("mpiwg:target/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                ref2=link.xpath("mpiwg:target/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                selectionNodeIndex=link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                lstr=slaveurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'target')
+                lstr+="&_links="+urllib.quote(url)
+
+        else:
+            for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                fn=link.xpath("mpiwg:source/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn is a URL
+                    return urllib.unquote(fn) # then return it as is
+
+                ref=link.xpath("mpiwg:source/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                ref2=link.xpath("mpiwg:source/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                selectionNodeIndex=link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                lstr=masterurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'source')
+                lstr+="&_links="+urllib.quote(url)
+        return lstr
+
+    def addLinksUrl(self,txt,url):
+        """add reference to links to url"""
+        ret=[]
+        dom=NonvalidatingReader.parseUri(url)
+        textDom=NonvalidatingReader.parseString(txt)
+
+        #find ids in txt
+        ids=textDom.xpath("//*[@id]")
+
+        for textid in ids:
+            xp="//mpiwg:link[mpiwg:source/@refid='%s']"%textid.xpath("@id")[0].value
+            for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                ref2=link.xpath("mpiwg:source/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                selectionNodeIndex=link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                linkid=link.xpath("@id")[0].value
+                ret.append('%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'source'))
+
+            xp="//mpiwg:link[mpiwg:target/@refid='%s']"%textid.xpath("@id")[0].value
+            for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                ref2=link.xpath("mpiwg:target/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                selectionNodeIndex=link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                linkid=link.xpath("@id")[0].value
+                ret.append('%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'target'))
+
+
+        return ret
+
+    def addLinks(self,txt,url="http://127.0.0.1:8080/HFQP/linkCreator/getCollectionXML?collection=commentary2"):
+        """add links to a page from xml linkfile"""
+
+        dom=NonvalidatingReader.parseUri(url)
+        textDom=NonvalidatingReader.parseString(txt)
+
+        #find ids in txt
+        ids=textDom.xpath("//*[@id]")
+        masterurl=dom.xpath("//mpiwg:masterurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+        slaveurl=dom.xpath("//mpiwg:slaveurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+        #check now whether they are in the link file
+        for textid in ids:
+            xp="//mpiwg:link[mpiwg:source/@refid='%s']"%textid.xpath("@id")[0].value
+            for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                fn=link.xpath("mpiwg:target/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                print fn
+                if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn is a URL
+                    lstr=urllib.unquote(fn) # then use it as is
+                else:
+                    try:
+                        ref=link.xpath("mpiwg:target/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+
+                        ref2=link.xpath("mpiwg:target/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
+                        selectionNodeIndex=link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
linkid=link.xpath("@id")[0].value + lstr=slaveurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'target') + lstr+="&_links="+urllib.quote(url) + except: + lstr="" + node=self.createLinkNode(lstr,textDom) + textid.parentNode.insertBefore(node,textid) + + + xp="//mpiwg:link[mpiwg:target/@refid='%s']"%textid.xpath("@id")[0].value + for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}): + fn=link.xpath("mpiwg:source/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value + if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn ist eine url + lstr=urllib.unquote(fn) # dann gibt diese zurueck + else: + + ref=link.xpath("mpiwg:source/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value + + ref2=link.xpath("mpiwg:source/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value + selectionNodeIndex=link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value + linkid=link.xpath("@id")[0].value + lstr=masterurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,"source") + lstr+="&_links="+urllib.quote(url) + + node=self.createLinkNode(lstr,textDom) + textid.parentNode.insertBefore(node,textid) + + + + strio = StringIO() + PrettyPrint(textDom,strio) + xmlstr = strio.getvalue() + + return xmlstr + + + + def getPageLex(self,_pn="1",_id=None,_caching=None,_links=None,_showall="no",_displaylinks="yes"): """getpage mit lexikalischer analyse und xslt transform if _caching=yes dann wird die lwxikalisch analysierte seite in einem cache abgespeichert """ - + def encode(hash): + ret=[] + for x in hash.keys(): + value=hash[x] + + if type(value) is ListType: + for z in value: + ret.append("%s=%s"%(x,z)) + else: + ret.append("%s=%s"%(x,value)) + return "&".join(ret) + + + if not _caching: _caching=self.caching fn=self.REQUEST['fn'] - - fromCache=self.cache.retrieveObject(fn,_pn) - - if fromCache and _caching=="yes": - - txt = fromCache + if not _id: + + fromCache=self.cache.retrieveObject(fn,_pn) + + if fromCache and _caching=="yes": + + txt = fromCache + else: + txt=self.tagLex(nr=_pn) + + self.cache.storeObject(fn,_pn,txt[0:]) + else: - txt=self.tagLex(nr=_pn) - - self.cache.storeObject(fn,_pn,txt[0:]) - - + txt=self.tagLex(id=_id) + + if _showall=="yes": + params=cgi.parse_qs(self.REQUEST['QUERY_STRING']) + + params['_pagelink']=self.addLinksUrl(txt,url=_links) + params['_showall']='no' + + print self.absolute_url()+"?"+encode(params) + self.REQUEST.RESPONSE.redirect(self.absolute_url()+"/getPageLex?"+encode(params)) + + xsl=self.xslt() xsltproc=Processor() @@ -285,6 +466,14 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l xsltproc.appendStylesheet(stylesheet) tmp=xsltproc.run(document) + if _links and (_displaylinks=='yes'): + _links=urllib.unquote(_links) + tmp=self.addLinks(tmp,url=_links) + + #bugfix for digilib images which doesn't accept & + tmp=tmp.replace("&","&") + + return tmp[0:] def getTextInput(self): @@ -309,14 +498,14 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l #return InputSource.InputSource(fact) #xmlt=urllib.urlopen(qs).read() - def getPage(self,_pn,REQUEST=None,_caching=None): + def getPage(self,_pn="-1",_id=None,REQUEST=None,_caching=None): """get a page from an xml""" if not _caching: _caching=self.caching pn=int(_pn)-1 - if pn<0: + if pn<0 and (not _id): if REQUEST: return "Sorry, pagenumbers have to be greater than 0" 
            else:
@@ -325,7 +514,10 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l
         xmlt,self.baseUri=self.getTextInput()
 
         #get the text from cache, if existing
-        fromCache=self.cache.retrieveObject(self.baseUri,"-1")
+        try:
+            fromCache=self.cache.retrieveObject(self.baseUri,"-1")
+        except:
+            fromCache=None
 
         if fromCache and _caching=="yes":
             txt = fromCache
@@ -335,14 +527,33 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l
             self.cache.storeObject(self.baseUri,"-1",txt)
+
         dom=NonvalidatingReader.parseString(txt,self.baseUri)
 
         #pb should have a namespace
-
+        pbs=dom.xpath("//mpiwg:pb",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})
+
+        if len(pbs)==0: # try again without the namespace
             pbs=dom.xpath("//pb")
+
+        if _id:
+            #count how many pb occur before the id
+
+
+            idpb=dom.xpath("//*[@id='%s']/preceding::node()/mpiwg:pb"%_id,explicitNss={'html':'http://www.w3.org/1999/xhtml','mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})
+            if len(idpb)==0:
+                idpb=dom.xpath("//*[@id='%s']/preceding::node()/pb"%_id)
+
+            if len(idpb)==0:
+                k=0
+                for node in dom.xpath("//*[@id='%s']//preceding::node()"%_id,explicitNss={'html':'http://www.w3.org/1999/xhtml','mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
+                    if getattr(node,'tagName',"")=="mpiwg:pb":
+                        k+=1
+            else:
+                k=len(idpb)
+            #pn=k-1 #-1 because page numbering starts at 0
+            pn=k-1 #-1 because page numbering starts at 0
         if pn > len(pbs):
             if REQUEST:
                 return "Sorry, pagenumber %s does not exist"%(pn+1)
@@ -366,7 +577,7 @@ class ECHO_xslt(ECHO_pageTemplate,ECHO_l
                 if parent:
                     parent[0].removeChild(node)
             except:
-                zLOG.LOG("ECHO_Resource (getAccessRightMD)", zLOG.INFO,"%s (%s)"%sys.exc_info()[0:2])
+                logger("ECHO_Resource (getAccessRightMD)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
         strio = StringIO()
         PrettyPrint(dom,strio)
         xmlstr = strio.getvalue()
@@ -424,4 +635,3 @@ def manage_addECHO_xslt(self, id, label,
         u = "%s/%s" % (u, quote(id))
     REQUEST.RESPONSE.redirect(u+'/manage_main')
     return ''
-