Annotation of ECHO_content/ECHO_xslt.py, revision 1.22
1.1 dwinter 1: ### XSLT Class ###
2: ### setzt 4 suite vorraus ###
1.4 dwinter 3: from Acquisition import Implicit
1.1 dwinter 4: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
5: from Globals import DTMLFile
6: from ECHO_Nav import ECHO_pageTemplate
1.2 dwinter 7: from threading import Thread,Timer
1.4 dwinter 8: import threading
9: from ECHO_helpers import *
# Optional i18n support: fall back to an empty ECHO_language mixin if the
# ECHO_language product is not installed.
try:
    from ECHO_language import *
except:
    print "no echo language"

    class ECHO_language:
        """Empty placeholder class used when the ECHO_language product is missing."""
        pass
1.4 dwinter 17: import sys
18: import urllib
19: import urlparse
1.6 dwinter 20: from Ft.Xml.Domlette import Print, PrettyPrint
1.7 dwinter 21: from StringIO import StringIO
1.9 dwinter 22: from types import *
1.1 dwinter 23: from Globals import package_home
1.9 dwinter 24: import transaction
1.4 dwinter 25:
1.1 dwinter 26: import os.path
27:
1.14 dwinter 28: import urllib,cgi
1.20 dwinter 29: import logging
# 4Suite supplies the XSLT processor and XML parser used throughout this
# module; without it the transformation features cannot work.
try:
    from Ft.Xml.Xslt.Processor import Processor
    from Ft.Xml import InputSource, EMPTY_NAMESPACE, Parse
    from Ft.Xml.Domlette import NonvalidatingReader
except:
    print "4suite has to be installed"
36:
37:
class getXML(Thread):
    """Worker thread that fetches an XML document and applies an XSLT
    stylesheet to it.

    Usage: create the thread, call set() with the source and stylesheet
    URIs, start() it, and poll getResult() until it returns non-None.
    """

    def set(self, qs, xsl, result):
        """Configure the worker before starting it.

        @param qs: URI of the XML document to transform
        @param xsl: URI of the XSLT stylesheet
        @param result: unused, kept for backward compatibility; the
            stored result is always reset to None here
        """
        self._v_qs = qs
        self.xsl = xsl
        self.result = None

    def __call__(self):
        """Run the transformation synchronously (Zope callable protocol)."""
        self.run()
        return True

    def run(self):
        """Fetch document and stylesheet, run the transform and store the
        result; on any failure store an HTML error page instead."""
        xml = ""
        try:
            xsltproc = Processor()
            logging.debug("start XML")
            document = InputSource.DefaultFactory.fromUri(self._v_qs)
            stylesheet = InputSource.DefaultFactory.fromUri(self.xsl)
            logging.debug("got all files XML")
            xsltproc.appendStylesheet(stylesheet)
            logging.debug("got all files do the transform")
            tmp = xsltproc.run(document)
            self.result = tmp[0:]
        except Exception:
            # was a bare except that silently produced the error page;
            # log the traceback so failures are diagnosable
            logging.exception("getXML: transformation of %r failed" % (getattr(self, '_v_qs', None),))
            self.result = "<html>error: %s %s<br>" % sys.exc_info()[0:2]
            self.result += xml
            self.result += "</html>"

    def getResult(self):
        """Return the transformation result, or None while still running."""
        return self.result
105:
1.9 dwinter 106: from ZODB import DB
107: from ZODB.FileStorage import FileStorage
class ECHO_cache:
    """Small persistent cache backed by a private ZODB FileStorage
    (var/echo_cache.fs); used to store analysed pages keyed by
    (name, page number)."""

    def __init__(self):
        """Open (or create) the cache storage.

        Failure is tolerated: a broken cache must not prevent product
        startup, so the exception is only logged (self.root then stays
        unset and cache operations will fail when used).
        """
        try:
            self.storage = FileStorage(os.path.join(INSTANCE_HOME, "var/echo_cache.fs"))

            self.db = DB(self.storage)
            self.connection = self.db.open()
            self.root = self.connection.root()
        except Exception:
            # previously a silent bare except/pass; record why the
            # cache is unavailable
            logging.exception("ECHO_cache: could not open cache storage")

    def deleteObject(self, name, pn=None):
        """Delete a whole entry (pn is None) or one page of it from the cache."""
        fileStore = self.root.get(name, None)
        if fileStore:
            if not pn:
                del(self.root[name])
            else:
                if self.root[name].get(pn, None):
                    del(self.root[name][pn])

    def storeObject(self, name, pn, object):
        """Store object under (name, pn) and commit the transaction."""
        if not self.root.get(name, None):
            self.root[name] = {}

        # reassign the whole dict so the ZODB registers the mutation
        tmp = self.root[name]
        tmp[pn] = object
        self.root[name] = tmp
        transaction.get().commit()
        return True

    def retrieveObject(self, name, pn):
        """Return the cached object for (name, pn), or None if absent."""
        fileStore = self.root.get(name, None)
        if not fileStore:
            return None
        else:
            return self.root[name].get(pn, None)
155:
1.2 dwinter 156:
class ECHO_xslt(ECHO_pageTemplate, ECHO_language):
    """Page template whose body is an XSLT stylesheet applied to a
    configurable XML resource."""

    meta_type="ECHO_xslt"

    cache=ECHO_cache() # cache for analysed pages (class-level, shared by all instances)
    caching="yes"      # "yes" enables reading from / writing to the cache

    # if True, the request query string is appended to cgiUrl; can be changed with addChanges
    appendQueryString=True

    # if True, a full URL in the 'fn' request parameter is used to retrieve the
    # text instead of the url in cgi-url; can be changed with addChanges
    passURL=False

    results={}
    manage_options=ECHO_pageTemplate.manage_options+(
        {'label':'Change xml-ressource','action':'change_ECHO_xsltForm'},)
173:
1.2 dwinter 174: def refreshTxt(self):
175: """txt fuer refresh"""
176: return """ 2;url=%s?repeat=%s """%(self.absolute_url(),self.threadName)
1.1 dwinter 177:
178: def xslt(self):
1.2 dwinter 179: """xslt"""
1.1 dwinter 180:
1.4 dwinter 181: return self.document_src()
1.1 dwinter 182:
1.4 dwinter 183: def change_ECHO_xsltForm(self):
184: """change form"""
185: pt=zptFile(self, 'zpt/ChangeECHO_xsltForm.zpt')
186: return pt()
187:
1.10 dwinter 188: def addChanges(self,cgiUrl,appendQueryString=False,passURL=False,caching=False,RESPONSE=None):
1.4 dwinter 189: """change the xslt, ueberschriebt addChanges in ECHO_PageTemplate"""
1.10 dwinter 190:
1.4 dwinter 191: if urlparse.urlparse(cgiUrl)[0]=="":#relative url in absolute
192: self.cgiUrl=urlparse.urljoin(self.absolute_url(), cgiUrl)
193: else:
194: self.cgiUrl=cgiUrl
1.10 dwinter 195:
196: if appendQueryString:
197: self.appendQueryString=True
198: else:
199: self.appendQueryString=False
200:
201: if passURL:
202: self.passURL=True
203: else:
204: self.passURL=False
205:
206: if caching:
207: self.caching="yes"
208: else:
209: self.caching="No"
210:
211:
1.4 dwinter 212: if RESPONSE:
213: RESPONSE.redirect("manage_main")
1.2 dwinter 214:
    def index_html(self, repeat=None):
        """Default view: render the transformed document asynchronously.

        First call (repeat empty): start a getXML worker thread for the
        transform and return a wait page that refreshes with
        ?repeat=<threadName>.  Subsequent calls: return the wait page
        again until the thread has a result, then return the result.
        """
        threadName = repeat

        if not threadName or threadName == "":

            # backwards compatibility with the old non-configurable prototype
            if getattr(self, 'cgiUrl', '') == '':
                self.cgiUrl = "http://medea.mpiwg-berlin.mpg.de/cgi-bin/search/q1"

            qs = "%s%s" % (self.cgiUrl, self.REQUEST['QUERY_STRING'])
            xsl = self.absolute_url() + "/xslt"
            # _v_ prefix: volatile attribute, never persisted by the ZODB
            self._v_xmltrans = getXML()
            logging.debug("Thread prepared")
            self._v_xmltrans.set(qs, xsl, None)
            self._v_xmltrans.start()
            logging.debug("Thread started")

            # remember the thread name so the refresh URL can poll for it
            self.threadName = self._v_xmltrans.getName()[0:]
            wait_template = self.aq_parent.ZopeFind(self.aq_parent, obj_ids=['wait_template'])
            if wait_template:
                return wait_template[0][1]()
            pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'xsltWait.zpt')).__of__(self)
            return pt()

        else:

            if (self._v_xmltrans.getResult() == None):
                # still running -> show the wait page again
                wait_template = self.aq_parent.ZopeFind(self.aq_parent, obj_ids=['wait_template'])
                if wait_template:
                    return wait_template[0][1]()

                pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'xsltWait.zpt')).__of__(self)
                return pt()
            else:
                return self._v_xmltrans.getResult()
1.2 dwinter 260:
261:
1.10 dwinter 262: def getText(self):
263: """print nur den text"""
264: qs,baseUri=self.getTextInput()
265: self.REQUEST.RESPONSE.redirect(qs)
1.9 dwinter 266:
267: def deleteCache(self):
268: """deletefrom cache"""
269: fn=self.REQUEST['fn']
270: self.cache.deleteObject(fn)
271:
1.13 dwinter 272:
273: def createLinkNode(self,url,dom):
274: """createa a link node"""
275: txt=dom.createTextNode("<XMLLink>")
276: node=dom.createElementNS("http://test.de","a")
277: node.setAttributeNS("http://test.de","href",url)
278: node.appendChild(txt)
279: return node
280:
281: def forwardLink(self,linkid,url,type="target",RESPONSE=None):
282: """forward to link"""
283: if RESPONSE:
284: RESPONSE.redirect(self.getLink(linkid,url,type=type))
285:
286: else:
287: return self.getLink(linkid,url,type=type)
    def getLink(self, linkid, url, type="target"):
        """Return the resolved URL for the link with id linkid in the link
        file at url.

        type selects which end is resolved: "target" resolves to the
        slave viewer, anything else to the master viewer.  If the link's
        filename attribute is itself a full http URL it is returned
        unchanged.

        NOTE(review): if no link element matches linkid, lstr is never
        assigned and the final return raises NameError -- confirm callers
        always pass a valid linkid.
        """
        dom = NonvalidatingReader.parseUri(url)

        masterurl = dom.xpath("//mpiwg:masterurl/@ref", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
        slaveurl = dom.xpath("//mpiwg:slaveurl/@ref", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value

        # check now if the id occurs in the link file
        xp = "//mpiwg:link[@id='%s']" % linkid

        if type == "target":
            for link in dom.xpath(xp, explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'}):
                fn = link.xpath("mpiwg:target/@filename", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value

                if urlparse.urlparse(urllib.unquote(fn))[0] == "http":  # fn is already a full URL
                    return urllib.unquote(fn)  # return it directly

                ref = link.xpath("mpiwg:target/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                ref2 = link.xpath("mpiwg:target/mpiwg:pagelink/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                selectionNodeIndex = link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value

                lstr = slaveurl + 'fn=' + fn + '&_id=' + ref + '&_pagelink=%s///%s/%s/%s' % (ref2, selectionNodeIndex, linkid, 'target')
                lstr += "&_links=" + urllib.quote(url)

        else:
            for link in dom.xpath(xp, explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'}):
                fn = link.xpath("mpiwg:source/@filename", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                if urlparse.urlparse(urllib.unquote(fn))[0] == "http":  # fn is already a full URL
                    return urllib.unquote(fn)  # return it directly

                ref = link.xpath("mpiwg:source/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value

                ref2 = link.xpath("mpiwg:source/mpiwg:pagelink/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                selectionNodeIndex = link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value

                lstr = masterurl + 'fn=' + fn + '&_id=' + ref + '&_pagelink=%s///%s/%s/%s' % (ref2, selectionNodeIndex, linkid, 'source')
                lstr += "&_links=" + urllib.quote(url)
        return lstr
330:
    def addLinksUrl(self, txt, url):
        """Collect pagelink references for all links in the link file at
        url whose source or target refid occurs as an @id in txt.

        @return: list of strings 'refid///selectionNodeIndex/linkid/(source|target)'
        """
        ret = []
        dom = NonvalidatingReader.parseUri(url)
        textDom = NonvalidatingReader.parseString(txt)

        # find ids in txt
        ids = textDom.xpath("//*[@id]")

        for textid in ids:
            # links whose *source* refers to this id
            xp = "//mpiwg:link[mpiwg:source/@refid='%s']" % textid.xpath("@id")[0].value
            for link in dom.xpath(xp, explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'}):
                ref2 = link.xpath("mpiwg:source/mpiwg:pagelink/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                selectionNodeIndex = link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                linkid = link.xpath("@id")[0].value
                ret.append('%s///%s/%s/%s' % (ref2, selectionNodeIndex, linkid, 'source'))

            # links whose *target* refers to this id
            xp = "//mpiwg:link[mpiwg:target/@refid='%s']" % textid.xpath("@id")[0].value
            for link in dom.xpath(xp, explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'}):
                ref2 = link.xpath("mpiwg:target/mpiwg:pagelink/@refid", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                selectionNodeIndex = link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
                linkid = link.xpath("@id")[0].value
                ret.append('%s///%s/%s/%s' % (ref2, selectionNodeIndex, linkid, 'target'))

        return ret
357:
1.13 dwinter 358: def addLinks(self,txt,url="http://127.0.0.1:8080/HFQP/linkCreator/getCollectionXML?collection=commentary2"):
359: """add links to a page from xml linkfile"""
360:
361: dom=NonvalidatingReader.parseUri(url)
362: textDom=NonvalidatingReader.parseString(txt)
363:
364: #find ids in txt
365: ids=textDom.xpath("//*[@id]")
366: masterurl=dom.xpath("//mpiwg:masterurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
367: slaveurl=dom.xpath("//mpiwg:slaveurl/@ref",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
368:
369: #check now if there are in the link file
370: for textid in ids:
371: xp="//mpiwg:link[mpiwg:source/@refid='%s']"%textid.xpath("@id")[0].value
372: for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
373: fn=link.xpath("mpiwg:target/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
1.14 dwinter 374: print fn
375: if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn ist eine url
376: lstr=urllib.unquote(fn) # dann gibt diese zurueck
377: else:
378: try:
379: ref=link.xpath("mpiwg:target/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
380:
381: ref2=link.xpath("mpiwg:target/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
382: selectionNodeIndex=link.xpath("mpiwg:target/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
383: linkid=link.xpath("@id")[0].value
384: lstr=slaveurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,'target')
385: lstr+="&_links="+urllib.quote(url)
386: except:
387: lstr=""
1.13 dwinter 388: node=self.createLinkNode(lstr,textDom)
389: textid.parentNode.insertBefore(node,textid)
390:
391:
392: xp="//mpiwg:link[mpiwg:target/@refid='%s']"%textid.xpath("@id")[0].value
393: for link in dom.xpath(xp,explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'}):
394: fn=link.xpath("mpiwg:source/@filename",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
1.14 dwinter 395: if urlparse.urlparse(urllib.unquote(fn))[0]=="http": # fn ist eine url
396: lstr=urllib.unquote(fn) # dann gibt diese zurueck
397: else:
1.13 dwinter 398:
1.14 dwinter 399: ref=link.xpath("mpiwg:source/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
400:
401: ref2=link.xpath("mpiwg:source/mpiwg:pagelink/@refid",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
402: selectionNodeIndex=link.xpath("mpiwg:source/mpiwg:pagelink/@selectionNodeIndex",explicitNss={'mpiwg':'http://www.mpiwg-berlin.mpg.de/namespace'})[0].value
403: linkid=link.xpath("@id")[0].value
404: lstr=masterurl+'fn='+fn+'&_id='+ref+'&_pagelink=%s///%s/%s/%s'%(ref2,selectionNodeIndex,linkid,"source")
405: lstr+="&_links="+urllib.quote(url)
406:
1.13 dwinter 407: node=self.createLinkNode(lstr,textDom)
408: textid.parentNode.insertBefore(node,textid)
409:
410:
411:
412: strio = StringIO()
413: PrettyPrint(textDom,strio)
414: xmlstr = strio.getvalue()
415:
416: return xmlstr
417:
418:
419:
1.18 dwinter 420: def getPageLex(self,_pn="1",_id=None,_caching=None,_links=None,_showall="no",_displaylinks="yes"):
1.9 dwinter 421: """getpage mit lexikalischer analyse und xslt transform
422: if _caching=yes dann wird die lwxikalisch analysierte seite in einem cache abgespeichert
423: """
1.14 dwinter 424: def encode(hash):
425: ret=[]
426: for x in hash.keys():
427: value=hash[x]
428:
429: if type(value) is ListType:
430: for z in value:
431: ret.append("%s=%s"%(x,z))
432: else:
433: ret.append("%s=%s"%(x,value))
434: return "&".join(ret)
435:
436:
437:
1.10 dwinter 438: if not _caching:
439: _caching=self.caching
440:
1.9 dwinter 441: fn=self.REQUEST['fn']
442:
1.11 dwinter 443: if not _id:
1.12 dwinter 444:
1.11 dwinter 445: fromCache=self.cache.retrieveObject(fn,_pn)
446:
447: if fromCache and _caching=="yes":
448:
449: txt = fromCache
450: else:
451: txt=self.tagLex(nr=_pn)
1.13 dwinter 452:
1.11 dwinter 453: self.cache.storeObject(fn,_pn,txt[0:])
454:
1.9 dwinter 455: else:
1.11 dwinter 456: txt=self.tagLex(id=_id)
1.13 dwinter 457:
1.14 dwinter 458: if _showall=="yes":
459: params=cgi.parse_qs(self.REQUEST['QUERY_STRING'])
460:
461: params['_pagelink']=self.addLinksUrl(txt,url=_links)
462: params['_showall']='no'
463:
464: print self.absolute_url()+"?"+encode(params)
465: self.REQUEST.RESPONSE.redirect(self.absolute_url()+"/getPageLex?"+encode(params))
466:
467:
1.9 dwinter 468: xsl=self.xslt()
469:
470: xsltproc=Processor()
471: if type(txt)==UnicodeType:
472: document = InputSource.DefaultFactory.fromString(txt.encode('utf-8'))
473: else:
474: document = InputSource.DefaultFactory.fromString(txt)
475: stylesheet = InputSource.DefaultFactory.fromString(xsl)
476: xsltproc.appendStylesheet(stylesheet)
477: tmp=xsltproc.run(document)
1.13 dwinter 478:
1.18 dwinter 479: if _links and (_displaylinks=='yes'):
1.13 dwinter 480: _links=urllib.unquote(_links)
481: tmp=self.addLinks(tmp,url=_links)
482:
1.11 dwinter 483: #bugfix for digilib images which doesn't accept &
484: tmp=tmp.replace("&","&")
1.12 dwinter 485:
486:
1.9 dwinter 487: return tmp[0:]
488:
1.10 dwinter 489: def getTextInput(self):
490: """get the text
491: wie der text geholt wird liegt an der konfiguration,
492: is appendQueryString gesetzt, dann wir jeweils der Querystring an vorgebenen url gesetzt, erwartet wird fn=
493: fr den Pfad, is passURL gesetzt, dann wird falls fn= eine vollstndige url enthlt, diese anstelle der in cgiurl definierten genommen.
494: """
495:
496: if getattr(self,'passURL',False) and self.REQUEST.has_key('fn') and (urlparse.urlparse(self.REQUEST['fn'])[0]=='http'):
497: qs=self.REQUEST['fn']
498: baseUri=qs
499: elif getattr(self,'pappendQueryString',True):
500: qs="%s%s"%(self.cgiUrl,self.REQUEST['QUERY_STRING'])
501: baseUri=self.cgiUrl
502: else:
503: qs="%s"%(self.cgiUrl)
504: baseUri=self.cgiUrl
505:
506: #fact= InputSource.DefaultFactory.fromUri(qs)
507: return qs,baseUri
508: #return InputSource.InputSource(fact)
509: #xmlt=urllib.urlopen(qs).read()
510:
    def getPage(self, _pn="-1", _id=None, REQUEST=None, _caching=None):
        """Extract a single page (delimited by pb elements) from an xml text.

        The page is selected either by its 1-based number _pn or by the
        @id _id of an element on it; the text source comes from
        getTextInput() and is optionally cached.  Returns the page as a
        pretty-printed XML string, an error string (with REQUEST) or None.
        """
        if not _caching:
            _caching = self.caching

        pn = int(_pn) - 1
        if pn < 0 and (not _id):
            if REQUEST:
                return "Sorry, pagenumbers have to be greater than 0"
            else:
                return None

        xmlt, self.baseUri = self.getTextInput()

        # get the text from cache, if existing (whole text stored under pn "-1")
        try:
            fromCache = self.cache.retrieveObject(self.baseUri, "-1")
        except:
            fromCache = None
        if fromCache and _caching == "yes":
            txt = fromCache
        else:
            txt = urllib.urlopen(xmlt).read()

            self.cache.storeObject(self.baseUri, "-1", txt)

        dom = NonvalidatingReader.parseString(txt, self.baseUri)

        # pb should have a namespace
        pbs = dom.xpath("//mpiwg:pb", explicitNss={'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})

        if len(pbs) == 0:  # retry without a namespace
            pbs = dom.xpath("//pb")

        if _id:
            # count how many pb elements precede the element with this id
            idpb = dom.xpath("//*[@id='%s']/preceding::node()/mpiwg:pb" % _id, explicitNss={'html': 'http://www.w3.org/1999/xhtml', 'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'})
            if len(idpb) == 0:
                idpb = dom.xpath("//*[@id='%s']/preceding::node()/pb" % _id)

            if len(idpb) == 0:
                # fall back to walking all preceding nodes and counting pb tags
                k = 0
                for node in dom.xpath("//*[@id='%s']//preceding::node()" % _id, explicitNss={'html': 'http://www.w3.org/1999/xhtml', 'mpiwg': 'http://www.mpiwg-berlin.mpg.de/namespace'}):
                    if getattr(node, 'tagName', "") == "mpiwg:pb":
                        k += 1
            else:
                k = len(idpb)
            pn = k - 1  # -1 because page counting starts at 0
        if pn > len(pbs):
            if REQUEST:
                return "Sorry, pagenumber %s does not exit" % (pn + 1)
            else:
                return None

        beginNode = pbs[pn]  # take the n'th pb

        if not (pn == len(pbs) - 1):  # not the last page
            endNode = pbs[pn + 1]
        else:
            endNode = None

        # prune everything outside [beginNode, endNode) from the DOM
        deleteNodes = beginNode.xpath('preceding::node()')
        if endNode:
            deleteNodes += endNode.xpath('following::node()')
        for node in deleteNodes:
            try:
                parent = node.xpath("..")

                if parent:
                    parent[0].removeChild(node)
            except:
                # NOTE(review): log label "ECHO_Resource (getAccessRightMD)"
                # looks copy-pasted from another method
                logger("ECHO_Resource (getAccessRightMD)", logging.INFO, "%s (%s)" % sys.exc_info()[0:2])
        strio = StringIO()
        PrettyPrint(dom, strio)
        xmlstr = strio.getvalue()

        return xmlstr
1.6 dwinter 596:
597:
598:
def manage_addECHO_xsltForm(self):
    """Render the ZMI form for adding an ECHO_xslt object."""
    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'AddECHO_xslt.zpt')).__of__(self)
    return form()
603:
604: from urllib import quote
605:
606:
def manage_addECHO_xslt(self, id, label, weight=0, contentType=0, title=None, text=None, cgiUrl=None,
                        REQUEST=None, submit=None):
    "Add a Page Template with optional file content."

    id = str(id)
    if REQUEST is None:
        # programmatic creation
        self._setObject(id, ECHO_xslt(id, text))
        ob = getattr(self, id)
        setattr(ob, 'weight', weight)
        setattr(ob, 'label', label)
        setattr(ob, 'contentType', contentType)
        # bugfix: cgiUrl is now set before returning -- it used to sit after
        # the unconditional return and was never executed
        setattr(ob, 'cgiUrl', cgiUrl)
        if title:
            ob.pt_setTitle(title)
        return ob
    else:
        # creation through the ZMI form, optionally from an uploaded file
        file = REQUEST.form.get('file')
        headers = getattr(file, 'headers', None)
        if headers is None or not file.filename:
            zpt = ECHO_xslt(id)
        else:
            zpt = ECHO_xslt(id, file, headers.get('contentType'))

        self._setObject(id, zpt)
        ob = getattr(self, id)
        setattr(ob, 'weight', weight)
        setattr(ob, 'label', label)
        setattr(ob, 'cgiUrl', cgiUrl)
        if title:
            ob.pt_setTitle(title)

        try:
            u = self.DestinationURL()
        except AttributeError:
            u = REQUEST['URL1']

        if submit == " Add and Edit ":
            u = "%s/%s" % (u, quote(id))
        REQUEST.RESPONSE.redirect(u + '/manage_main')
    return ''
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>