# HG changeset patch
# User dwinter
# Date 1319398406 -7200
# Node ID 0ed5ecf366932370a3ebce9344c28281e6b2a770
# Parent e8640aa396cd8c99147382c502f639ed2b856d7c
history management added
diff -r e8640aa396cd -r 0ed5ecf36693 JSONClient.py
--- a/JSONClient.py Thu Oct 13 10:51:54 2011 +0200
+++ b/JSONClient.py Sun Oct 23 21:33:26 2011 +0200
@@ -11,19 +11,51 @@
class JSONClient(SimpleItem):
meta_type="JSONClient"
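+    # add a "Main Config" tab to the ZMI management screens of this object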
+    manage_options=SimpleItem.manage_options+(
+        {'label':'Main Config','action':'changeJSONClientConfigPT'},
+        )
+
+    # volatile, class-level cache for fetched responses, keyed by request URL
+    _v_jsonCache={}
+
def __init__(self,id,url):
"""init"""
self.id=id
self.url=url
- def json(self,method,params={}):
+ def changeJSONClientConfigPT(self):
+ """change form"""
+ pt=zptFile(self, 'zpt/changeJSONClientForm.zpt')
+ pt.content_type="text/html"
+ return pt()
+
+
+    def changeJSONClientConfig(self,url,RESPONSE=None):
+        """change the JSON client configuration (set the service URL)"""
+        self.url=url
+ if RESPONSE is not None:
+ RESPONSE.redirect('manage_main')
+
+
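+    # build a link to the entity detail page of the ISMI browse application (server URL is hard-coded)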
+ def getEntityLink(self,entId):
+ return "http://neruda.mpiwg-berlin.mpg.de:8080/om4-ismi/browse/entityDetails.iface?eid="+str(entId)
+
+ def json(self,method,params={},cache=True):
"""json aufruf"""
+
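+        # the configured url is a template with two %s placeholders: the method name and the encoded parameters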
paramString=urllib.urlencode(params)
callUrl = self.url%(method,paramString)
- txt=SrvTxtUtils.getHttpData(callUrl)
- logging.debug(txt)
- obj= json.loads(txt)
+
+        # return the cached response if caching is enabled and this URL was fetched before
+        if (cache is True) and (self._v_jsonCache.get(callUrl,None) is not None):
+            return self._v_jsonCache.get(callUrl)
+        else:
+            txt=SrvTxtUtils.getHttpData(callUrl)
+            logging.debug(txt)
+            obj=json.loads(txt)
+            self._v_jsonCache[callUrl]=obj
return obj
@@ -65,7 +97,123 @@
logging.debug("++++++++++++++++++++++++++++++")
logging.debug(rels)
return ret
+
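+    # getPubmanOutput fetches an HTML-formatted publication list from the MPIWG pubman server; the default query is hard-coded and results are cached in _v_jsonCache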
+ def getPubmanOutput(self,url="http://pubman.mpiwg-berlin.mpg.de/search/SearchAndExport?cqlQuery=escidoc.context.objid=escidoc:79281&exportFormat=APA&outputFormat=html_linked&sortKeys=&sortOrder=ascending&startRecord=1&maximumRecords=50",cache=True):
+ """getPubmanOutput"""
+
+ callUrl = url
+        # return the cached text if caching is enabled and this URL was fetched before
+        if (cache is True) and (self._v_jsonCache.get(callUrl,None) is not None):
+            return self._v_jsonCache.get(callUrl)
+        else:
+            txt=SrvTxtUtils.getHttpData(callUrl)
+            logging.debug(txt)
+            #obj= json.loads(txt)
+            self._v_jsonCache[callUrl]=txt
+        return txt
+
+    def activateUrl(self,value):
+        # if the value is a URL, turn it into a corresponding <a> link
+        if value.startswith("http://") or value.startswith("https://"):
+            return """<a href="%s">%s</a>"""%(value,value)
+        else:
+            return value
+
+
+    def getISMIIDfromBiographie(self,name):
+        # takes the file name and returns the ISMI person ID; the mapping lives on the server in the file bio2ismi.txt
+        data = str(getattr(self.biographies,'bio2ismi.txt'))
+        logging.debug(data)
+        for line in data.split("\n"):
+            splitted=line.split("\t")
+            if len(splitted) < 2:
+                continue
+            # note: str.replace treats these patterns as literal text, not regular expressions
+            filename=splitted[1].replace(r"\w","_")
+            filename=filename.replace(r"%\d+","_")
+            logging.debug(line)
+            if name in filename:
+                return splitted[0]
+        return ""
+
+    def collectURL(self,url,name,REQUEST):
+        # record the visited URL in the session history and return the breadcrumb trail
+        history=REQUEST.SESSION.get('history',None)
+        if history is None:
+            history=[]
+
+        if len(history)>0: # if the last entry equals the current one, do not add it again
+            urlStored,nameStored=history[-1]
+            if urlStored==url:
+                return self.createBreadcrumb(REQUEST)
+
+        history.append((url,name))
+        REQUEST.SESSION.set('history',history)
+        return self.createBreadcrumb(REQUEST)
+
+    def collectTriple(self,s,p,o,inverse=False,REQUEST=None,RESPONSE=None):
+        """collect a triple and redirect"""
+        history=REQUEST.SESSION.get('triple',None)
+        if history is None:
+            history=[]
+
+        if (s,p,o) not in history:
+            history.append((s,p,o))
+            REQUEST.SESSION.set('triple',history)
+
+        if inverse:
+            RESPONSE.redirect(s)
+        else:
+            RESPONSE.redirect(o)
+
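+    # cleanHistoryAndTriple resets both session stores and sends the user back to the referring page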
+ def cleanHistoryAndTriple(self,REQUEST):
+ """clean history"""
+        REQUEST.SESSION.set('triple',[])
+        REQUEST.SESSION.set('history',[])
+ REQUEST.response.redirect(REQUEST['HTTP_REFERER'])
+
+ def getTripleAsRDF(self,REQUEST):
+ """show the triple as RDF"""
+ ret="""