#
# removed methods
#
class MPIWGProjects_notused:

    def decode(self, str):
        """return unicode object"""
        return unicodify(str)

    def isCheckField(self, fieldname):
        """return whether fieldname is a check field"""
        return (fieldname in checkFields)

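    # Note: `checkFields` is presumably a module-level sequence of field names
    # defined elsewhere in this module; it is not an attribute of this class.
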
    def sortedByPlace(self, metatype):
        """find metatype and sort by place"""
        def sort(x, y):
            return cmp(getattr(x[1], 'place', 0), getattr(y[1], 'place', 0))

        logging.debug("MPIWGProjects begin: sorted by place: " + metatype)
        founds = self.ZopeFind(self, obj_metatypes=[metatype])

        founds.sort(sort)
        logging.debug("MPIWGProjects end: sorted by place: " + metatype)
        return founds

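    # Illustrative result (hypothetical ids and 'place' values): ZopeFind returns
    # (id, object) tuples, so sortedByPlace("MPIWGProject") might yield
    # [('intro', <obj place=1>), ('methods', <obj place=2>)], ordered by the
    # 'place' attribute of each found object.
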
    def harvest_page(self, context=None, mode="normal"):
        """page for harvesting for the project search"""

        if not context:
            context = self

        if self.isActiveProject() and self.isCurrentVersion():
            templates = self.en.getHarvestCache()

            ext = getattr(self, "harvest_main", None)
            if ext:
                rendered = getattr(self, ext.getId())()
                templates[self.absolute_url()] = rendered
                transaction.commit()
                return rendered

            pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'harvest_main')).__of__(context)

            rendered = pt()
            templates[self.absolute_url()] = rendered
            transaction.commit()
            return rendered

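    # The harvest cache is effectively a mapping of absolute URL -> rendered HTML,
    # e.g. templates['http://.../projects/myproject'] = '<html>...</html>', which the
    # project search can later index without re-rendering each project page.
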
    def index_html_old(self, request=True, context=None):
        """show homepage"""

        bound_names = {}

        if not context:
            context = self
        if request:
            if self.REQUEST.has_key('date') and self.REQUEST.SESSION.get('MPI_redirected', None) == None:
                self.REQUEST.SESSION['MPI_redirected'] = 1
                self.REQUEST.RESPONSE.redirect(self.checkDate(self.REQUEST['date']) + "?date=" + self.REQUEST['date'])
            else:
                self.REQUEST.SESSION['MPI_redirected'] = None

        # ext=self.ZopeFind(self.aq_parent,obj_ids=["project_main"])

        request2 = getattr(self, 'REQUEST', None)

        if request2 is not None:
            response = request2.response
            if not response.headers.has_key('content-type'):
                response.setHeader('content-type', 'text/html')

        security = getSecurityManager()
        bound_names['user'] = security.getUser()

        # Retrieve the value from the cache.
        keyset = None
        if self.ZCacheable_isCachingEnabled():
            # Prepare a cache key.
            keyset = {'here': self, 'params': request2['QUERY_STRING']}

            result = self.ZCacheable_get(keywords=keyset)

            if result is not None:
                # Got a cached value.
                return result

        pt = getTemplate(self, "project_main")
        # Execute the template in a new security context.
        security.addContext(self)

        try:
            # logging.debug("index_html pt=%s"%repr(pt))
            result = pt.pt_render(extra_context=bound_names)
            # logging.debug("index_html result=%s"%repr(result))
            if keyset is not None:
                # Store the result in the cache.
                self.ZCacheable_set(result, keywords=keyset)

            return result
        finally:
            security.removeContext(self)

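    # Note on the cache key above: it combines the object itself with the raw query
    # string, so e.g. .../project?date=2005 and .../project?date=2006 end up as
    # separate entries for ZCacheable_get/ZCacheable_set.
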
    def index_html_old2(self, request=True, context=None):
        """show homepage"""
        if not context:
            context = self
        if request:
            if self.REQUEST.has_key('date') and self.REQUEST.SESSION.get('MPI_redirected', None) == None:
                self.REQUEST.SESSION['MPI_redirected'] = 1
                self.REQUEST.RESPONSE.redirect(self.checkDate(self.REQUEST['date']) + "?date=" + self.REQUEST['date'])
            else:
                self.REQUEST.SESSION['MPI_redirected'] = None

        # ext=self.ZopeFind(self.aq_parent,obj_ids=["project_main"])

        ext = getattr(self, "project_main", None)
        if ext:
            return getattr(self, ext.getId())()

        pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'project_main')).__of__(context)

        return pt()

    def no_project(self):
        """warning: project does not exist yet"""
        pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'no_project')).__of__(self)
        return pt()

    def showImagesOfPage(self, imageUrl=None):
        """show images of project"""
        self.getContent('WEB_project_description', filter='yes')  # get the content and store image infos into the session
        pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'projectImageView.zpt')).__of__(self)
        return pt()

    def show_html(self):
        """simple index"""
        # return "HI"
        pt = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'MPIWGProject_index.zpt')).__of__(self)
        return pt()

    def getLabel_old(self):
        """returns label (or title) of this project"""
        l = self.getContent('xdata_07')
        if l:
            return l
        l = self.getContent('WEB_title')
        if l:
            return l
        return self.title

    def getPersonKeyList(self):
        """return the list of keys of the participating scientists (utf-8 encoded)"""
        # logging.error("getPersonKeyList:%s"%getattr(self,'responsibleScientistsList',[]))
        try:
            return [utf8ify(x[1]) for x in getattr(self, 'responsibleScientistsList', [])]
        except:
            return []

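    # responsibleScientistsList is assumed here to hold (name, key) pairs, so x[1]
    # above picks out the person key; the bare except covers malformed entries.
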
    def myCapitalize(self, txt):
        """capitalize, including names containing a hyphen"""
        splitted = [x.capitalize() for x in txt.split("-")]
        return "-".join(splitted)

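    # Example (hypothetical input): myCapitalize("smith-jones") returns "Smith-Jones",
    # whereas str.capitalize() alone would give "Smith-jones".
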
    def getNamesOrdered(self, list):
        """sort the list according to the order given in xdata_01"""

        nameList = self.getContent('xdata_01')
        if nameList.find(";") > -1:  # guess that the separator is ;
            names = nameList.split(";")
        else:
            names = nameList.split(",")

        self._v_names = []
        for name in names:
            self._v_names.append(name.rstrip().lstrip())

        def sort(x, y):
            try:
                return cmp(self._v_names.index(x[0]), self._v_names.index(y[0]))
            except:
                return 0

        list.sort(sort)

        return list

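    # Illustrative call (hypothetical data): with xdata_01 == "Eva Smith; Hans Mueller",
    # getNamesOrdered([("Hans Mueller", obj1), ("Eva Smith", obj2)]) returns
    # [("Eva Smith", obj2), ("Hans Mueller", obj1)], i.e. the tuples reordered to
    # match the name order in xdata_01.
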
    def getWebProject_description(self):
        """get description"""
        debug = self.REQUEST.cookies.get("MP_debug_code", None)

        if debug and debug == "western":
            return """
            <html>
            <head>
            <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
            </head>
            <body>%s</body>
            </html>
            """ % self.WEB_project_description[0]

        return """
        <html>
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
        </head>
        <body>%s</body>
        </html>
        """ % self.getContent('WEB_project_description')

    def isChecked(self, wert, list):
        """check if wert is in the ;-separated list"""

        # fields are sometimes defined as a list with a single element
        if type(list) in (StringType, UnicodeType):
            splitted = list.split(";")
        else:
            splitted = list[0].split(";")

        splitted = [y.rstrip().lstrip() for y in splitted]

        for x in splitted:
            x = re.sub(r"[^A-Za-z ]", "", x)  # keep only letters and spaces
            if (not x == u'') and x in wert:
                return 1
        return 0

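    # Illustrative call (hypothetical values): isChecked(u'History of Science',
    # 'History of Science; Astronomy') returns 1, while isChecked(u'Biology',
    # 'History of Science; Astronomy') returns 0.
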
    def getRootProject(self):
        """returns the root (=top level) project of the current project"""
        ct = self.getContexts(parents=self.getContent('xdata_05'))
        if len(ct) > 0:
            return ct[-1][0]
        else:
            return self

    def getContent(self, field, filter=None):
        """content of the field"""
        # logging.debug("getContent field=%s filter=%s"%(field,filter))

        if field == "short_title":
            text = self.getContent("xdata_07")
            if text == "":
                text = self.getContent("WEB_title")
            return text

        text = u''

        f = getattr(self, field)
        if isinstance(f, list):
            # compat with old lists
            for x in f:
                try:
                    text += x
                except:
                    text = x
        else:
            text = f

        try:
            if text[len(text) - 1] == ";":
                text = text[0:len(text) - 1]

        except:
            pass

        if text == '':  # why the following lines??
            text2 = text
        else:
            text2 = re.sub(r';([^\s])', r'; \g<1>', text)

        if field == "WEB_project_description":  # in any case the substitution must not be applied to normal text
            text2 = text

        # if the result is empty and the field is the header, use the title instead

        if (text2 == '') and (field == 'WEB_project_header'):
            return self.getContent('WEB_title')

        if filter:
            splitted = text2.split("""<p class="picture">""")
            if len(splitted) > 1:
                tmp = splitted[1].split("</p>")
                # return repr(splitted[1])
                try:
                    self.imageURL = tmp[0].split("\"")[1].encode('utf-8')
                except:
                    try:
                        self.imageURL = tmp[0].split("src=")[1].split(" ")[0].encode('utf-8')
                    except:
                        self.imageURL = ""

                split2 = "</p>".join(tmp[1:])

                text3 = splitted[0] + split2

                splitted = text3.split("""<p class="picturetitle">""")
                if len(splitted) > 1:
                    tmp = splitted[1].split("</p>")
                    self.imagecap = tmp[0].encode('utf-8')

                    split4 = "".join(tmp[1:])

                    text5 = splitted[0] + split4
                else:
                    # no caption
                    text5 = text3
            else:
                # no picture
                text5 = text2
        else:
            text5 = text2

        # check for WEB_project_description without a leading p tag
        if (len(text5) > 4) and (not text5[0:3] == '<p>') and (field == 'WEB_project_description'):
            text5 = "<p>" + text5 + "</p>"

        # filter image

        text5 = text5.lstrip().rstrip()  # remove whitespace and a lone <br>
        if (text5 == "<br>") or (text5 == "<br/>"):
            text5 = ""

        # logging.debug("getcontent: field=%s filter=%s -> %s"%(field,filter,repr(text5)))
        return unicodify(text5)
        # return utf8ify(text5) # return as utf-8 byte string

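    # Sketch of the filter behaviour above (hypothetical markup): for a
    # WEB_project_description of
    #   '<p>Text</p><p class="picture"><img src="pic.jpg"></p><p class="picturetitle">Fig. 1</p>'
    # calling getContent('WEB_project_description', filter='yes') stores 'pic.jpg' in
    # self.imageURL and 'Fig. 1' in self.imagecap, and returns u'<p>Text</p>'.
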
    def getImageUrls(self, mode="not_cached"):
        """get the image urls"""

        if (getattr(self, 'link', '') == ''):
            return []  # there is no link

        server = xmlrpclib.ServerProxy(self.link)

        if mode == "cached":
            if hasattr(self, "_v_imageUrls"):
                logging.debug("getImageURL cached")
                return self._v_imageUrls

        try:
            urls = server.getImageUrls()
            ret = []
            for url in urls:
                url = os.path.join(self.link, url)
                ret.append(url)

        except:
            self._v_imageUrls = []
            return []
        self._v_imageUrls = ret[0:]
        return ret
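    # Note: server.getImageUrls() is an XML-RPC call against whatever service self.link
    # points at and is assumed to return paths relative to that link; os.path.join then
    # yields e.g. os.path.join('http://host/images', 'p0001.jpg') -> 'http://host/images/p0001.jpg'.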