MPIWGRoot.py @ changeset 0:bca61e893fcc
first checkin of MPIWGWeb r2 branch from CVS into mercurial

author:   casties
date:     Thu, 10 Jan 2013 17:52:13 +0100
parents:  (none)
children: 1f2760ed3efe
1 from Products.PageTemplates.PageTemplateFile import PageTemplateFile | |
2 from Products.PageTemplates.PageTemplate import PageTemplate | |
3 from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate | |
4 from Products.ZSQLExtend.ZSQLExtend import ZSQLExtendFolder | |
5 from Products.ZCatalog.CatalogPathAwareness import CatalogAware | |
6 from OFS.Image import Image | |
7 from Globals import package_home | |
8 import urllib | |
9 import MPIWGStaff | |
10 import string | |
11 import re | |
12 import os | |
13 from types import * | |
14 import logging | |
import xmlhelper # methods for managing the project XML
16 from OFS.SimpleItem import SimpleItem | |
17 from OFS.Folder import Folder | |
18 from Products.ZSQLMethods.SQL import SQLConnectionIDs | |
19 from AccessControl import ClassSecurityInfo | |
20 from bibliography import * | |
21 import time | |
22 import xml.dom.minidom | |
23 import sys | |
24 import transaction | |
25 | |
26 #from Ft.Xml.XPath import Evaluate | |
27 #from Ft.Xml.XPath.Context import Context | |
28 #from Ft.Xml.Domlette import NonvalidatingReader,PrettyPrint, Print | |
29 #from Ft.Xml import EMPTY_NAMESPACE | |
30 import copy | |
31 import updatePersonalWWW | |
32 import MPIWGStaff | |
33 from MPIWGHelper import * | |
34 from BeautifulSoup import BeautifulSoup, Comment | |
35 from ZODB import FileStorage, DB | |
36 from ZEO import ClientStorage | |
37 | |
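# Note (added): sortWeight is a Python 2 cmp-style comparator used with list.sort()
# on lists of (id, object) tuples; it orders entries by the numeric 'weight'
# attribute of the object (a missing weight counts as 0).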
def sortWeight(x,y):
    x1=int(getattr(x[1],'weight','0'))
    y1=int(getattr(y[1],'weight','0'))
    return cmp(x1,y1)
42 | |
43 | |
class MPIWGRoot(ZSQLExtendFolder):
    """root folder for the web server"""
46 | |
47 _v_harvestCache=None | |
48 meta_type='MPIWGRoot' | |
49 | |
50 fieldLabels={'WEB_title':'WEB_Title', | |
51 'xdata_01':'Responsible Scientists', | |
52 'xdata_02':'Department', | |
53 'xdata_03':'Historical Persons', | |
54 'xdata_04':'Time period', | |
55 'xdata_05':'Sorting number', | |
56 'xdata_06':'Keywords', | |
57 'xdata_07':'Short title', | |
58 'xdata_08':'Other involved scholars' , | |
59 'xdata_09':'Disciplines', | |
60 'xdata_10':'Themes', | |
61 'xdata_11':'Object Digitallibrary', | |
62 'xdata_12':'Cooperation partners', | |
63 'xdata_13':'Funding institutions', | |
64 'WEB_project_header':'WEB_project_header', | |
65 'WEB_project_description':'WEB_project_description', | |
66 'WEB_related_pub':'WEB_related_pub'} | |
67 | |
68 # (is this used?) | |
69 folders=['MPIWGProject','Folder','ECHO_Navigation'] | |
70 # language of this instance | |
71 lang = 'en' | |
72 # types of objects that show up in navigation | |
73 nav_meta_types = ['MPIWGTemplate','MPIWGLink','MPIWGFolder'] | |
74 | |
75 manage_options = Folder.manage_options+( | |
76 {'label':'Update personal homepages','action':'updatePersonalwww_html'}, | |
77 {'label':'Reindex catalogs','action':'reindexCatalogs'}, | |
78 {'label':'Main config','action':'changeMPIWGRootForm'}, | |
79 {'label':'add e-mails','action':'showNewDBEntries'}, | |
80 #{'label':'update the institutsbibliography','action':'updateInstitutsbiliography'}, | |
81 #{'label':'Edit Historical Persons','action':'editHistoricalPersonsForm'}, | |
82 #{'label':'Store Historical Persons','action':'storeHistoricalPersons'}, | |
83 ) | |
84 | |
85 | |
86 def getHarvestCachePort(self): | |
87 return getattr(self,"harvestPort",29999) | |
88 | |
89 def getHarvestCacheServer(self): | |
90 return getattr(self,"harvestServer","localhost") | |
91 | |
92 | |
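    # Note (added): the harvest cache lives in a separate ZEO storage (see
    # getHarvestCacheServer/getHarvestCachePort above). getHarvestCache() connects a
    # ClientStorage, opens the database, keeps the DB handle in _v_harvestDV (so
    # __del__ can close it) and returns the persistent OOBTree stored under 'templates'.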
    def getHarvestCache(self):
        logging.debug("CACHE:"+repr(self._v_harvestCache))
        if self._v_harvestCache==None:
            #storage = FileStorage.FileStorage('/tmp/'+self.getId()+'test-filestorage.fs')
            addr = self.getHarvestCacheServer(), self.getHarvestCachePort()
            storage = ClientStorage.ClientStorage(addr)
            db = DB(storage)
            self._v_harvestDV=db
            conn = db.open()
            dbroot = conn.root()
            if not dbroot.has_key('templates'):
                from BTrees.OOBTree import OOBTree
                dbroot['templates'] = OOBTree()

            self._v_harvestCache = dbroot['templates']
            logging.debug("CACHE2:"+repr(self._v_harvestCache))

        return self._v_harvestCache
111 | |
112 | |
113 | |
114 def __del__(self): | |
115 if self._v_harvestCache!=None: | |
116 self._v_harvestDV.close(); | |
117 | |
    def getGetNeighbourhood(self,obj, wordStr, length=100,tagging=True):
        """find the neighbourhoods of the words in wordStr; returns an array with the surroundings
        of the places where the words were found. All tags are removed and the hits are tagged with
        <span class="found">XX</span>; the surroundings are searched case-insensitively.
        @param wordStr: string of words separated by blanks; phrases are marked with ", e.g.
               "a phrase"; "*" denotes a wildcard and is ignored
        @param length: optional, default 100; 2*length is the size of the surrounding
        @param tagging: optional, default True; no span tag is created if tagging=False
        """

        ret=[] # array that collects the results which are returned later
        ranges=[] # array of tuples (x,y) where x marks the start and y the end of the i-th surrounding

        wordStr=wordStr.lstrip().rstrip()

        def isInRanges(nr,length):
            """test whether a given position nr is already inside one of the surroundings; returns the
            index of the first matching entry of ranges, -1 if there is no match

            @param nr: position to be checked
            @param length: length of the word to be checked
            """
            for x in ranges:
                if (x[0]<=nr) and (nr < (x[1]-length)):
                    return ranges.index(x)
            return -1

        # deal with phrases: inside phrases the blanks are replaced by "_"
        def rep_empty(str):
            x= re.sub(" ","_",str.group(0))
            return re.sub("\"","",x)

        wordStr=re.sub("\".*?\"", rep_empty,wordStr) # replace blanks inside "..." by "_" and remove the quotes

        #deal with wildcards, for our purposes it is enough to delete the wildcard
        wordStr=wordStr.replace("*","")

        words=wordStr.split(" ")
        #if not words is ListType:
        #    words=[words]


        txtCache = self.en.getHarvestCache();
        txt= txtCache.get(obj.absolute_url(),None)

        if txt==None:

            logging.debug("NO CACHE for: "+obj.absolute_url())
            txt=obj.harvest_page(mode="slim")


        if not txt:
            return ret

        soup = BeautifulSoup(txt)

        comments = soup.findAll(text=lambda text:isinstance(text, Comment))
        [comment.extract() for comment in comments]

        txt = ''.join(soup.findAll(text=True))


        #txt=re.sub("<.*?>", "", txt) # remove all tags
        for word in words:
            word=re.sub("_"," ",word) # replace "_" back by " "
            pos=0

            n=txt.lower().count(word.lower()) # how often does the word occur

            for i in range(n):
                pos=txt.lower().find(word.lower(),pos)

                if pos > 0:
                    x=max(0,pos-length)
                    y=min(len(txt),pos+length)


                    #is word already in one of the results
                    nr=isInRanges(pos,len(word))
                    if nr >=0: # the word is inside an already found surrounding, so enlarge it
                        x=min(ranges[nr][0],x)
                        y=max(ranges[nr][1],y)

                    str=txt[x:y]
                    if x!=0: #add dots if in the middle of text
                        str="..."+str

                    if y!=len(txt): #add dots if in the middle of text
                        str=str+"..."



                    if nr >=0: # the word is inside an already found surrounding
                        ranges[nr]=(x,y) # new position of the surrounding

                        ret[nr]=str # new surrounding
                    else: # otherwise add a new surrounding
                        ranges.append((x,y))

                        ret.append(str)

                    pos=pos+len(word)
                else:
                    break;

        # now highlight everything
        if tagging:
            for x in range(len(ret)):
                for word in words:
                    repl=re.compile(word,re.IGNORECASE)
                    ret[x]=repl.sub(""" <span class="found">%s</span>"""%word.upper(),ret[x])

        return ret
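    # Example (sketch, not part of the original code): getGetNeighbourhood is typically
    # called with a project object and the user's search string, e.g.
    #   snippets = self.getGetNeighbourhood(project, '"galileo galilei" optics', length=80)
    # which would return a list of "...context..." snippets with each hit wrapped in
    # <span class="found">...</span> (assuming the harvest cache or harvest_page provides text).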
    def copyAllImagesToMargin(self):
        """transform all images into the margins"""
        projects=self.getTree()
        ret=""
        for project in projects:
            proj=project[3]
            try:
                persons=proj.copyImageToMargin();
            except:
                logging.error("Cannot do: %s"%repr(project))
241 | |
    def transformProjectsToId(self):
        """transform to IDs; helper function that analyses the old templates and adds the new list
        of responsible scientists"""
245 projects=self.getTree() | |
246 ret="" | |
247 for project in projects: | |
248 | |
249 proj=project[3] | |
250 persons=proj.identifyNames(proj.getContent('xdata_01')) | |
251 if not hasattr(proj,'responsibleScientistsList'): | |
252 proj.responsibleScientistsList=[] | |
253 | |
254 for person in persons.items(): | |
255 | |
256 if len(person[1]) >1: #nicht eindeutig | |
257 ret+="nicht eindeutig --- %s: %s\n"%(proj.getId(),person[0]) | |
258 | |
259 elif len(person[1]) ==0: #kein eintrage | |
260 ret+="kein eintrag--- %s: %s\n"%(proj.getId(),person[0]) | |
261 proj.responsibleScientistsList.append((person[0],"")) | |
262 else: | |
263 proj.responsibleScientistsList.append((person[0],person[1][0].getObject().getKey())) | |
264 | |
265 return ret | |
266 | |
267 | |
268 def harvestProjects(self): | |
269 """harvest""" | |
270 folder="/tmp" | |
271 try: | |
272 os.mkdir("/tmp/harvest_MPIWG") | |
273 except: | |
274 pass | |
275 founds=self.ZopeFind(self.aq_parent.projects,obj_metatypes=['MPIWGProject'],search_sub=1) | |
276 for found in founds: | |
277 txt=found[1].harvest_page() | |
278 | |
279 if txt and (txt != ""): | |
280 name=found[0].replace("/","_") | |
281 fh=file("/tmp/harvest_MPIWG/"+name,"w") | |
282 fh.write(txt) | |
283 fh.close() | |
284 | |
285 def decode(self,str): | |
286 """decoder""" | |
287 | |
288 if not str: | |
289 return "" | |
290 if type(str) is StringType: | |
291 try: | |
292 return str.decode('utf-8') | |
293 except: | |
294 return str.decode('latin-1') | |
295 else: | |
296 return str | |
297 | |
298 | |
299 def getat(self,array,idx=0,default=None): | |
300 """return array element idx or default (but no exception)""" | |
301 if len(array) <= idx: | |
302 return default | |
303 else: | |
304 return array[idx] | |
305 | |
306 def getLang(self): | |
307 """returns the default language""" | |
308 return self.lang | |
309 | |
310 def browserCheck(self): | |
311 """check the browsers request to find out the browser type""" | |
312 bt = {} | |
313 ua = self.REQUEST.get_header("HTTP_USER_AGENT") | |
314 bt['ua'] = ua | |
315 bt['isIE'] = False | |
316 bt['isN4'] = False | |
317 if string.find(ua, 'MSIE') > -1: | |
318 bt['isIE'] = True | |
319 else: | |
320 bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1) | |
321 | |
322 try: | |
323 nav = ua[string.find(ua, '('):] | |
324 ie = string.split(nav, "; ")[1] | |
325 if string.find(ie, "MSIE") > -1: | |
326 bt['versIE'] = string.split(ie, " ")[1] | |
327 except: pass | |
328 | |
329 bt['isMac'] = string.find(ua, 'Macintosh') > -1 | |
330 bt['isWin'] = string.find(ua, 'Windows') > -1 | |
331 bt['isIEWin'] = bt['isIE'] and bt['isWin'] | |
332 bt['isIEMac'] = bt['isIE'] and bt['isMac'] | |
333 bt['staticHTML'] = False | |
334 | |
335 return bt | |
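    # Note (added): browserCheck() condenses the User-Agent header into a dict of
    # flags (isIE, isN4, isMac, isWin, isIEWin, isIEMac, versIE, staticHTML) that
    # page templates can use to branch on browser type.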
336 | |
337 | |
    def versionHeaderEN(self):
        """version header text"""

        date= self.REQUEST.get('date',None)
        if date:
            txt="""<h2>This page shows the projects as they existed on %s</h2>"""%str(date)
            return txt
        return ""
346 | |
    def versionHeaderDE(self):
        """version header text"""
        date= self.REQUEST.get('date',None)
        if date:
            txt="""<h2>Auf dieser Seite finden Sie die Projekte mit Stand vom %s</h2>"""%str(date)
            return txt
        return ""
353 | |
354 | |
355 def createOrUpdateId_raw(self): | |
356 """create sequence to create ids for bibliography""" | |
357 debug=None | |
        #find the largest existing id
359 founds=self.ZSQLQuery("select id from bibliography") | |
360 | |
361 if founds: | |
362 ids=[int(x.id[1:]) for x in founds] | |
363 maximum=max(ids) | |
364 | |
365 id_raw=self.ZSQLQuery("select nextval('id_raw')",debug=debug) | |
366 | |
367 if id_raw: | |
368 self.ZSQLQuery("drop sequence id_raw",debug=debug) | |
369 | |
370 self.ZSQLQuery("create sequence id_raw start %i"%(maximum+1),debug=debug) | |
371 | |
372 | |
373 def queryLink(self,link): | |
374 """append querystring to the link""" | |
375 return "%s?%s"%(link,self.REQUEST.get('QUERY_STRING','')) | |
376 | |
377 def getKategory(self,url): | |
378 """kategorie""" | |
379 splitted=url.split("/") | |
380 return splitted[4] | |
381 | |
    def generateUrlProject(self,url,project=None):
        """generate the relative URL of the project from the absolute URL"""
384 if project: | |
385 splitted=url.split("/") | |
386 length=len(splitted) | |
387 short=splitted[length-2:length] | |
388 | |
389 base=self.REQUEST['URL3']+"/"+"/".join(short) | |
390 | |
391 else: | |
392 findPart=url.find("/projects/") | |
393 base=self.REQUEST['URL1']+"/"+url[findPart:] | |
394 | |
395 | |
396 return base | |
397 | |
398 def isNewCapital(self,text=None,reset=None): | |
399 | |
400 if text: | |
401 text=text.upper() | |
402 if reset: | |
403 self.REQUEST['capital']="A" | |
404 return True | |
405 else: | |
406 if len(text)>0 and not (text[0]==self.REQUEST['capital']): | |
407 self.REQUEST['capital']=text[0] | |
408 return True | |
409 else: | |
410 return False | |
411 | |
    def subNavStatic(self,obj):
        """subnav of self"""
414 subs=self.ZopeFind(obj,obj_metatypes=['MPIWGTemplate','MPIWGLink']) | |
415 subret=[] | |
416 | |
417 for x in subs: | |
418 if not(x[1].title==""): | |
419 subret.append(x) | |
420 subret.sort(sortWeight) | |
421 return subret | |
422 | |
423 def subNav(self,obj): | |
424 """return sub-navigation elements i.e. below sections""" | |
425 # get section -> parent should be MPIWGRoot | |
426 p = obj | |
427 sec = None | |
428 # descend parents to the root (and remember the last id) | |
429 while p is not None and p.meta_type != 'MPIWGRoot': | |
430 sec = p | |
431 p = p.aq_parent | |
432 | |
433 subsecs = sec.objectItems(self.nav_meta_types) | |
434 subsecs = [s for s in subsecs if s[1].title != ""] | |
435 subsecs.sort(sortWeight) | |
436 return subsecs | |
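    # Note (added): subNav() climbs the acquisition chain from obj up to the MPIWGRoot,
    # remembers the last folder below the root as the current section and returns that
    # section's navigation objects (nav_meta_types) with non-empty titles, sorted by weight.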
437 | |
    def isType(self,object,meta_type):
        """test whether an object is of type meta_type"""
440 return (object.meta_type==meta_type) | |
441 | |
    def isActive(self,name):
        """test whether the subnavigation named name is active"""
444 for part in self.REQUEST['URL'].split("/"): | |
445 if part==name: | |
446 return True | |
447 return False | |
448 | |
449 | |
450 def getSections(self): | |
451 """returns a list of all sections i.e. top-level MPIWGFolders""" | |
452 secs = self.objectItems(['MPIWGFolder']) | |
453 secs.sort(sortWeight) | |
454 #logging.debug("root: %s secs: %s"%(repr(self.absolute_url()), repr(secs))) | |
455 # return pure list of objects | |
456 return [s[1] for s in secs] | |
457 | |
458 def getSectionStyle(self, name, style=""): | |
459 """returns a string with the given style + '-sel' if the current section == name""" | |
460 if self.getSection() == name: | |
461 return style + '-sel' | |
462 else: | |
463 return style | |
464 | |
465 def getFeatures(self, num=None): | |
466 """returns a list of the last num Features""" | |
467 dir = getattr(self, 'features') | |
468 features = dir.objectItems(['MPIWGFeature']) | |
469 features.sort(sortWeight) | |
470 if num is not None: | |
471 # take only the last num elements | |
472 features = features[-num:] | |
473 # return pure list of objects | |
474 return [f[1] for f in features] | |
475 | |
476 | |
477 def getMPIWGRoot(self): | |
478 """returns the MPIWG root""" | |
479 return self | |
480 | |
481 def MPIWGrootURL(self): | |
482 """returns the URL to the root""" | |
483 return self.absolute_url() | |
484 | |
485 def upDateSQL(self,fileName): | |
486 """updates SQL databases using fm.jar""" | |
487 fmJarPath=os.path.join(package_home(globals()), 'updateSQL/fm.jar') | |
488 xmlPath=os.path.join(package_home(globals()), "updateSQL/%s"%fileName) | |
489 logger("MPIWG Web",logging.INFO,"java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath)) | |
490 ret=os.popen("java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath),"r").read() | |
491 logger("MPIWG Web",logging.INFO,"result convert: %s"%ret) | |
492 return 1 | |
493 | |
494 def patchProjects(self,RESPONSE): | |
495 """patch""" | |
496 projects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject']) | |
497 for project in projects: | |
498 tmp=project[1].WEB_project_description[0].replace("/CD/projects/","")[0:] | |
499 setattr(project[1],'WEB_project_description',[tmp[0:]]) | |
500 RESPONSE.write("<p>%s</p>\n"%project[0]) | |
501 | |
502 def replaceNotEmpty(self,format,field): | |
503 """replace not empty""" | |
504 if field and (not field.lstrip()==''): | |
505 return self.decode(format%field) | |
506 else: | |
507 return "" | |
508 | |
509 | |
    def isActiveMember(self,key):
        """test whether the staff member with this key is active"""
512 key=utf8ify(key) | |
513 ret=self.getat(self.ZSQLInlineSearch(_table='personal_www', | |
514 _op_key='eq',key=key, | |
515 _op_publish_the_data='eq', | |
516 publish_the_data='yes')) | |
517 | |
518 logging.info("ACTIVE_MEMBER %s"%ret) | |
519 if ret: | |
520 return True | |
521 else: | |
522 return False | |
523 | |
    def isActual(self,project):
        """check whether the project is current"""
        actualTime=time.localtime()

        if hasattr(project,'getObject'): # obj comes from a catalog result list
529 obj=project.getObject() | |
530 else: | |
531 obj=project | |
532 | |
533 if getattr(obj,'archiveTime',actualTime)< actualTime: | |
534 return False | |
535 else: | |
536 return True | |
537 | |
538 def redirectIndex_html(self,request): | |
539 #return request['URL1']+'/index_html' | |
540 | |
541 return urllib.urlopen(request['URL1']+'/index_html').read() | |
542 | |
543 | |
544 def formatBibliography(self,here,found): | |
545 """format""" | |
546 return formatBibliography(here,found) | |
547 | |
    def getValue(self,fieldStr):
        """return the content of the field"""

        if type(fieldStr)==StringType:
            field=fieldStr
        else:
            field=fieldStr[0]
        try:
            if field[len(field)-1]==";":
                field=field[0:len(field)-1]
        except:
            pass # nothing
        field=re.sub(r';([^\s])','; \g<1>',field)
        return field.encode('utf-8')
563 | |
564 | |
565 | |
    def sortedNames(self,list):
        """sort names"""

        def sortLastName(x_c,y_c):
            try:
                x=urllib.unquote(x_c).encode('utf-8','ignore')
            except:
                x=urllib.unquote(x_c)

            try:
                y=urllib.unquote(y_c).encode('utf-8','ignore')
            except:
                y=urllib.unquote(y_c)

            try:
                last_x=x.split()[len(x.split())-1]
                last_y=y.split()[len(y.split())-1]
            except:
                last_x=""
                last_y=""

            if last_x<last_y:
                return 1
            elif last_x>last_y:
                return -1
            else:
                return 0

        list.sort(sortLastName)
        list.reverse()

        return list
604 | |
605 def __init__(self, id, title): | |
606 """init""" | |
607 self.id=id | |
608 self.title=title | |
609 | |
610 def removeStopWords(self,xo): | |
611 """remove stop words from xo""" | |
612 if not hasattr(self,'_v_stopWords'): | |
613 self._v_stopWords=self.stopwords_en.data.split("\n") | |
614 | |
615 x=str(xo) | |
616 | |
617 strx=x.split(" ") | |
618 | |
619 for tmp in strx: | |
620 | |
621 if tmp.lower() in self._v_stopWords: | |
622 del strx[strx.index(tmp)] | |
623 | |
624 return " ".join(strx) | |
625 | |
626 def urlQuote(self,str): | |
627 """quote""" | |
628 return urllib.quote(str) | |
629 | |
630 def urlUnQuote(self,str): | |
631 """quote""" | |
632 return urllib.unquote(str) | |
633 | |
634 | |
635 | |
    def getProjectsByFieldContent(self,fieldName,fieldContentsEntry, date=None):
        """return all projects where the value of the field fieldName contains an element of the list fieldContents"""
638 def sort(x,y): | |
639 return cmp(x.WEB_title[0],y.WEB_title[0]) | |
640 | |
641 if type(fieldContentsEntry) is StringType: | |
642 fieldContentsTmp=[fieldContentsEntry] | |
643 else: | |
644 fieldContentsTmp=fieldContentsEntry | |
645 | |
646 fieldContents=[] | |
647 for x in fieldContentsTmp: | |
648 fieldContents.append(" AND ".join(x.split())) | |
        projects=self.ProjectCatalog({fieldName:string.join(fieldContents,' AND ')})
650 #print projects | |
651 #ret=[x for x in projects] | |
652 ret=[] | |
653 for x in projects: | |
654 obj=x.getObject() | |
655 obj=obj.getActualVersion(date) | |
656 if obj and (not getattr(obj,'invisible',None)): | |
657 #if not (x in ret): | |
658 ret.append(x) | |
659 | |
660 ret.sort(sort) | |
661 return ret | |
662 | |
663 def changeMPIWGRootForm(self): | |
664 """edit""" | |
665 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','changeMPIWGRootForm')).__of__(self) | |
666 return pt() | |
667 | |
668 def changeMPIWGRoot(self,title,connection_id,coneServiceURL,harvestPort,harvestServer,lang=None,autocommit=None,RESPONSE=None): | |
669 """change""" | |
670 self.title=title | |
671 self.connection_id=connection_id | |
672 #self.disciplineList=disciplineList | |
673 #self.themesList=themesList | |
674 self.coneServiceURL=coneServiceURL | |
675 self.harvestServer=harvestServer | |
676 try: | |
677 self.harvestPort=int(harvestPort) | |
678 except: | |
679 logging.error("couldn't change port!: no number:"+harvestPort) | |
680 | |
681 if lang is not None: | |
682 self.lang = lang | |
683 | |
684 self.autocommit = (autocommit == "on") | |
685 if RESPONSE is not None: | |
686 RESPONSE.redirect('manage_main') | |
687 | |
688 | |
    def getContexts(self,childs=None,parents=None,depth=None,date=None,onlyActive=True):
        """childs: return all children, parents: return all parents"""
691 ret=[] | |
692 | |
693 if parents: | |
694 pnums=parents.split(".") | |
695 while len(pnums) > 1: | |
696 pnums.pop() | |
697 parentId=string.join(pnums,".") | |
698 | |
699 for project in self.getProjectFields('xdata_05',sort='int',date=date): | |
700 if project[1]==parentId: | |
701 ret.append(project) | |
702 | |
703 if (depth is not None) and (len(ret) >= depth): | |
704 break | |
705 | |
706 if childs: | |
707 for project in self.getProjectFields('xdata_05',sort='int',date=date): | |
708 searchStr=childs+"(\..*)" | |
709 | |
710 if (onlyActive and project[0].isActiveProject()) or (not onlyActive): | |
711 if re.match(searchStr,project[1]): | |
712 | |
713 if depth: | |
714 | |
715 if int(depth)>=len(project[1].split("."))-len(childs.split(".")): | |
716 | |
717 ret.append(project) | |
718 else: | |
719 ret.append(project) | |
720 | |
721 #logging.debug("getContexts: childs=%s parents=%s depth=%s => %s"%(childs,parents,depth,repr(ret))) | |
722 | |
723 return ret | |
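    # Note (added): the project hierarchy is encoded in the sorting number (xdata_05),
    # e.g. "2.10.3" is a child of "2.10"; getContexts() finds parents by stripping
    # trailing number components and children by matching the given prefix followed by ".".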
724 | |
725 | |
    def getAllProjectsAndTagsAsCSV(self,archived=1,RESPONSE=None):
        """all projects, including the ones that are not tagged"""
        if not hasattr(self,'thesaurus'):
            return "No thesaurus (there has to be a MPIWGthesaurus object with the object ID thesaurus)"
        retList=[]
        headers=['projectId','sortingNumber','projectName','scholars','startedAt','completedAt','lastChangeThesaurusAt','lastChangeProjectAt','projectCreatedAt','persons','places','objects']
        headers.extend(list(self.thesaurus.tags.keys()))
        retList.append("\t".join(headers))
734 | |
735 projectTags = self.thesaurus.getProjectsAndTags() | |
736 for project in self.getProjectFields('WEB_title_or_short'): | |
737 proj = project[0] | |
738 p_name = project[1] | |
739 retProj=[] | |
740 #if (not proj.isArchivedProject() and archived==1) or (proj.isArchivedProject() and archived==2): | |
741 retProj.append(self.utf8ify(proj.getId())) | |
742 retProj.append(self.utf8ify(proj.getContent('xdata_05'))) | |
743 retProj.append(self.utf8ify(p_name)) | |
744 retProj.append(self.utf8ify(proj.getContent('xdata_01'))) | |
745 retProj.append(self.utf8ify(proj.getStartedAt())) | |
746 retProj.append(self.utf8ify(proj.getCompletedAt())) | |
747 changeDate=self.thesaurus.lastChangeInThesaurus.get(proj.getId(),'') | |
748 n = re.sub("[:\- ]","",str(changeDate)) | |
749 retProj.append(n) | |
750 retProj.append(self.utf8ify(getattr(proj,'creationTime','20050101000000'))) | |
751 retProj.append("")#TODO: project created at | |
752 retProj.append(";".join([person[1] for person in self.thesaurus.getPersonsFromProject(proj.getId())])) | |
753 retProj.append(";".join([person[1] for person in self.thesaurus.getHistoricalPlacesFromProject(proj.getId())])) | |
754 retProj.append(";".join([person[1] for person in self.thesaurus.getObjectsFromProject(proj.getId())])) | |
755 retProj+=self.thesaurus.getTags(proj.getId(),projectTags) | |
756 retList.append("\t".join(retProj)) | |
757 | |
758 if RESPONSE: | |
759 | |
760 RESPONSE.setHeader('Content-Disposition','attachment; filename="ProjectsAndTags.tsv"') | |
761 RESPONSE.setHeader('Content-Type', "application/octet-stream") | |
762 | |
763 return "\n".join(retList); | |
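    # Note (added): despite the "CSV" in the name, the export above is tab-separated
    # (it is offered as ProjectsAndTags.tsv), one row per project, with one additional
    # column per thesaurus tag appended to the fixed header columns.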
764 | |
765 | |
766 | |
767 | |
768 def getProjectFields(self,fieldName,date=None,folder=None,sort=None): | |
769 """getListofFieldNames""" | |
770 ret=[] | |
771 | |
772 objects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject'],search_sub=0) | |
773 | |
774 | |
775 for object in objects: | |
776 obj=object[1] | |
777 obj=obj.getActualVersion(date) | |
778 if obj and (not getattr(obj,'invisible',None)): | |
779 if fieldName=="WEB_title_or_short": | |
780 | |
781 if len(obj.getContent('xdata_07'))<3: # hack weil z.Z. manchmal noch ein Trennzeichen ; oder , im Feld statt leer | |
782 fieldNameTmp="WEB_title" | |
783 else: | |
784 fieldNameTmp="xdata_07" | |
785 else: | |
786 fieldNameTmp=fieldName | |
787 | |
788 ret.append((obj,obj.getContent(fieldNameTmp))) | |
789 | |
790 | |
791 if sort=="int": | |
792 ret.sort(sortI) | |
793 elif sort=="stopWords": | |
794 | |
795 ret.sort(sortStopWords(self)) | |
796 | |
797 else: | |
798 ret.sort(sortF) | |
799 | |
800 return ret | |
801 | |
802 def showNewProjects(self): | |
803 projects=[] | |
804 for objs in self.getProjectFields('WEB_title_or_short'): # Get all Projets | |
805 if objs[0].xdata_05 and (objs[0].xdata_05[0] == ""): | |
806 | |
807 projects.append(objs) | |
808 | |
809 return projects | |
810 | |
811 | |
812 def updatePublicationDB(self,personId=None): | |
813 """updates the publication db, i.e. copy year and type into the main table""" | |
814 | |
815 if personId: | |
816 founds = self.ZSQLInlineSearch(_table="publications",key_main=personId) | |
817 else: | |
818 founds = self.ZSQLInlineSearch(_table="publications") | |
819 | |
820 for found in founds: | |
821 | |
822 if found.id_institutsbibliographie and (not found.id_institutsbibliographie =="") and (not found.id_institutsbibliographie =="0"): | |
823 | |
824 entries = self.ZSQLInlineSearch(_table="institutsbiblio",id=found.id_institutsbibliographie) | |
825 for entry in entries: | |
826 self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type) | |
827 | |
828 if found.id_gen_bib and (not found.id_gen_bib ==""): | |
829 entries = self.ZSQLInlineSearch(_table="bibliography",id=found.id_gen_bib) | |
830 for entry in entries: | |
831 self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type) | |
832 | |
833 return True | |
834 | |
    def showNewDBEntries(self):
        """show new entries in the database that have no e-mail address or for which no object has been created yet"""
837 | |
838 qstr="select * from personal_www where web_object_created='no' and not key=''" | |
839 res=self.ZSQLQuery(qstr) | |
840 | |
841 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','showNewDBEntries.zpt')).__of__(self) | |
842 return pt(newEntries=res) | |
843 | |
844 def createNewStaffObjects(self,RESPONSE): | |
845 """create new staff object""" | |
846 | |
847 memberFolder=getattr(self,'members') | |
848 args=self.REQUEST.form | |
849 arg_k=args.keys() | |
850 arg_k.remove("submit") | |
851 ret="" | |
852 for key in arg_k: | |
853 k=self.urlUnQuote(key) | |
854 | |
855 qstr="select * from personal_www where key=%s"%self.ZSQLQuote(k) | |
856 res=self.ZSQLQuery(qstr)[0] | |
            if args[key]!="": # an e-mail address was entered
                #create the object
859 e_mail=args[key] | |
860 try: | |
861 newObj=MPIWGStaff.MPIWGStaff(e_mail,res.last_name,res.first_name,k) | |
862 memberFolder._setObject(e_mail,newObj) | |
863 obj=getattr(memberFolder,e_mail) | |
864 obj.reindex_object() | |
865 ret+="Created %s \n"%e_mail | |
866 created=True | |
867 except: | |
868 msg="Cannot create new user %s (%s %s)"%(e_mail,sys.exc_info()[0],sys.exc_info()[1]) | |
869 logging.error(msg) | |
870 ret+=msg+"\n" | |
871 created=False | |
872 | |
873 if created: | |
874 qstr="update personal_www set web_object_created='yes',e_mail='%s@mpiwg-berlin.mpg.de' where key=%s"%(e_mail,self.ZSQLQuote(k)) | |
875 self.ZSQLQuery(qstr) | |
876 | |
877 return ret | |
878 | |
879 | |
    def generateNewPersonEntry(self,data):
        """generate a new person entry for data; new persons are initially only created in the database"""
882 | |
883 #memberFolder=getattr(self,'members') | |
884 #create the object | |
885 | |
886 # try: | |
887 # newObj=MPIWGStaff.MPIWGStaff(urllib.quote(data['key']),data['last_name'].encode('utf-8'),data['first_name'].encode('utf-8')) | |
888 # memberFolder._setObject(urllib.quote(data['key']),newObj) | |
889 # except: | |
890 # return False, "Cannot create new user %s (%s %s)"%(data['key'],sys.exc_info()[0],sys.exc_info()[1]) | |
891 # | |
892 | |
893 #create the new entry in the database | |
894 | |
895 | |
896 result,msg=MPIWGStaff.createNewDBEntry(self,data['publish_the_data'],data['key'],data['last_name'], | |
897 data['first_name'],data['titles_new'],data['status'],"", | |
898 "",data['date_from'],data['date_to'], | |
899 data['department'],'',data['funded_by'], | |
900 data['e_mail2'],data['current_work'],"yes",data['date_stay_at_mpiwg'],data['group'],"no",data['current_work']) | |
901 | |
902 return result,msg | |
903 | |
904 def updatePersonEntry(self,data,ignoreEntries=None): | |
905 """update an person entry from data. but ignore all fields in ignore Entries""" | |
906 if ignoreEntries is None: | |
907 ignoreEntries = [] | |
908 | |
909 #ignoreEntries.append('current_work') # TODO:updatecurrent work | |
910 logging.debug("updatePersonEntry: data=%s ignoreEntries=%s"%(repr(data),repr(ignoreEntries))) | |
911 | |
        if data['date_to']=="": # if date_to is empty
            data['date_to']="date_none"

        if data['date_from']=="": # if date_from is empty
            data['date_from']="date_none"
917 msg="" | |
918 | |
919 | |
920 #eintragen | |
921 | |
922 columns=data.keys() | |
923 for x in ignoreEntries: | |
924 logging.debug("updatePersonEntry: ignoring %s"%x) | |
            try: # if ignoreEntries contains fields that are not in columns, catch the error
926 columns.remove(x) | |
927 except: | |
928 pass | |
929 | |
930 | |
        insert=[]
        for key in columns:
            if data[key]=="date_none": # a date_none entry is translated to null
                insert.append('%s=null'%key)
            else:
                insert.append(""" "%s"=%s"""%(key,self.ZSQLQuote(data[key])))
937 | |
938 insertStr=",".join(insert) | |
939 queryStr="update personal_www SET %s where key='%s'"%(insertStr,data['key']) | |
940 self.ZSQLQuery("SET DATESTYLE TO 'German'") | |
941 self.ZSQLQuery(queryStr) | |
942 | |
943 #currentwork | |
944 #if not (txt==""): | |
945 # queryStr="INSERT INTO current_work (id_main,current,publish) VALUES ('%s','%s','%s')"%(id,txt,txt_p) | |
946 # | |
947 # self.ZSQLQuery(queryStr) | |
948 | |
949 return True,msg | |
950 | |
951 | |
952 def updatePersonalwww_doIt(self): | |
953 """do the update""" | |
954 args=self.REQUEST.form | |
955 resultSet=self.REQUEST.SESSION['personal_www']['resultSet'] | |
956 news=self.REQUEST.SESSION['personal_www']['news'] | |
957 conflicts=self.REQUEST.SESSION['personal_www']['conflicts'] | |
958 logging.debug("updatePersonalwww_doIt: args=%s\n resultSet=%s\n news=%s\n conflicts=%s"%(args,resultSet,news,conflicts)) | |
959 | |
960 ret="<html><body>" | |
961 # generate the new entry | |
962 | |
963 if news and (len(news)>0): | |
964 ret+="<h2>Hinzugefügt</h2>" | |
965 ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>" | |
966 ret+="<ul>" | |
967 | |
968 for new in news: | |
969 if args.has_key(self.urlQuote(new.encode('utf-8'))): # entry was selected | |
970 result,msg=self.generateNewPersonEntry(resultSet[new]) | |
971 if not result: | |
972 logging.error("Error (generateNewPersonEntry) %s"%msg) | |
973 ret+="<li>ERROR: %s %s"%(new.encode('utf-8'),msg) | |
974 else: | |
975 ret+="<li>OK: %s"%(new.encode('utf-8')) | |
976 | |
977 if news and (len(news)>0): | |
978 ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>" | |
979 ret+="</ul>" | |
980 | |
981 # update | |
982 | |
983 if len(conflicts.keys())>0: | |
984 ret+="<h2>Änderung des Benutzers übernehmen</h2>" | |
985 ret+="<p>Wenn nötig in Filemaker-db ändern:</p>" | |
986 | |
987 # konflicte | |
988 for conflict in conflicts.keys(): | |
989 ignoreEntries=[] | |
990 displayIgnored=[] | |
991 for cf in conflicts[conflict]: | |
992 if args[conflict.encode('utf-8')+'_'+cf[0]]=="stored": #use the stored one | |
993 ignoreEntries.append(cf[0]) #so ignore field cf[0] | |
994 displayIgnored.append(cf) | |
995 | |
996 if len(displayIgnored)>0: | |
997 ret+="<h3>%s</h3>"%conflict.encode('utf-8') | |
998 ret+="<table border='1'>" | |
999 for iE in displayIgnored: | |
1000 ret+="<tr><td>%s</td><td>%s</td><td>%s</td>"%(iE[0].encode('utf-8'),iE[1].encode('utf-8'),iE[2].encode('utf-8')) | |
1001 ret+="</table>" | |
1002 | |
1003 self.updatePersonEntry(resultSet[conflict],ignoreEntries=ignoreEntries) | |
1004 | |
1005 # rest | |
1006 cl=list(conflicts.keys()) | |
1007 | |
1008 for key in resultSet.keys(): | |
1009 if key not in cl: | |
1010 self.updatePersonEntry(resultSet[key]) | |
1011 return ret+"</body></html>" | |
1012 | |
1013 | |
    def updateInstitutsbiliography(self):
        """update the Institutsbibliographie"""
1016 self.upDateSQL('personalwww.xml') | |
1017 return "<html><body>DONE</body></html>" | |
1018 | |
1019 | |
1020 | |
1021 | |
1022 def updatePersonalwww_html(self): | |
1023 """update form for the homepages web form""" | |
1024 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww.zpt')).__of__(self) | |
1025 return pt() | |
1026 | |
1027 | |
    def updatePersonalwww(self,uploadfile):
        """update personalwww
        @param uploadfile: file handle for the uploaded file
        """
1032 dsn=self.getConnectionObj().connection_string | |
1033 #dsn="dbname=personalwww" | |
1034 resultSet=updatePersonalWWW.importFMPXML(uploadfile) | |
1035 news,conflicts=updatePersonalWWW.checkImport(dsn, resultSet) | |
1036 | |
1037 self.REQUEST.SESSION['personal_www']={} | |
1038 self.REQUEST.SESSION['personal_www']['resultSet']=resultSet | |
1039 self.REQUEST.SESSION['personal_www']['news']=news | |
1040 self.REQUEST.SESSION['personal_www']['conflicts']=conflicts | |
1041 | |
1042 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww_check.zpt')).__of__(self) | |
1043 return pt() | |
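    # Note (added): updatePersonalwww() parses the uploaded FileMaker XML export,
    # compares it with the personal_www table (news = keys not yet in the database,
    # conflicts = fields that differ), stores the result in the session and renders the
    # check form; updatePersonalwww_doIt() then applies the changes selected there.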
1044 | |
1045 | |
1046 | |
1047 def reindexCatalogs(self,RESPONSE=None): | |
1048 """reindex members and project catalog""" | |
1049 | |
1050 | |
1051 try: | |
1052 | |
1053 self.ProjectCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1']) | |
1054 logger("MPIWG Root (reindexCatalog: projects)",logging.INFO,"DONE") | |
1055 except: | |
1056 logger("MPIWG Root (reindexCatalog: projects)",logging.WARNING," %s %s"%sys.exc_info()[:2]) | |
1057 | |
1058 try: | |
1059 | |
1060 self.MembersCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1']) | |
1061 logger("MPIWG Root (reindexCatalog: members)",logging.INFO,"DONE") | |
1062 except: | |
1063 logger("MPIWG Root (reindexCatalog: members)",logging.WARNING," %s %s"%sys.exc_info()[:2]) | |
1064 | |
1065 try: | |
1066 | |
1067 self.fulltextProjectsMembers.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1']) | |
1068 logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.INFO,"DONE") | |
1069 except: | |
1070 logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.WARNING," %s %s"%sys.exc_info()[:2]) | |
1071 | |
1072 | |
1073 | |
1074 | |
1075 | |
1076 | |
1077 if RESPONSE: | |
1078 RESPONSE.redirect('manage_main') | |
1079 | |
1080 | |
1081 | |
1082 | |
1083 def getAllMembers(self): | |
1084 #ret=[] | |
1085 | |
1086 def sorter(x,y): | |
1087 return cmp(x[0].lower(),y[0].lower()) | |
1088 | |
1089 results=self.MembersCatalog({'isPublished':True}) | |
1090 | |
1091 ret=[(unicodify(", ".join([proj.lastName, proj.firstName])), proj.getKey) for proj in results] | |
1092 | |
1093 ret.sort(sorter) | |
1094 return ret | |
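    # Note (added): getAllMembers() returns a list of tuples ("Lastname, Firstname", key)
    # for all published members, sorted case-insensitively by name.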
1095 | |
1096 | |
1097 def printAllMembers(self): | |
1098 """print""" | |
1099 members=self.getAllMembers() | |
1100 ret="" | |
1101 for x in members: | |
1102 ret+="<p>%s</p>"%x | |
1103 return ret | |
1104 | |
1105 | |
    def makeList(self,entry):
        """make a list out of a single entry, otherwise return the list unchanged"""
1108 if type(entry) is StringType: | |
1109 return [entry] | |
1110 else: | |
1111 return entry | |
1112 | |
1113 def getTreeRSS(self,dep=None,date=None,onlyActive=1,onlyArchived=0): | |
1114 """generateTree""" | |
1115 rss="""<?xml version="1.0" encoding="utf-8"?> | |
1116 <rss version="2.0"> | |
1117 <channel>""" | |
1118 | |
1119 for obj in self.getTree(dep, date, onlyActive, onlyArchived): | |
1120 linkStr="""<link>http://www.mpiwg-berlin.mpg.de/en/research/projects/%s</link>""" | |
1121 rss+="""<item>""" | |
1122 rss+=linkStr%obj[3].getId() | |
1123 rss+="""</item>""" | |
1124 if hasattr(obj[3],'publicationList'): | |
1125 rss+="""<item>""" | |
1126 rss+=linkStr%(obj[3].getId()+"/publicationList"); | |
1127 rss+="""</item>""" | |
1128 rss+="""</channel> | |
1129 </rss>""" | |
1130 | |
1131 | |
1132 return rss | |
1133 | |
    def getTree(self,dep=None,date=None,onlyActive=0,onlyArchived=0):
        """generate a tree from the project list,
        as a list whose entries are tuples (depth, projectNumber, title, projectObject)
        onlyActive = 0 : all projects
        onlyActive = 1 : only active projects
        onlyActive = 2 : only inactive projects

        onlyArchived = 0 : all projects
        onlyArchived = 1 : only current projects
        onlyArchived = 2 : only archived projects

        dep: department for which the tree should be fetched
        """
1147 | |
1148 returnListTmp=[] | |
1149 returnList=[] | |
1150 | |
1151 for project in self.getProjectFields('xdata_05',sort="int",date=date): # get Projects sorted by xdata_05 | |
1152 | |
1153 for idNr in project[1].split(";"): # more than one number | |
1154 if not idNr=="": | |
1155 splittedId=idNr.split(".") | |
1156 depth=len(splittedId) | |
1157 nr=idNr | |
1158 #title=project[0].WEB_title | |
1159 title=[project[0].getContent('WEB_title')] | |
1160 #print title | |
1161 | |
1162 if idNr[0]=="x": # kompatibilitaet mit alter Konvention, x vor der Nummer macht project inactive | |
1163 project[0].setActiveFlag(False) | |
1164 | |
1165 if (not dep) or (splittedId[0]==dep): #falls dep gesetzt ist nur dieses hinzufuegen. | |
1166 | |
1167 if (onlyActive==0): | |
1168 returnListTmp.append((depth,nr,title,project[0])) | |
1169 elif (onlyActive==1) and project[0].isActiveProject(): #nur active projekte | |
1170 returnListTmp.append((depth,nr,title,project[0])) | |
1171 elif (onlyActive==2) and (not project[0].isActiveProject()): #nur active projekte | |
1172 returnListTmp.append((depth,nr,title,project[0])) | |
1173 | |
1174 | |
1175 #filter jetzt die Liste nach Archived oder nicht | |
1176 for entry in returnListTmp: | |
1177 if (onlyArchived==0): | |
1178 returnList.append(entry) | |
1179 elif (onlyArchived==1) and (not entry[3].isArchivedProject()): #nur active projekte | |
1180 returnList.append(entry) | |
1181 elif (onlyArchived==2) and (entry[3].isArchivedProject()): #nur active projekte | |
1182 returnList.append(entry) | |
1183 | |
1184 | |
1185 return returnList | |
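    # Example (sketch, not part of the original code): getTree() returns tuples of the form
    # (depth, sortingNumber, [title], projectObject); a project with xdata_05 = "2.10.3"
    # appears as (3, "2.10.3", [title], <MPIWGProject>), and getTree(dep="2", onlyActive=1,
    # onlyArchived=1) would list only the active, non-archived projects of department 2.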
1186 | |
1187 | |
1188 | |
1189 def changePosition(self,treeId,select,RESPONSE=None): | |
1190 """Change Postion Entry""" | |
1191 numbers=[] | |
1192 | |
        # find the highest existing number
1194 projects=self.getProjectFields('xdata_05') # get Projects sorted by xdata_05 | |
1195 #print "pj",projects | |
1196 for project in projects: #suche alle subtrees der treeId | |
1197 #print treeId | |
1198 | |
1199 founds=re.match(treeId+"\.(.*)",project[1].split(";")[0]) | |
1200 if founds: | |
1201 #print "x",founds.group(0),len(founds.group(0).split(".")) | |
1202 if len(founds.group(0).split("."))==len(treeId.split("."))+1: # nur ein punkt mehr, d.h. untere ebene | |
1203 try: | |
1204 numbers.append(int(founds.group(0).split(".")[len(founds.group(0).split("."))-1])) | |
1205 except: | |
1206 numbers.append(int(0)) | |
1207 | |
1208 try: | |
1209 highest=max(numbers) | |
1210 except: | |
1211 highest=0 | |
1212 projects=self.showNewProjects() | |
1213 for i in self.makeList(select): | |
1214 highest+=10 | |
1215 projects[int(i)][0].xdata_05=treeId+"."+str(highest) | |
1216 | |
1217 | |
1218 if RESPONSE is not None: | |
1219 RESPONSE.redirect('showTree') | |
1220 | |
1221 def changeTree(self,RESPONSE=None): | |
1222 """change the complete tree""" | |
1223 form=self.REQUEST.form | |
1224 hashList={} | |
1225 onlyArchived=int(form.get("onlyArchived",0)) | |
1226 onlyActive=int(form.get("onlyActive",0)) | |
1227 dep=form.get("dep",None) | |
1228 | |
1229 fields=self.getTree(dep=dep,onlyArchived=onlyArchived,onlyActive=onlyActive) | |
1230 | |
1231 logging.info("GOT TREE!----------------------------------------------------") | |
1232 for field in form.keys(): | |
1233 | |
1234 splitted=field.split('_') | |
1235 if (len(splitted)>1) and (splitted[1]=="runningNumber"): #feld hat die Form Nummer_name und runnignNumber | |
1236 | |
1237 | |
1238 nr=int(splitted[0]) # nummer des Datensatzes | |
1239 currentEntry = fields[nr] | |
1240 | |
1241 if form.has_key(str(nr)+'_active'): # active flag is set | |
1242 fields[nr][3].setActiveFlag(True) | |
1243 else: | |
1244 fields[nr][3].setActiveFlag(False) | |
1245 | |
1246 #nummer hat sich geaendert | |
1247 | |
1248 entryChanged = False; | |
1249 | |
1250 if isinstance(fields[nr][3].xdata_05,list): #for some reasons somtimes the content of the field is a list with one entry. | |
1251 fields[nr][3].xdata_05=fields[nr][3].xdata_05[0] | |
1252 | |
1253 if not (fields[nr][3].xdata_05==form[str(nr)+'_number']): | |
1254 logging.info("Changed!Number+++++++++++++++++++++++++++++++++") | |
1255 logging.info(repr(fields[nr][3].xdata_05)+" ---> "+ repr(form[str(nr)+'_number'])) | |
1256 fields[nr][3].xdata_05=form[str(nr)+'_number'] | |
1257 entryChanged = True | |
1258 | |
1259 #completed har sich geaendert | |
1260 | |
1261 td = fields[nr][3].transformDate # hole die funktion zum transformieren des datums | |
1262 | |
1263 if not (td(fields[nr][3].getCompletedAt())==td(form[str(nr)+'_completed'])): | |
1264 fields[nr][3].setCompletedAt(form[str(nr)+'_completed']) | |
1265 logging.info(repr(td(fields[nr][3].getCompletedAt()))+" ---> "+ repr(td(form[str(nr)+'_completed']))) | |
1266 logging.info("Changed!Completed+++++++++++++++++++++++++++++++++") | |
1267 entryChanged = True | |
1268 | |
1269 if not (td(fields[nr][3].getStartedAt())==td(form[str(nr)+'_started'])): | |
1270 fields[nr][3].setStartedAt(form[str(nr)+'_started']) | |
1271 | |
1272 logging.info(repr(td(fields[nr][3].getStartedAt()))+" ---> "+ repr(td(form[str(nr)+'_started']))) | |
1273 logging.info("Changed!Started+++++++++++++++++++++++++++++++++") | |
1274 entryChanged = True | |
1275 | |
1276 | |
1277 if entryChanged: | |
1278 logging.info("Changed!+++++++++++++++++++++++++++++++++") | |
1279 fields[nr][3].copyObjectToArchive() | |
1280 | |
1281 | |
1282 if RESPONSE is not None: | |
1283 RESPONSE.redirect('showTree') | |
1284 | |
1285 def getProjectWithId(self,id): | |
1286 fields=self.getProjectFields('xdata_05') | |
1287 for field in fields: | |
1288 if field[1]==id: | |
1289 return field[0] | |
1290 | |
1291 return None | |
1292 | |
1293 | |
1294 | |
1295 | |
1296 def getRelativeUrlFromPerson(self,list): | |
1297 """get urls to person list""" | |
1298 ret=[] | |
1299 persons=list.split(";") | |
1300 for person in persons: | |
1301 | |
            if len(person)>1: # not just a separator
                splitted=person.split(",")
                if len(splitted)==1:
                    splitted=person.split(" ")
                splittedNew=[re.sub(r'\s(.*)',r'\1',split) for split in splitted]
1307 if splittedNew[0]=='': | |
1308 del splittedNew[0] | |
1309 search=string.join(splittedNew,' AND ') | |
1310 | |
1311 if not search=='': | |
1312 | |
1313 try: | |
1314 proj=self.MembersCatalog({'title':search}) | |
1315 except: | |
1316 proj=None | |
1317 | |
1318 if proj: | |
1319 #ret.append("<a href=%s >%s</a>"%(proj[0].absolute_url,person.encode('utf-8'))) | |
1320 ret.append("<a href=%s >%s</a>"%('members/'+proj[0].id+'/index.html',person)) | |
1321 else: | |
1322 #ret.append("%s"%person.encode('utf-8')) | |
1323 ret.append("%s"%person) | |
1324 return string.join(ret,";") | |
1325 | |
    def getMemberIdFromKey(self,key):
        """return the corresponding id in the members folder for the given key"""
1328 | |
1329 if key=="": | |
1330 return "" | |
1331 try: | |
1332 key=utf8ify(key) | |
1333 catalogged=self.MembersCatalog({'getKey':key}) | |
1334 if len(catalogged)==0: | |
1335 return "" | |
1336 else: | |
1337 return catalogged[0].getObject().getId() | |
1338 | |
1339 except: | |
1340 return "" | |
1341 | |
1342 | |
1343 | |
1344 def getProjectsOfMembers(self,date=None): | |
1345 """give tuple member /projects""" | |
1346 ret=[] | |
1347 members=self.getAllMembers() | |
1348 logging.debug("X %s"%repr(members)) | |
1349 #return str(members) | |
1350 for x in members: | |
1351 #logging.debug("X %s"%repr(x)) | |
1352 projects=self.getProjectsOfMember(key=x[1],date=date) | |
1353 if len(projects)>0: | |
1354 ret.append((x[0],projects)) | |
1355 | |
1356 return ret | |
1357 | |
    def getProjectsOfMember(self,key=None,date=None,onlyArchived=1,onlyActive=1):
        """get projects of a member

        @param key: (optional) key identifying the member
        @param date: (optional) version that was valid at date
        @param onlyArchived:
                     onlyArchived = 0 : all projects
                     onlyArchived = 1 : only current projects
                     onlyArchived = 2 : only archived projects
        """
        # TODO: the complete solution
1369 def sortP(x,y): | |
1370 """sort by sorting number""" | |
1371 return cmp(x.WEB_title,y.WEB_title) | |
1372 | |
1373 ret=[] | |
1374 if key: | |
1375 logging.debug("MPIWGROOT (getProjectsOfMember):"+key) | |
1376 proj=self.ProjectCatalog({'getPersonKeyList':utf8ify(key)}) | |
1377 else: | |
1378 return ret # key muss definiert sein | |
1379 | |
1380 #logging.debug("MPIWGROOT (getProjectsOfMember):"+repr(proj)) | |
1381 if proj: | |
1382 proj2=[] | |
1383 for x in proj: | |
1384 #logging.error("proj:%s"%repr(x.getPath())) | |
1385 if (not getattr(x.getObject(),'invisible',None)) and (getattr(x.getObject(),'archiveTime','')==''): | |
1386 proj2.append(x) | |
1387 | |
1388 else: | |
1389 proj2=[] | |
1390 | |
1391 | |
1392 | |
1393 proj2.sort(sortP) | |
1394 | |
1395 projectListe=[] | |
1396 #logging.error("getprojectsofmember proj2: %s"%repr(proj2)) | |
1397 for proj in proj2: | |
1398 obj=proj.getObject() | |
1399 add=False | |
            if onlyArchived==1: # only current projects
                if not obj.isArchivedProject():
                    add=True
            elif onlyArchived==2: # only archived projects
                if obj.isArchivedProject():
                    add=True
            else: # all
                add=True

            if onlyActive==1: # only active projects
                if obj.isActiveProject():
                    add=add & True
                else:
                    add=add & False

            elif onlyActive==2: # only inactive projects
                if not obj.isActiveProject():
                    add=add & True
            else: # all
                add=add & True
1420 | |
1421 if add: | |
1422 projectListe.append(obj) | |
1423 | |
1424 #logging.error("getprojectsofmember projectliste: %s"%repr(projectListe)) | |
1425 return projectListe | |
1426 | |
1427 def givePersonList(self,name): | |
1428 """check if person is in personfolder and return list of person objects""" | |
1429 | |
1430 splitted=name.split(",") | |
1431 if len(splitted)==1: | |
1432 splitted=name.lstrip().rstrip().split(" ") | |
1433 splittedNew=[split.lstrip() for split in splitted] | |
1434 | |
1435 if splittedNew[0]=='': | |
1436 del splittedNew[0] | |
1437 search=string.join(splittedNew,' AND ') | |
1438 | |
1439 if not search=='': | |
1440 proj=self.MembersCatalog({'title':search}) | |
1441 | |
1442 if proj: | |
1443 return [[x.lastName,x.firstName] for x in proj] | |
1444 else: | |
1445 return [] | |
1446 | |
1447 ## splitted=name.split(",") # version nachname, vorname... | |
1448 ## if len(splitted)>1: | |
1449 ## lastName=splitted[0] | |
1450 ## firstName=splitted[1] | |
1451 ## else: | |
1452 ## splitted=name.split(" ") #version vorname irgenwas nachnamae | |
1453 | |
1454 ## lastName=splitted[len(splitted)-1] | |
1455 ## firstName=string.join(splitted[0:len(splitted)-1]) | |
1456 | |
1457 ## objs=[] | |
1458 | |
1459 #print self.members | |
1460 ## for x in self.members.__dict__: | |
1461 ## obj=getattr(self.members,x) | |
1462 ## if hasattr(obj,'lastName') and hasattr(obj,'firstName'): | |
1463 | |
1464 ## if (re.match(".*"+obj.lastName+".*",lastName) or re.match(".*"+lastName+".*",obj.lastName)) and (re.match(".*"+obj.firstName+".*",firstName) or re.match(".*"+firstName+".*",obj.firstName)): | |
1465 | |
1466 ## objs.append((obj,lastName+", "+firstName)) | |
1467 | |
1468 | |
1469 ## return objs | |
1470 | |
1471 | |
1472 def personCheck(self,names): | |
1473 """all persons for list""" | |
1474 #print "names",names | |
1475 splitted=names.split(";") | |
1476 ret={} | |
1477 for name in splitted: | |
1478 | |
            if not (name==""):
                try:
                    ret[name]=self.givePersonList(name)
                except:
                    pass # nothing
1484 #print "RET",ret | |
1485 return ret | |
1486 | |
1487 def giveCheckList(self,person,fieldname): | |
1488 """return checklist""" | |
1489 #print "GCL",fieldname | |
1490 if fieldname=='xdata_01': | |
1491 x=self.personCheck(person.getContent(fieldname)) | |
1492 #print "GCLBACKX",x | |
1493 return x | |
1494 | |
1495 | |
    def isCheckField(self,fieldname):
        """return whether fieldname is a check field"""

        return (fieldname in checkFields)
1500 | |
1501 | |
    def generateNameIndex(self):
        """generate an index of the historical persons that are used"""
1504 import psycopg | |
1505 o = psycopg.connect('dbname=authorities user=dwinter password=3333',serialize=0) | |
1506 results={} | |
1507 print self.fulltext.historicalNames.items() | |
1508 for nameItem in self.fulltext.historicalNames.items(): #gehe durch alle namen des lexikons | |
1509 | |
1510 c = o.cursor() | |
1511 name=nameItem[0] | |
1512 print "check",name | |
1513 c.execute("select lastname,firstname from persons where lower(lastname) = '%s'"%quote(name)) | |
1514 tmpres=c.fetchall() | |
1515 firstnames=[result[1] for result in tmpres] # find all firstnames | |
1516 if tmpres: | |
1517 lastname=tmpres[0][0] | |
1518 | |
1519 for found in self.fulltext({'names':name}): | |
1520 if found.getObject().isActual(): | |
1521 for nh in found.getObject().getGetNeighbourhood(name, length=50,tagging=False): #hole umgebung | |
1522 #schaue nun ob der vorname hinter oder vor dem name ist | |
1523 position=nh.find(lastname) | |
1524 # vorher | |
1525 #print "NH",nh | |
1526 bevorS=nh[0:position].split() | |
1527 #print "BV",bevorS | |
1528 if len(bevorS)>1: | |
1529 try: | |
1530 bevor=[bevorS[-1],bevorS[-2]] | |
1531 except: | |
1532 bevor=[bevorS[0]] | |
1533 else: | |
1534 bevor=[] | |
1535 #nachher | |
1536 behindS= re.split("[,|;| ]",nh[position:]) | |
1537 #print "BH",behindS | |
                            if len(behindS)>2:
                                try:
                                    behind=behindS[1:3]
                                except:
                                    behind=[behindS[1]]
                            else:
                                behind=[]
1545 for firstname in firstnames: | |
1546 if firstname in bevor+behind: #Namen wie mit Adelspraedikaten werden so erstmal nich gefunden | |
1547 id="%s,%s"%(lastname,firstname) | |
1548 if not results.has_key(id): | |
1549 results[id]=[] | |
1550 objId=found.getObject().getId() | |
1551 if not (objId in results[id]): | |
1552 print "d %s for %s"%(id,objId) | |
1553 results[id].append(objId) | |
1554 self.nameIndex=results | |
1555 return results | |
1556 | |
1557 def editNameIndexHTML(self): | |
1558 """edit the name index""" | |
        if not hasattr(self,'nameIndexEdited'): # if the edited index does not exist yet, copy the automatically generated one
1560 self.nameIndexEdited=copy.copy(self.nameIndex) | |
1561 print "huh" | |
1562 #self.nameIndexEdited=copy.copy(self.nameIndex) | |
1563 #print self.nameIndexEdited | |
1564 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','editHistoricalNames.zpt')).__of__(self) | |
1565 return pt() | |
1566 | |
    def getNamesInProject(self,projectId):
        """get all names of nameIndexEdited which are referenced in the project with projectId"""
1569 | |
1570 ret=[] | |
1571 for name in self.nameIndexEdited.keys(): | |
1572 if projectId in self.nameIndexEdited[name]: | |
1573 ret.append(name) | |
1574 | |
1575 return ret | |
1576 | |
1577 def editNameIndex(self,RESPONSE=None,name=None,occurrances=None,submit=None): | |
1578 """edit the index""" | |
        nI=self.nameIndexEdited # nI introduced to make sure that changes to nameIndexEdited are known to the ZODB
1580 if submit=="delete": | |
1581 | |
1582 | |
1583 dh=getattr(self,'deletedHistoricalNames',{}) | |
1584 | |
1585 if type(dh) is ListType: | |
1586 dh={} | |
1587 if not dh.has_key(name): | |
1588 dh[name]=occurrances.split("\n") | |
1589 else: | |
1590 dh[name]+=occurrances.split("\n") | |
1591 | |
1592 self.deletedHistoricalNames=dh | |
1593 | |
1594 del self.nameIndexEdited[name] | |
1595 | |
1596 | |
1597 elif (submit=="change"): | |
1598 | |
1599 nI[name]=occurrances.split("\n")[0:] | |
1600 | |
1601 elif (submit=="add"): | |
1602 if not nI.has_key(name): | |
1603 nI[name]=occurrances.split("\n") | |
1604 else: | |
1605 nI[name]+=occurrances.split("\n") | |
1606 | |
1607 self.nameIndexEdited=nI | |
1608 | |
1609 | |
1610 if RESPONSE is not None: | |
1611 RESPONSE.redirect('editNameIndexHTML') | |
1612 | |
1613 | |
1614 | |
1615 def restoreIndex(self): | |
1616 """restore""" | |
1617 self.nameIndexEdited=self.nameIndex | |
1618 return "done" | |
1619 | |
1620 | |
1621 def sortResults(self,results): | |
1622 """search the catalog and give results back sorted by meta_type""" | |
1623 ret = {} | |
1624 logging.debug(results()) | |
1625 for result in results(): | |
1626 metaType = result.meta_type | |
1627 resultList= ret.get(metaType,[]) | |
1628 resultList.append(result) | |
1629 ret[metaType]=resultList | |
1630 | |
1631 logging.debug(ret) | |
1632 return ret | |
1633 | |
1634 | |
1635 def manage_addMPIWGRootForm(self): | |
1636 """form for adding the root""" | |
1637 pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addMPIWGRootForm.zpt')).__of__(self) | |
1638 return pt() | |
1639 | |
1640 def manage_addMPIWGRoot(self,id,title,connection_id="",RESPONSE=None): | |
1641 """add a root folder""" | |
1642 newObj=MPIWGRoot(id,title) | |
1643 self._setObject(id,newObj) | |
1644 ob=getattr(self,id) | |
1645 setattr(ob,'connection_id',connection_id) | |
1646 if RESPONSE is not None: | |
1647 RESPONSE.redirect('manage_main') | |
1648 |