1: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
2: from Products.PageTemplates.PageTemplate import PageTemplate
3: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
4: from Products.ZSQLExtend.ZSQLExtend import ZSQLExtendFolder
5: from Products.ZCatalog.CatalogPathAwareness import CatalogAware
6: from OFS.Image import Image
7: from Globals import package_home
8: import urllib
9: import MPIWGStaff
10: import string
11: import re
12: import os
13: from types import *
14: import logging
15: import xmlhelper # Methoden zur Verwaltung der projekt xml
16: from OFS.SimpleItem import SimpleItem
17: from OFS.Folder import Folder
18: from Products.ZSQLMethods.SQL import SQLConnectionIDs
19: from AccessControl import ClassSecurityInfo
20: from bibliography import *
21: import time
22: import xml.dom.minidom
23: import sys
24: import transaction
25:
26: #from Ft.Xml.XPath import Evaluate
27: #from Ft.Xml.XPath.Context import Context
28: #from Ft.Xml.Domlette import NonvalidatingReader,PrettyPrint, Print
29: #from Ft.Xml import EMPTY_NAMESPACE
30: import copy
31: import updatePersonalWWW
32: import MPIWGStaff
33: from MPIWGHelper import *
34: from BeautifulSoup import BeautifulSoup, Comment
35: from ZODB import FileStorage, DB
36: from ZEO import ClientStorage
37:
def sortWeight(x, y):
    """Comparator for (id, object) tuples, ordering by the object's integer
    'weight' attribute (a missing weight counts as 0).

    Returns a negative, zero or positive int like the classic ``cmp``.
    Written without the ``cmp`` builtin (removed in Python 3), so it also
    works via ``functools.cmp_to_key`` on newer interpreters.
    """
    x1 = int(getattr(x[1], 'weight', '0'))
    y1 = int(getattr(y[1], 'weight', '0'))
    # (a > b) - (a < b) is the canonical cmp() replacement
    return (x1 > y1) - (x1 < y1)
42:
43:
class MPIWGRoot(ZSQLExtendFolder):
    """Root folder of the MPIWG web server."""

    # volatile handle to the ZEO harvest cache (set lazily by
    # getHarvestCache(), never persisted by the ZODB)
    _v_harvestCache=None
    meta_type='MPIWGRoot'

    # human-readable labels for the project metadata fields (xdata_NN)
    fieldLabels={'WEB_title':'WEB_Title',
                 'xdata_01':'Responsible Scientists',
                 'xdata_02':'Department',
                 'xdata_03':'Historical Persons',
                 'xdata_04':'Time period',
                 'xdata_05':'Sorting number',
                 'xdata_06':'Keywords',
                 'xdata_07':'Short title',
                 'xdata_08':'Other involved scholars' ,
                 'xdata_09':'Disciplines',
                 'xdata_10':'Themes',
                 'xdata_11':'Object Digitallibrary',
                 'xdata_12':'Cooperation partners',
                 'xdata_13':'Funding institutions',
                 'WEB_project_header':'WEB_project_header',
                 'WEB_project_description':'WEB_project_description',
                 'WEB_related_pub':'WEB_related_pub'}

    # (is this used?)
    folders=['MPIWGProject','Folder','ECHO_Navigation']
    # language of this instance
    lang = 'en'
    # types of objects that show up in navigation
    nav_meta_types = ['MPIWGTemplate','MPIWGLink','MPIWGFolder']

    # ZMI management tabs added to the standard Folder tabs
    manage_options = Folder.manage_options+(
        {'label':'Update personal homepages','action':'updatePersonalwww_html'},
        {'label':'Reindex catalogs','action':'reindexCatalogs'},
        {'label':'Main config','action':'changeMPIWGRootForm'},
        {'label':'add e-mails','action':'showNewDBEntries'},
        )
85:
86: def getHarvestCachePort(self):
87: return getattr(self,"harvestPort",29999)
88:
89: def getHarvestCacheServer(self):
90: return getattr(self,"harvestServer","localhost")
91:
92:
93: def getHarvestCache(self):
94: logging.debug("CACHE:"+repr(self._v_harvestCache))
95: if self._v_harvestCache==None:
96: #storage = FileStorage.FileStorage('/tmp/'+self.getId()+'test-filestorage.fs')
97: addr = self.getHarvestCacheServer(), self.getHarvestCachePort()
98: storage = ClientStorage.ClientStorage(addr)
99: db = DB(storage)
100: self._v_harvestDV=db
101: self._v_harvestDV=db
102: conn = db.open()
103: dbroot = conn.root()
104: if not dbroot.has_key('templates'):
105: from BTrees.OOBTree import OOBTree
106: dbroot['templates'] = OOBTree()
107:
108: self._v_harvestCache = dbroot['templates']
109: logging.debug("CACHE2:"+repr(self._v_harvestCache))
110: return self._v_harvestCache
111:
112:
113:
114: def __del__(self):
115: if self._v_harvestCache!=None:
116: self._v_harvestDV.close();
117:
    def getGetNeighbourhood(self,obj, wordStr, length=100,tagging=True):
        """Find the text surroundings of the words in wordStr; returns an
        array with the neighbourhoods of the hits.  All tags are stripped,
        hits are tagged with <span class="found">XX</span>, matching is
        case insensitive.

        @param wordStr: words separated by blanks; phrases are quoted with
                        '"'; "*" denotes a wildcard and is ignored
        @param length: optional, default 100; 2*length is the size of each
                       neighbourhood
        @param tagging: optional, default True; no span tag is produced when
                        tagging is false
        """

        ret=[] # array of result snippets returned at the end
        ranges=[] # (x,y) tuples: start and end position of the i-th neighbourhood

        wordStr=wordStr.lstrip().rstrip()

        def isInRanges(nr,length):
            """Test whether position nr already lies inside one of the
            collected neighbourhoods; returns the index of the first match
            in ranges, -1 when there is no hit.

            @param nr: position to check
            @param length: length of the word to check
            """
            for x in ranges:
                if (x[0]<=nr) and (nr < (x[1]-length)):
                    return ranges.index(x)
            return -1

        # deal with phrases: inside quoted phrases blanks become "_"
        def rep_empty(str):
            x= re.sub(" ","_",str.group(0))
            return re.sub("\"","",x)

        wordStr=re.sub("\".*?\"", rep_empty,wordStr) # replace blanks inside "..." by "_" and drop the quotes

        # deal with wildcards; for our purposes deleting the wildcard is enough
        wordStr=wordStr.replace("*","")

        words=wordStr.split(" ")

        # NOTE(review): reads the harvest cache of 'en' via acquisition
        # (self.en) -- confirm an 'en' instance always exists
        txtCache = self.en.getHarvestCache();
        txt= txtCache.get(obj.absolute_url(),None)

        if txt==None:

            logging.debug("NO CACHE for: "+obj.absolute_url())
            txt=obj.harvest_page(mode="slim")


        if not txt:
            return ret

        soup = BeautifulSoup(txt)

        # strip HTML comments before extracting the plain text
        comments = soup.findAll(text=lambda text:isinstance(text, Comment))
        [comment.extract() for comment in comments]

        txt = ''.join(soup.findAll(text=True))


        for word in words:
            word=re.sub("_"," ",word) # map "_" back to " " (phrase handling above)
            pos=0

            n=txt.lower().count(word.lower()) # number of occurrences of the word

            for i in range(n):
                pos=txt.lower().find(word.lower(),pos)

                if pos > 0:
                    x=max(0,pos-length)
                    y=min(len(txt),pos+length)


                    # is the word already inside one of the results?
                    nr=isInRanges(pos,len(word))
                    if nr >=0: # word lies in an already found neighbourhood -> enlarge it
                        x=min(ranges[nr][0],x)
                        y=max(ranges[nr][1],y)

                    str=txt[x:y]
                    if x!=0: #add dots if in the middle of text
                        str="..."+str

                    if y!=len(txt): #add dots if in the middle of text
                        str=str+"..."



                    if nr >=0: # word lies in an already found neighbourhood
                        ranges[nr]=(x,y) # new position of the neighbourhood

                        ret[nr]=str # new snippet
                    else: # otherwise add a new neighbourhood
                        ranges.append((x,y))

                        ret.append(str)

                    pos=pos+len(word)
                else:
                    break;

        # now highlight everything
        if tagging:
            for x in range(len(ret)):
                for word in words:
                    repl=re.compile(word,re.IGNORECASE)
                    ret[x]=repl.sub(""" <span class="found">%s</span>"""%word.upper(),ret[x])

        return ret
231: def copyAllImagesToMargin(self):
232: """tranformiere alle Bilder in die Margins"""
233: projects=self.getTree()
234: ret=""
235: for project in projects:
236: proj=project[3]
237: try:
238: persons=proj.copyImageToMargin();
239: except:
240: logging.error("Cannnot do: %s"%repr(project))
241:
    def transformProjectsToId(self):
        """Migration helper: analyse the old templates and attach the new
        list of responsible scientists (responsibleScientistsList) to every
        project, resolving names via identifyNames.

        Returns a text report listing ambiguous names and names without a
        database entry.
        """
        projects=self.getTree()
        ret=""
        for project in projects:

            proj=project[3]
            persons=proj.identifyNames(proj.getContent('xdata_01'))
            if not hasattr(proj,'responsibleScientistsList'):
                proj.responsibleScientistsList=[]

            for person in persons.items():

                if len(person[1]) >1: # ambiguous match
                    ret+="nicht eindeutig --- %s: %s\n"%(proj.getId(),person[0])

                elif len(person[1]) ==0: # no database entry
                    ret+="kein eintrag--- %s: %s\n"%(proj.getId(),person[0])
                    proj.responsibleScientistsList.append((person[0],""))
                else:
                    # unique hit: store (name, staff key)
                    proj.responsibleScientistsList.append((person[0],person[1][0].getObject().getKey()))

        return ret
266:
267:
268: def harvestProjects(self):
269: """harvest"""
270: folder="/tmp"
271: try:
272: os.mkdir("/tmp/harvest_MPIWG")
273: except:
274: pass
275: founds=self.ZopeFind(self.aq_parent.projects,obj_metatypes=['MPIWGProject'],search_sub=1)
276: for found in founds:
277: txt=found[1].harvest_page()
278:
279: if txt and (txt != ""):
280: name=found[0].replace("/","_")
281: fh=file("/tmp/harvest_MPIWG/"+name,"w")
282: fh.write(txt)
283: fh.close()
284:
    def decode(self,str):
        """Return *str* as unicode: try utf-8 first, fall back to latin-1
        (Python 2 semantics; the parameter deliberately shadows the builtin
        str, so isinstance checks are not usable here).

        Falsy values become ""; non-plain-string values (e.g. already
        unicode) are returned unchanged.
        """

        if not str:
            return ""
        if type(str) is StringType:
            try:
                return str.decode('utf-8')
            except:
                # not valid utf-8 -- assume legacy latin-1 content
                return str.decode('latin-1')
        else:
            # already unicode (or some other object) -- return unchanged
            return str
297:
298:
299: def getat(self,array,idx=0,default=None):
300: """return array element idx or default (but no exception)"""
301: if len(array) <= idx:
302: return default
303: else:
304: return array[idx]
305:
306: def getLang(self):
307: """returns the default language"""
308: return self.lang
309:
310: def browserCheck(self):
311: """check the browsers request to find out the browser type"""
312: bt = {}
313: ua = self.REQUEST.get_header("HTTP_USER_AGENT")
314: bt['ua'] = ua
315: bt['isIE'] = False
316: bt['isN4'] = False
317: if string.find(ua, 'MSIE') > -1:
318: bt['isIE'] = True
319: else:
320: bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1)
321:
322: try:
323: nav = ua[string.find(ua, '('):]
324: ie = string.split(nav, "; ")[1]
325: if string.find(ie, "MSIE") > -1:
326: bt['versIE'] = string.split(ie, " ")[1]
327: except: pass
328:
329: bt['isMac'] = string.find(ua, 'Macintosh') > -1
330: bt['isWin'] = string.find(ua, 'Windows') > -1
331: bt['isIEWin'] = bt['isIE'] and bt['isWin']
332: bt['isIEMac'] = bt['isIE'] and bt['isMac']
333: bt['staticHTML'] = False
334:
335: return bt
336:
337:
338: def versionHeaderEN(self):
339: """version header text"""
340:
341: date= self.REQUEST.get('date',None)
342: if date:
343: txt="""<h2>This pages shows the project which existed at %s</h2>"""%str(date)
344: return txt
345: return ""
346:
347: def versionHeaderDE(self):
348: """version header text"""
349: date= self.REQUEST.get('date',None)
350: if date:
351: txt="""<h2>Auf dieser Seite finden Sie die Projekte mit Stand vom %s</h2>"""%str(date)
352: return ""
353:
354:
    def createOrUpdateId_raw(self):
        """(Re)create the Postgres sequence 'id_raw' used to generate ids
        for the bibliography table, starting after the largest existing id
        (ids look like '<prefix><number>'; the first character is stripped).
        """
        debug=None
        # find the largest existing id
        founds=self.ZSQLQuery("select id from bibliography")

        if founds:
            ids=[int(x.id[1:]) for x in founds]
            maximum=max(ids)

        # NOTE(review): when the table is empty, 'maximum' is never bound
        # and the create statement below would raise NameError -- confirm
        # whether an empty table can occur here
        id_raw=self.ZSQLQuery("select nextval('id_raw')",debug=debug)

        if id_raw:
            # sequence exists -- drop it before recreating with a new start
            self.ZSQLQuery("drop sequence id_raw",debug=debug)

        self.ZSQLQuery("create sequence id_raw start %i"%(maximum+1),debug=debug)
371:
372:
373: def queryLink(self,link):
374: """append querystring to the link"""
375: return "%s?%s"%(link,self.REQUEST.get('QUERY_STRING',''))
376:
377: def getKategory(self,url):
378: """kategorie"""
379: splitted=url.split("/")
380: return splitted[4]
381:
382: def generateUrlProject(self,url,project=None):
383: """erzeuge aus absoluter url, relative des Projektes"""
384: if project:
385: splitted=url.split("/")
386: length=len(splitted)
387: short=splitted[length-2:length]
388:
389: base=self.REQUEST['URL3']+"/"+"/".join(short)
390:
391: else:
392: findPart=url.find("/projects/")
393: base=self.REQUEST['URL1']+"/"+url[findPart:]
394:
395:
396: return base
397:
398: def isNewCapital(self,text=None,reset=None):
399: if reset:
400: self.REQUEST['capital']="A"
401: return True
402: else:
403: if len(text)>0 and not (text[0]==self.REQUEST['capital']):
404: self.REQUEST['capital']=text[0]
405: return True
406: else:
407: return False
408:
    def subNavStatic(self,obj):
        """Return the sub-navigation entries directly below *obj*: all
        MPIWGTemplate/MPIWGLink children with a non-empty title, sorted by
        their 'weight' attribute."""
        subs=self.ZopeFind(obj,obj_metatypes=['MPIWGTemplate','MPIWGLink'])
        subret=[]

        for x in subs:
            if not(x[1].title==""):
                subret.append(x)
        # Python-2-style comparator sort (module-level sortWeight)
        subret.sort(sortWeight)
        return subret
419:
    def subNav(self,obj):
        """return sub-navigation elements i.e. below sections"""
        # get section -> parent should be MPIWGRoot
        p = obj
        sec = None
        # descend parents to the root (and remember the last id)
        while p is not None and p.meta_type != 'MPIWGRoot':
            sec = p
            p = p.aq_parent

        # NOTE(review): sec stays None when obj itself is the root; the
        # call below would then raise AttributeError -- confirm callers
        # never pass the root
        subsecs = sec.objectItems(self.nav_meta_types)
        subsecs = [s for s in subsecs if s[1].title != ""]
        # Python-2-style comparator sort by 'weight'
        subsecs.sort(sortWeight)
        return subsecs
434:
435: def isType(self,object,meta_type):
436: """teste ob ein object vom meta_type ist."""
437: return (object.meta_type==meta_type)
438:
439: def isActive(self,name):
440: """teste ob subnavigation aktiv"""
441: for part in self.REQUEST['URL'].split("/"):
442: if part==name:
443: return True
444: return False
445:
446:
    def getSections(self):
        """returns a list of all sections i.e. top-level MPIWGFolders"""
        secs = self.objectItems(['MPIWGFolder'])
        # Python-2-style comparator sort by 'weight'
        secs.sort(sortWeight)
        # return pure list of objects
        return [s[1] for s in secs]
454:
455: def getSectionStyle(self, name, style=""):
456: """returns a string with the given style + '-sel' if the current section == name"""
457: if self.getSection() == name:
458: return style + '-sel'
459: else:
460: return style
461:
462: def getFeatures(self, num=None):
463: """returns a list of the last num Features"""
464: dir = getattr(self, 'features')
465: features = dir.objectItems(['MPIWGFeature'])
466: features.sort(sortWeight)
467: if num is not None:
468: # take only the last num elements
469: features = features[-num:]
470: # return pure list of objects
471: return [f[1] for f in features]
472:
473:
474: def getMPIWGRoot(self):
475: """returns the MPIWG root"""
476: return self
477:
478: def MPIWGrootURL(self):
479: """returns the URL to the root"""
480: return self.absolute_url()
481:
    def upDateSQL(self,fileName):
        """updates SQL databases using fm.jar"""
        # paths are resolved relative to this product's package directory
        fmJarPath=os.path.join(package_home(globals()), 'updateSQL/fm.jar')
        xmlPath=os.path.join(package_home(globals()), "updateSQL/%s"%fileName)
        logger("MPIWG Web",logging.INFO,"java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath))
        # NOTE(review): fileName is interpolated into a shell command line;
        # callers must pass trusted, fixed file names only
        ret=os.popen("java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath),"r").read()
        logger("MPIWG Web",logging.INFO,"result convert: %s"%ret)
        return 1
490:
    def patchProjects(self,RESPONSE):
        """One-off migration: strip the '/CD/projects/' prefix from every
        project's WEB_project_description and write progress to RESPONSE."""
        projects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject'])
        for project in projects:
            # description is stored as a one-element list
            tmp=project[1].WEB_project_description[0].replace("/CD/projects/","")[0:]
            setattr(project[1],'WEB_project_description',[tmp[0:]])
            RESPONSE.write("<p>%s</p>\n"%project[0])
498:
499: def replaceNotEmpty(self,format,field):
500: """replace not empty"""
501: if field and (not field.lstrip()==''):
502: return self.decode(format%field)
503: else:
504: return ""
505:
506:
    def isActiveMember(self,key):
        """Return True when the staff member identified by *key* exists in
        personal_www with publish_the_data='yes' (i.e. is published)."""
        key=utf8ify(key)
        # first row of the search result, or None (via getat)
        ret=self.getat(self.ZSQLInlineSearch(_table='personal_www',
                                             _op_key='eq',key=key,
                                             _op_publish_the_data='eq',
                                             publish_the_data='yes'))

        logging.info("ACTIVE_MEMBER %s"%ret)
        if ret:
            return True
        else:
            return False
520:
521: def isActual(self,project):
522: """checke if project is actual"""
523: actualTime=time.localtime()
524:
525: if hasattr(project,'getObject'): #obj ist aus einer catalogTrefferList
526: obj=project.getObject()
527: else:
528: obj=project
529:
530: if getattr(obj,'archiveTime',actualTime)< actualTime:
531: return False
532: else:
533: return True
534:
    def redirectIndex_html(self,request):
        """Fetch and return the body of <URL1>/index_html server-side
        instead of redirecting the client (note: performs an HTTP request
        from the server; py2 urllib API)."""
        #return request['URL1']+'/index_html'

        return urllib.urlopen(request['URL1']+'/index_html').read()
539:
540:
    def formatBibliography(self,here,found):
        """Delegate to the module-level formatBibliography helper
        (imported via `from bibliography import *`)."""
        return formatBibliography(here,found)
544:
    def getValue(self,fieldStr):
        """Return the content of a field as a utf-8 encoded string: accepts
        a plain string or a list (first element is used), strips one
        trailing ';' and ensures every ';' is followed by a blank."""

        if type(fieldStr)==StringType:
            field=fieldStr
        else:
            # list-valued field -- use the first entry
            field=fieldStr[0]
        try:
            # strip a single trailing semicolon
            if field[len(field)-1]==";":
                field=field[0:len(field)-1]
        except:

            # empty string -- nothing to strip
            """nothing"""
        field=re.sub(r';([^\s])','; \g<1>',field)
        return field.encode('utf-8')
560:
561:
562:
    def sortedNames(self,list):
        """Sort a list of (url-quoted) person names in place by last name
        (last whitespace-separated token): descending comparator followed
        by reverse() yields ascending order.  Returns the list.

        NOTE(review): in the second fallback branch of the comparator 'x'
        is assigned where 'y' was probably intended -- confirm before
        changing, existing orderings may depend on it.
        """

        def sortLastName(x_c,y_c):
            try:
                x=urllib.unquote(x_c).encode('utf-8','ignore')
            except:
                x=urllib.unquote(x_c)

            try:
                y=urllib.unquote(y_c).encode('utf-8','ignore')
            except:
                # BUG? assigns x instead of y (see docstring)
                x=urllib.unquote(y_c)



            try:
                # compare by the last whitespace-separated token (= last name)
                last_x=x.split()[len(x.split())-1]
                last_y=y.split()[len(y.split())-1]

            except:

                # empty name -- treat both as equal
                last_x=""
                last_y=""



            if last_x<last_y:
                return 1
            elif last_x>last_y:
                return -1
            else:
                return 0

        # Python-2-style comparator sort
        list.sort(sortLastName)
        list.reverse()

        return list
601:
602: def __init__(self, id, title):
603: """init"""
604: self.id=id
605: self.title=title
606:
607: def removeStopWords(self,xo):
608: """remove stop words from xo"""
609: if not hasattr(self,'_v_stopWords'):
610: self._v_stopWords=self.stopwords_en.data.split("\n")
611:
612: x=str(xo)
613:
614: strx=x.split(" ")
615:
616: for tmp in strx:
617:
618: if tmp.lower() in self._v_stopWords:
619: del strx[strx.index(tmp)]
620:
621: return " ".join(strx)
622:
    def urlQuote(self,str):
        """URL-quote *str* (py2 urllib.quote; the parameter shadows the
        builtin str)."""
        return urllib.quote(str)
626:
    def urlUnQuote(self,str):
        """URL-unquote *str* (py2 urllib.unquote; the parameter shadows the
        builtin str)."""
        return urllib.unquote(str)
630:
631:
632:
    def getProjectsByFieldContent(self,fieldName,fieldContentsEntry, date=None):
        """Return all visible, version-resolved projects whose field
        *fieldName* matches one of the words/phrases in *fieldContentsEntry*
        (string or list of strings); hits come from the ProjectCatalog and
        are sorted by WEB_title."""
        def sort(x,y):
            # Python-2 comparator: order by the first WEB_title entry
            return cmp(x.WEB_title[0],y.WEB_title[0])

        if type(fieldContentsEntry) is StringType:
            fieldContentsTmp=[fieldContentsEntry]
        else:
            fieldContentsTmp=fieldContentsEntry

        fieldContents=[]
        for x in fieldContentsTmp:
            # all words of one entry must match (AND query)
            fieldContents.append(" AND ".join(x.split()))
        # NOTE(review): the joiner ' AND' lacks a trailing blank --
        # presumably the catalog tolerates this; confirm before changing
        projects=self.ProjectCatalog({fieldName:string.join(fieldContents,' AND')})
        ret=[]
        for x in projects:
            obj=x.getObject()
            # resolve to the version valid at *date* (None = current)
            obj=obj.getActualVersion(date)
            if obj and (not getattr(obj,'invisible',None)):
                ret.append(x)

        ret.sort(sort)
        return ret
659:
    def changeMPIWGRootForm(self):
        """Render the ZMI form for editing the main configuration."""
        # template lives in the product's zpt directory
        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','changeMPIWGRootForm')).__of__(self)
        return pt()
664:
665: def changeMPIWGRoot(self,title,connection_id,coneServiceURL,harvestPort,harvestServer,lang=None,autocommit=None,RESPONSE=None):
666: """change"""
667: self.title=title
668: self.connection_id=connection_id
669: #self.disciplineList=disciplineList
670: #self.themesList=themesList
671: self.coneServiceURL=coneServiceURL
672: self.harvestServer=harvestServer
673: try:
674: self.harvestPort=int(harvestPort)
675: except:
676: logging.error("couldn't change port!: no number:"+harvestPort)
677:
678: if lang is not None:
679: self.lang = lang
680:
681: self.autocommit = (autocommit == "on")
682: if RESPONSE is not None:
683: RESPONSE.redirect('manage_main')
684:
685:
    def getContexts(self,childs=None,parents=None,depth=None,date=None,onlyActive=True):
        """Return related projects by sorting number (xdata_05).

        @param childs: number prefix; collect projects whose number lies
                       below it in the hierarchy (optionally limited by depth)
        @param parents: number; collect the projects along the ancestor chain
        @param depth: maximal number of levels / results
        @param date: resolve project versions at this date
        @param onlyActive: for childs, only include active projects
        """
        ret=[]

        if parents:
            pnums=parents.split(".")
            # walk up the dotted-number hierarchy one level at a time
            while len(pnums) > 1:
                pnums.pop()
                parentId=string.join(pnums,".")

                for project in self.getProjectFields('xdata_05',sort='int',date=date):
                    if project[1]==parentId:
                        ret.append(project)

                if (depth is not None) and (len(ret) >= depth):
                    break

        if childs:
            for project in self.getProjectFields('xdata_05',sort='int',date=date):
                # match numbers strictly below the childs prefix
                searchStr=childs+"(\..*)"

                if (onlyActive and project[0].isActiveProject()) or (not onlyActive):
                    if re.match(searchStr,project[1]):

                        if depth:

                            # limit by number of extra hierarchy levels
                            if int(depth)>=len(project[1].split("."))-len(childs.split(".")):

                                ret.append(project)
                        else:
                            ret.append(project)

        return ret
721:
722:
723: def getAllProjectsAndTagsAsCSV(self,archived=1,RESPONSE=None):
724: """alle projekte auch die nicht getaggten"""
725: retList=[]
726: headers=['projectId','sortingNumber','projectName','scholars','startedAt','completedAt','lastChangeThesaurusAt','lastChangeProjectAt','projectCreatedAt','persons','places','objects']
727: headers.extend(list(self.thesaurus.tags.keys()))
728: retList.append("\t".join(headers))
729: if not hasattr(self,'thesaurus'):
730: return "NON thesaurus (there have to be a MPIWGthesaurus object, with object ID thesaurus)"
731:
732: projectTags = self.thesaurus.getProjectsAndTags()
733: for project in self.getProjectFields('WEB_title_or_short'):
734: proj = project[0]
735: p_name = project[1]
736: retProj=[]
737: #if (not proj.isArchivedProject() and archived==1) or (proj.isArchivedProject() and archived==2):
738: retProj.append(self.utf8ify(proj.getId()))
739: retProj.append(self.utf8ify(proj.getContent('xdata_05')))
740: retProj.append(self.utf8ify(p_name))
741: retProj.append(self.utf8ify(proj.getContent('xdata_01')))
742: retProj.append(self.utf8ify(proj.getStartedAt()))
743: retProj.append(self.utf8ify(proj.getCompletedAt()))
744: changeDate=self.thesaurus.lastChangeInThesaurus.get(proj.getId(),'')
745: n = re.sub("[:\- ]","",str(changeDate))
746: retProj.append(n)
747: retProj.append(self.utf8ify(getattr(proj,'creationTime','20050101000000')))
748: retProj.append("")#TODO: project created at
749: retProj.append(";".join([person[1] for person in self.thesaurus.getPersonsFromProject(proj.getId())]))
750: retProj.append(";".join([person[1] for person in self.thesaurus.getHistoricalPlacesFromProject(proj.getId())]))
751: retProj.append(";".join([person[1] for person in self.thesaurus.getObjectsFromProject(proj.getId())]))
752: retProj+=self.thesaurus.getTags(proj.getId(),projectTags)
753: retList.append("\t".join(retProj))
754:
755: if RESPONSE:
756:
757: RESPONSE.setHeader('Content-Disposition','attachment; filename="ProjectsAndTags.tsv"')
758: RESPONSE.setHeader('Content-Type', "application/octet-stream")
759:
760: return "\n".join(retList);
761:
762:
763:
764:
    def getProjectFields(self,fieldName,date=None,folder=None,sort=None):
        """Return a list of (project, fieldValue) tuples for all visible
        projects, version-resolved at *date*.

        fieldName 'WEB_title_or_short' falls back from the short title
        (xdata_07) to WEB_title when the short title is effectively empty.
        sort: 'int' by sorting number, 'stopWords' by title ignoring stop
        words, otherwise the default field sort (helpers from MPIWGHelper).
        """
        ret=[]

        objects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject'],search_sub=0)


        for object in objects:
            obj=object[1]
            obj=obj.getActualVersion(date)
            if obj and (not getattr(obj,'invisible',None)):
                if fieldName=="WEB_title_or_short":

                    # hack: the short-title field sometimes still contains a
                    # lone separator (';' or ',') instead of being empty
                    if len(obj.getContent('xdata_07'))<3:
                        fieldNameTmp="WEB_title"
                    else:
                        fieldNameTmp="xdata_07"
                else:
                    fieldNameTmp=fieldName

                ret.append((obj,obj.getContent(fieldNameTmp)))


        # Python-2-style comparator sorts
        if sort=="int":
            ret.sort(sortI)
        elif sort=="stopWords":

            ret.sort(sortStopWords(self))

        else:
            ret.sort(sortF)

        return ret
798:
799: def showNewProjects(self):
800: projects=[]
801: for objs in self.getProjectFields('WEB_title_or_short'): # Get all Projets
802: if objs[0].xdata_05 and (objs[0].xdata_05[0] == ""):
803:
804: projects.append(objs)
805:
806: return projects
807:
808:
    def updatePublicationDB(self,personId=None):
        """updates the publication db, i.e. copy year and type into the main table"""

        # restrict to one person when personId is given
        if personId:
            founds = self.ZSQLInlineSearch(_table="publications",key_main=personId)
        else:
            founds = self.ZSQLInlineSearch(_table="publications")

        for found in founds:

            # publications linked to the institute bibliography
            if found.id_institutsbibliographie and (not found.id_institutsbibliographie =="") and (not found.id_institutsbibliographie =="0"):

                entries = self.ZSQLInlineSearch(_table="institutsbiblio",id=found.id_institutsbibliographie)
                for entry in entries:
                    self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type)

            # publications linked to the general bibliography
            if found.id_gen_bib and (not found.id_gen_bib ==""):
                entries = self.ZSQLInlineSearch(_table="bibliography",id=found.id_gen_bib)
                for entry in entries:
                    self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type)

        return True
831:
    def showNewDBEntries(self):
        """Show new database rows that have no e-mail address yet resp. for
        which no staff object has been created."""

        qstr="select * from personal_www where web_object_created='no' and not key=''"
        res=self.ZSQLQuery(qstr)

        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','showNewDBEntries.zpt')).__of__(self)
        return pt(newEntries=res)
840:
    def createNewStaffObjects(self,RESPONSE):
        """Create MPIWGStaff objects in the 'members' folder for the
        database rows selected in the submitted form (the form value is the
        e-mail address); marks the DB row as created on success.

        Returns a plain-text report, one line per processed entry.
        """

        memberFolder=getattr(self,'members')
        args=self.REQUEST.form
        arg_k=args.keys()
        arg_k.remove("submit")
        ret=""
        for key in arg_k:
            # form keys are url-quoted personal_www keys
            k=self.urlUnQuote(key)

            qstr="select * from personal_www where key=%s"%self.ZSQLQuote(k)
            res=self.ZSQLQuery(qstr)[0]
            if args[key]!="": # an e-mail address was entered
                #create the object
                e_mail=args[key]
                try:
                    newObj=MPIWGStaff.MPIWGStaff(e_mail,res.last_name,res.first_name,k)
                    memberFolder._setObject(e_mail,newObj)
                    obj=getattr(memberFolder,e_mail)
                    obj.reindex_object()
                    ret+="Created %s \n"%e_mail
                    created=True
                except:
                    msg="Cannot create new user %s (%s %s)"%(e_mail,sys.exc_info()[0],sys.exc_info()[1])
                    logging.error(msg)
                    ret+=msg+"\n"
                    created=False

                # NOTE(review): e_mail is string-interpolated into the SQL
                # below; only k goes through ZSQLQuote -- confirm e_mail is
                # validated upstream
                if created:
                    qstr="update personal_www set web_object_created='yes',e_mail='%s@mpiwg-berlin.mpg.de' where key=%s"%(e_mail,self.ZSQLQuote(k))
                    self.ZSQLQuery(qstr)

        return ret
875:
876:
    def generateNewPersonEntry(self,data):
        """Create a database row for a new person; new persons are created
        in the database only (no Zope staff object yet -- that happens
        later via createNewStaffObjects).

        Returns the (result, message) tuple from MPIWGStaff.createNewDBEntry.
        """

        result,msg=MPIWGStaff.createNewDBEntry(self,data['publish_the_data'],data['key'],data['last_name'],
                                               data['first_name'],data['titles_new'],data['status'],"",
                                               "",data['date_from'],data['date_to'],
                                               data['department'],'',data['funded_by'],
                                               data['e_mail2'],data['current_work'],"yes",data['date_stay_at_mpiwg'],data['group'],"no",data['current_work'])

        return result,msg
900:
    def updatePersonEntry(self,data,ignoreEntries=None):
        """Update a personal_www row from *data*, skipping the fields
        listed in *ignoreEntries* (e.g. values the user chose to keep).

        Returns (True, msg).
        """
        if ignoreEntries is None:
            ignoreEntries = []

        logging.debug("updatePersonEntry: data=%s ignoreEntries=%s"%(repr(data),repr(ignoreEntries)))

        # empty dates are stored as SQL NULL (via the marker 'date_none')
        if data['date_to']=="":
            data['date_to']="date_none"

        if data['date_from']=="":
            data['date_from']="date_none"
        msg=""


        # build the SET clause from all non-ignored fields

        columns=data.keys()
        for x in ignoreEntries:
            logging.debug("updatePersonEntry: ignoring %s"%x)
            try: # ignoreEntries may contain fields that are not in columns
                columns.remove(x)
            except:
                pass


        insert=[]
        for key in columns:
            if data[key]=="date_none": # translate the marker to SQL null
                insert.append('%s=null'%key)
            else:
                insert.append(""" "%s"=%s"""%(key,self.ZSQLQuote(data[key])))

        insertStr=",".join(insert)
        # NOTE(review): column names and data['key'] are interpolated into
        # the SQL string directly; only values go through ZSQLQuote --
        # confirm keys/columns are trusted
        queryStr="update personal_www SET %s where key='%s'"%(insertStr,data['key'])
        self.ZSQLQuery("SET DATESTYLE TO 'German'")
        self.ZSQLQuery(queryStr)

        return True,msg
947:
948:
    def updatePersonalwww_doIt(self):
        """Apply the staff import previously uploaded and checked: create
        the selected new persons and update existing rows, honouring the
        per-field conflict decisions from the form.

        Reads resultSet/news/conflicts from the session (prepared by
        updatePersonalwww()).  Returns an HTML report.
        """
        args=self.REQUEST.form
        resultSet=self.REQUEST.SESSION['personal_www']['resultSet']
        news=self.REQUEST.SESSION['personal_www']['news']
        conflicts=self.REQUEST.SESSION['personal_www']['conflicts']
        logging.debug("updatePersonalwww_doIt: args=%s\n resultSet=%s\n news=%s\n conflicts=%s"%(args,resultSet,news,conflicts))

        ret="<html><body>"
        # generate the new entries

        if news and (len(news)>0):
            ret+="<h2>Hinzugefügt</h2>"
            ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>"
            ret+="<ul>"

            for new in news:
                if args.has_key(self.urlQuote(new.encode('utf-8'))): # entry was selected
                    result,msg=self.generateNewPersonEntry(resultSet[new])
                    if not result:
                        logging.error("Error (generateNewPersonEntry) %s"%msg)
                        ret+="<li>ERROR: %s %s"%(new.encode('utf-8'),msg)
                    else:
                        ret+="<li>OK: %s"%(new.encode('utf-8'))

        if news and (len(news)>0):
            ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>"
            ret+="</ul>"

        # update

        if len(conflicts.keys())>0:
            ret+="<h2>Änderung des Benutzers übernehmen</h2>"
            ret+="<p>Wenn nötig in Filemaker-db ändern:</p>"

        # conflicts: apply the per-field decision made in the form
        for conflict in conflicts.keys():
            ignoreEntries=[]
            displayIgnored=[]
            for cf in conflicts[conflict]:
                if args[conflict.encode('utf-8')+'_'+cf[0]]=="stored": #use the stored one
                    ignoreEntries.append(cf[0]) #so ignore field cf[0]
                    displayIgnored.append(cf)

            if len(displayIgnored)>0:
                ret+="<h3>%s</h3>"%conflict.encode('utf-8')
                ret+="<table border='1'>"
                for iE in displayIgnored:
                    ret+="<tr><td>%s</td><td>%s</td><td>%s</td>"%(iE[0].encode('utf-8'),iE[1].encode('utf-8'),iE[2].encode('utf-8'))
                ret+="</table>"

            self.updatePersonEntry(resultSet[conflict],ignoreEntries=ignoreEntries)

        # rest: update everything that had no conflict
        cl=list(conflicts.keys())

        for key in resultSet.keys():
            if key not in cl:
                self.updatePersonEntry(resultSet[key])
        return ret+"</body></html>"
1009:
1010:
1011: def updateInstitutsbiliography(self):
1012: """update the Institutsbibliogrpahy"""
1013: self.upDateSQL('personalwww.xml')
1014: return "<html><body>DONE</body></html>"
1015:
1016:
1017:
1018:
1019: def updatePersonalwww_html(self):
1020: """update form for the homepages web form"""
1021: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww.zpt')).__of__(self)
1022: return pt()
1023:
1024:
1025: def updatePersonalwww(self,uploadfile):
1026: """update personalwww
1027: @param uploadfile: file handle auf das file
1028: """
1029: dsn=self.getConnectionObj().connection_string
1030: #dsn="dbname=personalwww"
1031: resultSet=updatePersonalWWW.importFMPXML(uploadfile)
1032: news,conflicts=updatePersonalWWW.checkImport(dsn, resultSet)
1033:
1034: self.REQUEST.SESSION['personal_www']={}
1035: self.REQUEST.SESSION['personal_www']['resultSet']=resultSet
1036: self.REQUEST.SESSION['personal_www']['news']=news
1037: self.REQUEST.SESSION['personal_www']['conflicts']=conflicts
1038:
1039: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww_check.zpt')).__of__(self)
1040: return pt()
1041:
1042:
1043:
1044: def reindexCatalogs(self,RESPONSE=None):
1045: """reindex members and project catalog"""
1046:
1047:
1048: try:
1049:
1050: self.ProjectCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1051: logger("MPIWG Root (reindexCatalog: projects)",logging.INFO,"DONE")
1052: except:
1053: logger("MPIWG Root (reindexCatalog: projects)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1054:
1055: try:
1056:
1057: self.MembersCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1058: logger("MPIWG Root (reindexCatalog: members)",logging.INFO,"DONE")
1059: except:
1060: logger("MPIWG Root (reindexCatalog: members)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1061:
1062: try:
1063:
1064: self.fulltextProjectsMembers.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1065: logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.INFO,"DONE")
1066: except:
1067: logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1068:
1069:
1070:
1071:
1072:
1073:
1074: if RESPONSE:
1075: RESPONSE.redirect('manage_main')
1076:
1077:
1078:
1079:
1080: def getAllMembers(self):
1081: #ret=[]
1082:
1083: def sorter(x,y):
1084: return cmp(x[0],y[0])
1085:
1086: results=self.MembersCatalog({'isPublished':True})
1087:
1088: ret=[(unicodify(", ".join([proj.lastName, proj.firstName])), proj.getKey) for proj in results]
1089:
1090: ret.sort(sorter)
1091: return ret
1092:
1093:
1094: def printAllMembers(self):
1095: """print"""
1096: members=self.getAllMembers()
1097: ret=""
1098: for x in members:
1099: ret+="<p>%s</p>"%x
1100: return ret
1101:
1102:
1103: def makeList(self,entry):
1104: """makes a list out of one entry or repeat a list"""
1105: if type(entry) is StringType:
1106: return [entry]
1107: else:
1108: return entry
1109:
    def getTreeRSS(self,dep=None,date=None,onlyActive=1,onlyArchived=0):
        """Render the project tree as an RSS 2.0 feed string.

        Parameters have the same meaning as in getTree(). For every
        project one <item> with a <link> is emitted; projects with a
        publicationList get a second <item> linking to it.

        NOTE(review): the items contain only <link> children; feed
        readers usually also expect <title>/<description> — confirm
        against the consumer of this feed.
        """
        rss="""<?xml version="1.0" encoding="utf-8"?>
        <rss version="2.0">
        <channel>"""

        for obj in self.getTree(dep, date, onlyActive, onlyArchived):
            # obj is (depth, number, [title], projectObject); obj[3] is the project
            linkStr="""<link>http://www.mpiwg-berlin.mpg.de/en/research/projects/%s</link>"""
            rss+="""<item>"""
            rss+=linkStr%obj[3].getId()
            rss+="""</item>"""
            # additional item for the project's publication list, if present
            if hasattr(obj[3],'publicationList'):
                rss+="""<item>"""
                rss+=linkStr%(obj[3].getId()+"/publicationList");
                rss+="""</item>"""
        rss+="""</channel>
        </rss>"""


        return rss
1130:
1131: def getTree(self,dep=None,date=None,onlyActive=0,onlyArchived=0):
1132: """generate Tree from project list
1133: als Liste, jeder Eintrag ist ein Tupel ,(Tiefe, ProjektNummer,ProjektObject
1134: onlyActive = 0 : alle Projekte
1135: onlyActive = 1 : nur active Projekte
1136: onlyActive = 2: nur inactive Projekte
1137:
1138: onlyArchived=0: alle Projekte
1139: onlyArchived= 1 : nur aktuelle Projekte
1140: onlyArchived = 2: nur archivierte Projekte
1141:
1142: department fuer das Tree geholt werden soll
1143: """
1144:
1145: returnListTmp=[]
1146: returnList=[]
1147:
1148: for project in self.getProjectFields('xdata_05',sort="int",date=date): # get Projects sorted by xdata_05
1149:
1150: for idNr in project[1].split(";"): # more than one number
1151: if not idNr=="":
1152: splittedId=idNr.split(".")
1153: depth=len(splittedId)
1154: nr=idNr
1155: #title=project[0].WEB_title
1156: title=[project[0].getContent('WEB_title')]
1157: #print title
1158:
1159: if idNr[0]=="x": # kompatibilitaet mit alter Konvention, x vor der Nummer macht project inactive
1160: project[0].setActiveFlag(False)
1161:
1162: if (not dep) or (splittedId[0]==dep): #falls dep gesetzt ist nur dieses hinzufuegen.
1163:
1164: if (onlyActive==0):
1165: returnListTmp.append((depth,nr,title,project[0]))
1166: elif (onlyActive==1) and project[0].isActiveProject(): #nur active projekte
1167: returnListTmp.append((depth,nr,title,project[0]))
1168: elif (onlyActive==2) and (not project[0].isActiveProject()): #nur active projekte
1169: returnListTmp.append((depth,nr,title,project[0]))
1170:
1171:
1172: #filter jetzt die Liste nach Archived oder nicht
1173: for entry in returnListTmp:
1174: if (onlyArchived==0):
1175: returnList.append(entry)
1176: elif (onlyArchived==1) and (not entry[3].isArchivedProject()): #nur active projekte
1177: returnList.append(entry)
1178: elif (onlyArchived==2) and (entry[3].isArchivedProject()): #nur active projekte
1179: returnList.append(entry)
1180:
1181:
1182: return returnList
1183:
1184:
1185:
1186: def changePosition(self,treeId,select,RESPONSE=None):
1187: """Change Postion Entry"""
1188: numbers=[]
1189:
1190: # Suche hoechste bisherige nummer
1191: projects=self.getProjectFields('xdata_05') # get Projects sorted by xdata_05
1192: #print "pj",projects
1193: for project in projects: #suche alle subtrees der treeId
1194: #print treeId
1195:
1196: founds=re.match(treeId+"\.(.*)",project[1].split(";")[0])
1197: if founds:
1198: #print "x",founds.group(0),len(founds.group(0).split("."))
1199: if len(founds.group(0).split("."))==len(treeId.split("."))+1: # nur ein punkt mehr, d.h. untere ebene
1200: try:
1201: numbers.append(int(founds.group(0).split(".")[len(founds.group(0).split("."))-1]))
1202: except:
1203: numbers.append(int(0))
1204:
1205: try:
1206: highest=max(numbers)
1207: except:
1208: highest=0
1209: projects=self.showNewProjects()
1210: for i in self.makeList(select):
1211: highest+=10
1212: projects[int(i)][0].xdata_05=treeId+"."+str(highest)
1213:
1214:
1215: if RESPONSE is not None:
1216: RESPONSE.redirect('showTree')
1217:
    def changeTree(self,RESPONSE=None):
        """Apply the tree-edit form to all projects shown in the tree.

        Form fields are named "<nr>_runningNumber", "<nr>_active",
        "<nr>_number", "<nr>_completed" and "<nr>_started", where <nr>
        is the index of the entry in the tree returned by getTree()
        with the same dep/onlyActive/onlyArchived parameters.
        Changed entries are copied to the archive afterwards.
        """
        form=self.REQUEST.form
        hashList={}
        onlyArchived=int(form.get("onlyArchived",0))
        onlyActive=int(form.get("onlyActive",0))
        dep=form.get("dep",None)

        # fetch the same tree the form was rendered from, so indexes match
        fields=self.getTree(dep=dep,onlyArchived=onlyArchived,onlyActive=onlyActive)

        logging.info("GOT TREE!----------------------------------------------------")
        for field in form.keys():

            splitted=field.split('_')
            if (len(splitted)>1) and (splitted[1]=="runningNumber"): # field has the form <nr>_runningNumber


                nr=int(splitted[0]) # index of the tree entry
                currentEntry = fields[nr]

                if form.has_key(str(nr)+'_active'): # active flag is set
                    fields[nr][3].setActiveFlag(True)
                else:
                    fields[nr][3].setActiveFlag(False)

                # did the sorting number change?

                entryChanged = False;

                if isinstance(fields[nr][3].xdata_05,list): #for some reasons somtimes the content of the field is a list with one entry.
                    fields[nr][3].xdata_05=fields[nr][3].xdata_05[0]

                if not (fields[nr][3].xdata_05==form[str(nr)+'_number']):
                    logging.info("Changed!Number+++++++++++++++++++++++++++++++++")
                    logging.info(repr(fields[nr][3].xdata_05)+" ---> "+ repr(form[str(nr)+'_number']))
                    fields[nr][3].xdata_05=form[str(nr)+'_number']
                    entryChanged = True

                # did the completion date change?

                td = fields[nr][3].transformDate # date normalization helper of the project

                if not (td(fields[nr][3].getCompletedAt())==td(form[str(nr)+'_completed'])):
                    fields[nr][3].setCompletedAt(form[str(nr)+'_completed'])
                    logging.info(repr(td(fields[nr][3].getCompletedAt()))+" ---> "+ repr(td(form[str(nr)+'_completed'])))
                    logging.info("Changed!Completed+++++++++++++++++++++++++++++++++")
                    entryChanged = True

                # did the start date change?
                if not (td(fields[nr][3].getStartedAt())==td(form[str(nr)+'_started'])):
                    fields[nr][3].setStartedAt(form[str(nr)+'_started'])

                    logging.info(repr(td(fields[nr][3].getStartedAt()))+" ---> "+ repr(td(form[str(nr)+'_started'])))
                    logging.info("Changed!Started+++++++++++++++++++++++++++++++++")
                    entryChanged = True

                # keep a copy of the previous version in the archive
                if entryChanged:
                    logging.info("Changed!+++++++++++++++++++++++++++++++++")
                    fields[nr][3].copyObjectToArchive()


        if RESPONSE is not None:
            RESPONSE.redirect('showTree')
1281:
1282: def getProjectWithId(self,id):
1283: fields=self.getProjectFields('xdata_05')
1284: for field in fields:
1285: if field[1]==id:
1286: return field[0]
1287:
1288: return None
1289:
1290:
1291:
1292:
1293: def getRelativeUrlFromPerson(self,list):
1294: """get urls to person list"""
1295: ret=[]
1296: persons=list.split(";")
1297: for person in persons:
1298:
1299: if len(person)>1: #nicht nur Trennzeichen
1300: splitted=person.split(",")
1301: if len(splitted)==1:
1302: splitted=person.split(" ")
1303: splittedNew=[re.sub(r'\s(.*)','$1',split) for split in splitted]
1304: if splittedNew[0]=='':
1305: del splittedNew[0]
1306: search=string.join(splittedNew,' AND ')
1307:
1308: if not search=='':
1309:
1310: try:
1311: proj=self.MembersCatalog({'title':search})
1312: except:
1313: proj=None
1314:
1315: if proj:
1316: #ret.append("<a href=%s >%s</a>"%(proj[0].absolute_url,person.encode('utf-8')))
1317: ret.append("<a href=%s >%s</a>"%('members/'+proj[0].id+'/index.html',person))
1318: else:
1319: #ret.append("%s"%person.encode('utf-8'))
1320: ret.append("%s"%person)
1321: return string.join(ret,";")
1322:
1323: def getMemberIdFromKey(self,key):
1324: """gibt die ensprechende id im members Ordner zum key"""
1325:
1326: if key=="":
1327: return ""
1328: try:
1329: key=utf8ify(key)
1330: catalogged=self.MembersCatalog({'getKey':key})
1331: if len(catalogged)==0:
1332: return ""
1333: else:
1334: return catalogged[0].getObject().getId()
1335:
1336: except:
1337: return ""
1338:
1339:
1340:
1341: def getProjectsOfMembers(self,date=None):
1342: """give tuple member /projects"""
1343: ret=[]
1344: members=self.getAllMembers()
1345: logging.debug("X %s"%repr(members))
1346: #return str(members)
1347: for x in members:
1348: #logging.debug("X %s"%repr(x))
1349: projects=self.getProjectsOfMember(key=x[1],date=date)
1350: if len(projects)>0:
1351: ret.append((x[0],projects))
1352:
1353: return ret
1354:
1355: def getProjectsOfMember(self,key=None,date=None,onlyArchived=1,onlyActive=1):
1356: """get projects of a member
1357:
1358: @param key: (optional) Key zur Idenfikation des Benutzer
1359: @param date: (optional) Version die zum Zeitpunkt date gueltig war
1360: @param onlyArchived:
1361: onlyArchived=0: alle Projekte
1362: onlyArchived= 1 : nur aktuelle Projekte
1363: onlyArchived = 2: nur archivierte Projekte
1364: """
1365: # TODO: Die ganze Loesung
1366: def sortP(x,y):
1367: """sort by sorting number"""
1368: return cmp(x.WEB_title,y.WEB_title)
1369:
1370: ret=[]
1371: if key:
1372: logging.debug("MPIWGROOT (getProjectsOfMember):"+key)
1373: proj=self.ProjectCatalog({'getPersonKeyList':utf8ify(key)})
1374: else:
1375: return ret # key muss definiert sein
1376:
1377: #logging.debug("MPIWGROOT (getProjectsOfMember):"+repr(proj))
1378: if proj:
1379: proj2=[]
1380: for x in proj:
1381: #logging.error("proj:%s"%repr(x.getPath()))
1382: if (not getattr(x.getObject(),'invisible',None)) and (getattr(x.getObject(),'archiveTime','')==''):
1383: proj2.append(x)
1384:
1385: else:
1386: proj2=[]
1387:
1388:
1389:
1390: proj2.sort(sortP)
1391:
1392: projectListe=[]
1393: #logging.error("getprojectsofmember proj2: %s"%repr(proj2))
1394: for proj in proj2:
1395: obj=proj.getObject()
1396: add=False
1397: if onlyArchived==1: #nur aktuell projecte
1398: if not obj.isArchivedProject():
1399: add=True
1400: elif onlyArchived==2: #nur archivierte
1401: if obj.isArchivedProject():
1402: add=True
1403: else: #alle
1404: add=True
1405:
1406: if onlyActive==1: #nur active projecte
1407: if obj.isActiveProject():
1408: add=add & True
1409: else:
1410: add=add & False
1411:
1412: elif onlyArchived==2: #nur nicht aktvive
1413: if not obj.isActiveProject():
1414: add=add & True
1415: else: #alle
1416: add=add & True
1417:
1418: if add:
1419: projectListe.append(obj)
1420:
1421: #logging.error("getprojectsofmember projectliste: %s"%repr(projectListe))
1422: return projectListe
1423:
1424: def givePersonList(self,name):
1425: """check if person is in personfolder and return list of person objects"""
1426:
1427: splitted=name.split(",")
1428: if len(splitted)==1:
1429: splitted=name.lstrip().rstrip().split(" ")
1430: splittedNew=[split.lstrip() for split in splitted]
1431:
1432: if splittedNew[0]=='':
1433: del splittedNew[0]
1434: search=string.join(splittedNew,' AND ')
1435:
1436: if not search=='':
1437: proj=self.MembersCatalog({'title':search})
1438:
1439: if proj:
1440: return [[x.lastName,x.firstName] for x in proj]
1441: else:
1442: return []
1443:
1444: ## splitted=name.split(",") # version nachname, vorname...
1445: ## if len(splitted)>1:
1446: ## lastName=splitted[0]
1447: ## firstName=splitted[1]
1448: ## else:
1449: ## splitted=name.split(" ") #version vorname irgenwas nachnamae
1450:
1451: ## lastName=splitted[len(splitted)-1]
1452: ## firstName=string.join(splitted[0:len(splitted)-1])
1453:
1454: ## objs=[]
1455:
1456: #print self.members
1457: ## for x in self.members.__dict__:
1458: ## obj=getattr(self.members,x)
1459: ## if hasattr(obj,'lastName') and hasattr(obj,'firstName'):
1460:
1461: ## if (re.match(".*"+obj.lastName+".*",lastName) or re.match(".*"+lastName+".*",obj.lastName)) and (re.match(".*"+obj.firstName+".*",firstName) or re.match(".*"+firstName+".*",obj.firstName)):
1462:
1463: ## objs.append((obj,lastName+", "+firstName))
1464:
1465:
1466: ## return objs
1467:
1468:
1469: def personCheck(self,names):
1470: """all persons for list"""
1471: #print "names",names
1472: splitted=names.split(";")
1473: ret={}
1474: for name in splitted:
1475:
1476: if not (name==""):
1477: try:
1478: ret[name]=self.givePersonList(name)
1479: except:
1480: """NOTHIHN"""
1481: #print "RET",ret
1482: return ret
1483:
1484: def giveCheckList(self,person,fieldname):
1485: """return checklist"""
1486: #print "GCL",fieldname
1487: if fieldname=='xdata_01':
1488: x=self.personCheck(person.getContent(fieldname))
1489: #print "GCLBACKX",x
1490: return x
1491:
1492:
1493: def isCheckField(self,fieldname):
1494: """return chechfield"""
1495:
1496: return (fieldname in checkFields)
1497:
1498:
    def generateNameIndex(self):
        """Build an index of historical person names used in the projects.

        For every name of the fulltext lexicon the 'authorities' database
        is queried for matching first names; each fulltext hit is then
        scanned to see whether a known first name occurs directly before
        or after the last name. The result maps "lastname,firstname" to a
        list of project ids and is also stored in self.nameIndex.

        NOTE(review): Python-2 only (print statements) and requires a
        local psycopg database with hard-coded credentials; the name is
        interpolated into the SQL via quote() — confirm that is adequate
        escaping for this data source.
        """
        import psycopg
        o = psycopg.connect('dbname=authorities user=dwinter password=3333',serialize=0)
        results={}
        print self.fulltext.historicalNames.items()
        for nameItem in self.fulltext.historicalNames.items(): # walk every name of the lexicon

            c = o.cursor()
            name=nameItem[0]
            print "check",name
            c.execute("select lastname,firstname from persons where lower(lastname) = '%s'"%quote(name))
            tmpres=c.fetchall()
            firstnames=[result[1] for result in tmpres] # all first names known for this last name
            if tmpres:
                lastname=tmpres[0][0]
                # NOTE(review): if tmpres is empty, 'lastname' below keeps the
                # value of a previous iteration (NameError on the first one)

            for found in self.fulltext({'names':name}):
                if found.getObject().isActual():
                    for nh in found.getObject().getGetNeighbourhood(name, length=50,tagging=False): # surrounding text of the hit
                        # check whether a first name appears before or after the last name
                        position=nh.find(lastname)
                        # words directly before the last name
                        bevorS=nh[0:position].split()
                        if len(bevorS)>1:
                            try:
                                bevor=[bevorS[-1],bevorS[-2]]
                            except:
                                bevor=[bevorS[0]]
                        else:
                            bevor=[]
                        # words directly after the last name
                        behindS= re.split("[,|;| ]",nh[position:])
                        if len(behindS)>2:
                            try:
                                behind=behindS[1:3]
                            except:
                                behind=[bevorS[1]]
                        else:
                            behind=[]
                        for firstname in firstnames:
                            if firstname in bevor+behind: # names with nobiliary particles are not found this way
                                id="%s,%s"%(lastname,firstname)
                                if not results.has_key(id):
                                    results[id]=[]
                                objId=found.getObject().getId()
                                if not (objId in results[id]):
                                    print "d %s for %s"%(id,objId)
                                    results[id].append(objId)
        self.nameIndex=results
        return results
1553:
1554: def editNameIndexHTML(self):
1555: """edit the name index"""
1556: if not hasattr(self,'nameIndexEdited'): # falls editierter index noch nicht existiert, kopiere automatisch erstellten
1557: self.nameIndexEdited=copy.copy(self.nameIndex)
1558: print "huh"
1559: #self.nameIndexEdited=copy.copy(self.nameIndex)
1560: #print self.nameIndexEdited
1561: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','editHistoricalNames.zpt')).__of__(self)
1562: return pt()
1563:
1564: def getNamesInProject(self,projectId):
1565: """get all names ofnameIndexEdited which are references in projec with projectId"""
1566:
1567: ret=[]
1568: for name in self.nameIndexEdited.keys():
1569: if projectId in self.nameIndexEdited[name]:
1570: ret.append(name)
1571:
1572: return ret
1573:
    def editNameIndex(self,RESPONSE=None,name=None,occurrances=None,submit=None):
        """Edit one entry of the edited historical-names index.

        @param name: index key of the form "lastname,firstname"
        @param occurrances: newline-separated list of project ids
        @param submit: one of 'delete', 'change', 'add'
        """
        # nI is bound locally and reassigned at the end so the ZODB
        # notices the mutation of nameIndexEdited
        nI=self.nameIndexEdited
        if submit=="delete":

            # remember deleted occurrences in deletedHistoricalNames
            dh=getattr(self,'deletedHistoricalNames',{})

            # older versions stored a list here; reset to a dict
            if type(dh) is ListType:
                dh={}
            if not dh.has_key(name):
                dh[name]=occurrances.split("\n")
            else:
                dh[name]+=occurrances.split("\n")

            self.deletedHistoricalNames=dh

            del self.nameIndexEdited[name]


        elif (submit=="change"):

            # replace the occurrence list of this name
            nI[name]=occurrances.split("\n")[0:]

        elif (submit=="add"):
            if not nI.has_key(name):
                nI[name]=occurrances.split("\n")
            else:
                nI[name]+=occurrances.split("\n")

        self.nameIndexEdited=nI


        if RESPONSE is not None:
            RESPONSE.redirect('editNameIndexHTML')
1609:
1610:
1611:
1612: def restoreIndex(self):
1613: """restore"""
1614: self.nameIndexEdited=self.nameIndex
1615: return "done"
1616:
1617:
1618: def sortResults(self,results):
1619: """search the catalog and give results back sorted by meta_type"""
1620: ret = {}
1621: logging.debug(results())
1622: for result in results():
1623: metaType = result.meta_type
1624: resultList= ret.get(metaType,[])
1625: resultList.append(result)
1626: ret[metaType]=resultList
1627:
1628: logging.debug(ret)
1629: return ret
1630:
1631:
def manage_addMPIWGRootForm(self):
    """Render the ZMI form for adding an MPIWGRoot folder."""
    zptPath = os.path.join(package_home(globals()), 'zpt', 'addMPIWGRootForm.zpt')
    template = PageTemplateFile(zptPath).__of__(self)
    return template()
1636:
def manage_addMPIWGRoot(self, id, title, connection_id="", RESPONSE=None):
    """Create an MPIWGRoot instance and register it in this folder.

    @param connection_id: id of the SQL connection the new root uses
    """
    root = MPIWGRoot(id, title)
    self._setObject(id, root)
    # fetch the acquisition-wrapped copy before configuring it
    created = getattr(self, id)
    setattr(created, 'connection_id', connection_id)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
1645:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>