Annotation of MPIWGWeb/MPIWGRoot.py, revision 1.1.2.39
1.1.2.1 dwinter 1: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
2: from Products.PageTemplates.PageTemplate import PageTemplate
3: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
4: from Products.ZSQLExtend.ZSQLExtend import ZSQLExtendFolder
5: from Products.ZCatalog.CatalogPathAwareness import CatalogAware
6: from OFS.Image import Image
7: from Globals import package_home
8: import urllib
9: import MPIWGStaff
10: import string
11: import re
12: import os
13: from types import *
14: import logging
15: import xmlhelper # Methoden zur Verwaltung der projekt xml
16: from OFS.SimpleItem import SimpleItem
17: from OFS.Folder import Folder
18: from Products.ZSQLMethods.SQL import SQLConnectionIDs
19: from AccessControl import ClassSecurityInfo
20: from bibliography import *
21: import time
22: import xml.dom.minidom
23: import sys
1.1.2.36 dwinter 24: import transaction
25:
1.1.2.34 dwinter 26: #from Ft.Xml.XPath import Evaluate
27: #from Ft.Xml.XPath.Context import Context
28: #from Ft.Xml.Domlette import NonvalidatingReader,PrettyPrint, Print
29: #from Ft.Xml import EMPTY_NAMESPACE
1.1.2.1 dwinter 30: import copy
31: import updatePersonalWWW
32: import MPIWGStaff
33: from MPIWGHelper import *
1.1.2.33 dwinter 34: from BeautifulSoup import BeautifulSoup, Comment
1.1.2.35 dwinter 35: from ZODB import FileStorage, DB
36: from ZEO import ClientStorage
1.1.2.6 casties 37:
def sortWeight(x,y):
    """Comparison function ordering (id, object) tuples by the object's
    'weight' attribute (objects without one sort as weight 0).

    Returns -1/0/1 following the classic cmp() contract, for use with
    list.sort(). Implemented without the cmp builtin so it also works on
    Python 3.
    """
    x1=int(getattr(x[1],'weight','0'))
    y1=int(getattr(y[1],'weight','0'))
    # (a > b) - (a < b) is the documented cmp() replacement
    return (x1 > y1) - (x1 < y1)
42:
43:
class MPIWGRoot(ZSQLExtendFolder):
    """Root folder of the MPIWG web server (Zope application object)."""

    # volatile handle to the ZEO-backed harvest cache;
    # initialised lazily by getHarvestCache()
    _v_harvestCache=None
    meta_type='MPIWGRoot'

    # human-readable labels for the generic xdata_* project fields,
    # used by the project edit forms
    fieldLabels={'WEB_title':'WEB_Title',
                 'xdata_01':'Responsible Scientists',
                 'xdata_02':'Department',
                 'xdata_03':'Historical Persons',
                 'xdata_04':'Time period',
                 'xdata_05':'Sorting number',
                 'xdata_06':'Keywords',
                 'xdata_07':'Short title',
                 'xdata_08':'Other involved scholars' ,
                 'xdata_09':'Disciplines',
                 'xdata_10':'Themes',
                 'xdata_11':'Object Digitallibrary',
                 'xdata_12':'Cooperation partners',
                 'xdata_13':'Funding institutions',
                 'WEB_project_header':'WEB_project_header',
                 'WEB_project_description':'WEB_project_description',
                 'WEB_related_pub':'WEB_related_pub'}

    # (is this used?)
    folders=['MPIWGProject','Folder','ECHO_Navigation']
    # language of this instance
    lang = 'en'
    # types of objects that show up in navigation
    nav_meta_types = ['MPIWGTemplate','MPIWGLink','MPIWGFolder']

    # extra tabs in the Zope management interface
    manage_options = Folder.manage_options+(
        {'label':'Update personal homepages','action':'updatePersonalwww_html'},
        {'label':'Reindex catalogs','action':'reindexCatalogs'},
        {'label':'Main config','action':'changeMPIWGRootForm'},
        {'label':'add e-mails','action':'showNewDBEntries'},
        #{'label':'update the institutsbibliography','action':'updateInstitutsbiliography'},
        #{'label':'Edit Historical Persons','action':'editHistoricalPersonsForm'},
        #{'label':'Store Historical Persons','action':'storeHistoricalPersons'},
        )
84:
85:
1.1.2.35 dwinter 86: def getHarvestCachePort(self):
87: return getattr(self,"harvestPort",29999)
88:
89: def getHarvestCacheServer(self):
90: return getattr(self,"harvestServer","localhost")
91:
92:
93: def getHarvestCache(self):
94: logging.debug("CACHE:"+repr(self._v_harvestCache))
95: if self._v_harvestCache==None:
96: #storage = FileStorage.FileStorage('/tmp/'+self.getId()+'test-filestorage.fs')
97: addr = self.getHarvestCacheServer(), self.getHarvestCachePort()
98: storage = ClientStorage.ClientStorage(addr)
99: db = DB(storage)
100: self._v_harvestDV=db
1.1.2.36 dwinter 101: self._v_harvestDV=db
1.1.2.35 dwinter 102: conn = db.open()
103: dbroot = conn.root()
104: if not dbroot.has_key('templates'):
105: from BTrees.OOBTree import OOBTree
106: dbroot['templates'] = OOBTree()
107:
108: self._v_harvestCache = dbroot['templates']
109: logging.debug("CACHE2:"+repr(self._v_harvestCache))
110: return self._v_harvestCache
111:
1.1.2.36 dwinter 112:
113:
1.1.2.35 dwinter 114: def __del__(self):
115: if self._v_harvestCache!=None:
116: self._v_harvestDV.close();
117:
    def getGetNeighbourhood(self,obj, wordStr, length=100,tagging=True):
        """Find the text neighbourhoods of the words in wordStr inside obj's
        harvested page text.

        Returns a list of snippets, one per (merged) hit region. All markup
        is stripped; hits are wrapped in <span class="found">XX</span>; the
        search is case insensitive.

        @param wordStr: words separated by blanks; phrases are quoted with
                        '"' ("a phrase"); "*" marks wildcards and is ignored
        @param length: optional, default 100; 2*length is the size of the
                       neighbourhood around a hit
        @param tagging: optional, default True; no span tag is produced when
                        tagging is False
        """

        ret=[] # the list of snippets that is eventually returned
        ranges=[] # list of (x, y) tuples: start/end positions of the i-th neighbourhood

        wordStr=wordStr.lstrip().rstrip()

        def isInRanges(nr,length):
            """Test whether position nr already falls into one of the found
            neighbourhoods; returns the index of the first matching entry of
            ranges, or -1 for no hit.

            @param nr: position to check
            @param length: length of the word being checked
            """
            for x in ranges:
                if (x[0]<=nr) and (nr < (x[1]-length)):
                    return ranges.index(x)
            return -1

        # deal with phrases: inside a phrase blanks are replaced by "_"
        def rep_empty(str):
            x= re.sub(" ","_",str.group(0))
            return re.sub("\"","",x)

        wordStr=re.sub("\".*?\"", rep_empty,wordStr)# replace blanks inside "..." by "_" and drop the quotes

        #deal with wildcards, for our purposes it is enough to delete the wildcard
        wordStr=wordStr.replace("*","")

        words=wordStr.split(" ")
        #if not words is ListType:
        #    words=[words]

        # try the shared harvest cache first, keyed by the object's URL
        txtCache = self.en.getHarvestCache();
        txt= txtCache.get(obj.absolute_url(),None)

        if txt==None:

            logging.debug("NO CACHE for: "+obj.absolute_url())
            txt=obj.harvest_page(mode="slim")


        if not txt:
            return ret

        # strip HTML comments and all tags, keeping only the text nodes
        soup = BeautifulSoup(txt)

        comments = soup.findAll(text=lambda text:isinstance(text, Comment))
        [comment.extract() for comment in comments]

        txt = ''.join(soup.findAll(text=True))


        #txt=re.sub("<.*?>", "", txt) # delete all tags
        for word in words:
            word=re.sub("_"," ",word) # translate "_" back to " "
            pos=0

            n=txt.lower().count(word.lower()) # number of occurrences of the word

            for i in range(n):
                pos=txt.lower().find(word.lower(),pos)

                if pos > 0:
                    x=max(0,pos-length)
                    y=min(len(txt),pos+length)


                    #is word already in one of the results
                    nr=isInRanges(pos,len(word))
                    if nr >=0:# word lies in an already found neighbourhood, so enlarge it
                        x=min(ranges[nr][0],x)
                        y=max(ranges[nr][1],y)

                    str=txt[x:y]
                    if x!=0: #add dots if in the middle of text
                        str="..."+str

                    if y!=len(txt): #add dots if in the middle of text
                        str=str+"..."



                    if nr >=0: # word lies in an already found neighbourhood
                        ranges[nr]=(x,y) # new bounds of that neighbourhood

                        ret[nr]=str # new snippet
                    else: # otherwise add a new neighbourhood
                        ranges.append((x,y))

                        ret.append(str)

                    pos=pos+len(word)
                else:
                    break;

        # now highlight everything
        if tagging:
            for x in range(len(ret)):
                for word in words:
                    repl=re.compile(word,re.IGNORECASE)
                    ret[x]=repl.sub(""" <span class="found">%s</span>"""%word.upper(),ret[x])

        return ret
1.1.2.9 dwinter 231: def copyAllImagesToMargin(self):
232: """tranformiere alle Bilder in die Margins"""
233: projects=self.getTree()
234: ret=""
235: for project in projects:
236: proj=project[3]
237: try:
238: persons=proj.copyImageToMargin();
239: except:
240: logging.error("Cannnot do: %s"%repr(project))
241:
    def transformProjectsToId(self):
        """Transform to IDs: helper that analyses the old templates and
        attaches the new list of responsible scientists to each project.

        For every project the names in xdata_01 are matched against staff
        keys; unambiguous matches are appended to the project's
        responsibleScientistsList. Returns a text report of ambiguous and
        missing matches.
        """
        projects=self.getTree()
        ret=""
        for project in projects:

            proj=project[3]
            persons=proj.identifyNames(proj.getContent('xdata_01'))
            if not hasattr(proj,'responsibleScientistsList'):
                proj.responsibleScientistsList=[]

            for person in persons.items():

                if len(person[1]) >1: # ambiguous match
                    ret+="nicht eindeutig --- %s: %s\n"%(proj.getId(),person[0])

                elif len(person[1]) ==0: # no entry found
                    ret+="kein eintrag--- %s: %s\n"%(proj.getId(),person[0])
                    proj.responsibleScientistsList.append((person[0],""))
                else:
                    proj.responsibleScientistsList.append((person[0],person[1][0].getObject().getKey()))

        return ret
1.1.2.9 dwinter 266:
1.1.2.1 dwinter 267:
268: def harvestProjects(self):
269: """harvest"""
270: folder="/tmp"
271: try:
272: os.mkdir("/tmp/harvest_MPIWG")
273: except:
274: pass
275: founds=self.ZopeFind(self.aq_parent.projects,obj_metatypes=['MPIWGProject'],search_sub=1)
276: for found in founds:
277: txt=found[1].harvest_page()
278:
279: if txt and (txt != ""):
280: name=found[0].replace("/","_")
281: fh=file("/tmp/harvest_MPIWG/"+name,"w")
282: fh.write(txt)
283: fh.close()
284:
285: def decode(self,str):
286: """decoder"""
1.1.2.3 dwinter 287:
1.1.2.1 dwinter 288: if not str:
289: return ""
290: if type(str) is StringType:
291: try:
292: return str.decode('utf-8')
293: except:
294: return str.decode('latin-1')
295: else:
296: return str
297:
298:
299: def getat(self,array,idx=0,default=None):
300: """return array element idx or default (but no exception)"""
301: if len(array) <= idx:
302: return default
303: else:
304: return array[idx]
305:
    def getLang(self):
        """Return the default language code of this instance (class default 'en')."""
        return self.lang
1.1.2.1 dwinter 309:
310: def browserCheck(self):
311: """check the browsers request to find out the browser type"""
312: bt = {}
313: ua = self.REQUEST.get_header("HTTP_USER_AGENT")
314: bt['ua'] = ua
315: bt['isIE'] = False
316: bt['isN4'] = False
317: if string.find(ua, 'MSIE') > -1:
318: bt['isIE'] = True
319: else:
320: bt['isN4'] = (string.find(ua, 'Mozilla/4.') > -1)
321:
322: try:
323: nav = ua[string.find(ua, '('):]
324: ie = string.split(nav, "; ")[1]
325: if string.find(ie, "MSIE") > -1:
326: bt['versIE'] = string.split(ie, " ")[1]
327: except: pass
328:
329: bt['isMac'] = string.find(ua, 'Macintosh') > -1
330: bt['isWin'] = string.find(ua, 'Windows') > -1
331: bt['isIEWin'] = bt['isIE'] and bt['isWin']
332: bt['isIEMac'] = bt['isIE'] and bt['isMac']
333: bt['staticHTML'] = False
334:
335: return bt
336:
337:
338: def versionHeaderEN(self):
339: """version header text"""
340:
341: date= self.REQUEST.get('date',None)
342: if date:
343: txt="""<h2>This pages shows the project which existed at %s</h2>"""%str(date)
344: return txt
345: return ""
346:
347: def versionHeaderDE(self):
348: """version header text"""
349: date= self.REQUEST.get('date',None)
350: if date:
351: txt="""<h2>Auf dieser Seite finden Sie die Projekte mit Stand vom %s</h2>"""%str(date)
352: return ""
353:
354:
355: def createOrUpdateId_raw(self):
356: """create sequence to create ids for bibliography"""
357: debug=None
358: #suche groesste existierende id
359: founds=self.ZSQLQuery("select id from bibliography")
360:
361: if founds:
362: ids=[int(x.id[1:]) for x in founds]
363: maximum=max(ids)
364:
365: id_raw=self.ZSQLQuery("select nextval('id_raw')",debug=debug)
366:
367: if id_raw:
368: self.ZSQLQuery("drop sequence id_raw",debug=debug)
369:
370: self.ZSQLQuery("create sequence id_raw start %i"%(maximum+1),debug=debug)
371:
372:
373: def queryLink(self,link):
374: """append querystring to the link"""
375: return "%s?%s"%(link,self.REQUEST.get('QUERY_STRING',''))
376:
377: def getKategory(self,url):
378: """kategorie"""
379: splitted=url.split("/")
380: return splitted[4]
381:
382: def generateUrlProject(self,url,project=None):
383: """erzeuge aus absoluter url, relative des Projektes"""
384: if project:
385: splitted=url.split("/")
386: length=len(splitted)
387: short=splitted[length-2:length]
388:
389: base=self.REQUEST['URL3']+"/"+"/".join(short)
390:
391: else:
392: findPart=url.find("/projects/")
393: base=self.REQUEST['URL1']+"/"+url[findPart:]
394:
395:
396: return base
397:
398: def isNewCapital(self,text=None,reset=None):
399: if reset:
400: self.REQUEST['capital']="A"
401: return True
402: else:
403: if len(text)>0 and not (text[0]==self.REQUEST['capital']):
404: self.REQUEST['capital']=text[0]
405: return True
406: else:
407: return False
408:
409: def subNavStatic(self,obj):
410: """subnav" von self"""
411: subs=self.ZopeFind(obj,obj_metatypes=['MPIWGTemplate','MPIWGLink'])
412: subret=[]
413:
414: for x in subs:
415: if not(x[1].title==""):
416: subret.append(x)
417: subret.sort(sortWeight)
418: return subret
419:
    def subNav(self,obj):
        """return sub-navigation elements i.e. below sections"""
        # get section -> parent should be MPIWGRoot
        p = obj
        sec = None
        # descend parents to the root (and remember the last id)
        while p is not None and p.meta_type != 'MPIWGRoot':
            sec = p
            p = p.aq_parent

        # NOTE(review): if obj is the root itself, sec stays None and the
        # next line raises AttributeError -- confirm callers never pass root
        subsecs = sec.objectItems(self.nav_meta_types)
        # keep only entries with a non-empty title, ordered by weight
        subsecs = [s for s in subsecs if s[1].title != ""]
        subsecs.sort(sortWeight)
        return subsecs
434:
1.1.2.11 dwinter 435: def isType(self,object,meta_type):
436: """teste ob ein object vom meta_type ist."""
437: return (object.meta_type==meta_type)
438:
1.1.2.1 dwinter 439: def isActive(self,name):
440: """teste ob subnavigation aktiv"""
441: for part in self.REQUEST['URL'].split("/"):
442: if part==name:
443: return True
444: return False
445:
1.1.2.6 casties 446:
447: def getSections(self):
448: """returns a list of all sections i.e. top-level MPIWGFolders"""
449: secs = self.objectItems(['MPIWGFolder'])
450: secs.sort(sortWeight)
451: #logging.debug("root: %s secs: %s"%(repr(self.absolute_url()), repr(secs)))
1.1.2.13 casties 452: # return pure list of objects
453: return [s[1] for s in secs]
1.1.2.1 dwinter 454:
455: def getSectionStyle(self, name, style=""):
456: """returns a string with the given style + '-sel' if the current section == name"""
457: if self.getSection() == name:
458: return style + '-sel'
459: else:
460: return style
461:
1.1.2.23 casties 462: def getFeatures(self, num=None):
463: """returns a list of the last num Features"""
1.1.2.13 casties 464: dir = getattr(self, 'features')
465: features = dir.objectItems(['MPIWGFeature'])
466: features.sort(sortWeight)
1.1.2.23 casties 467: if num is not None:
468: # take only the last num elements
469: features = features[-num:]
1.1.2.13 casties 470: # return pure list of objects
471: return [f[1] for f in features]
472:
473:
1.1.2.20 casties 474: def getMPIWGRoot(self):
475: """returns the MPIWG root"""
476: return self
477:
1.1.2.1 dwinter 478: def MPIWGrootURL(self):
479: """returns the URL to the root"""
480: return self.absolute_url()
481:
    def upDateSQL(self,fileName):
        """updates SQL databases using fm.jar

        Runs the bundled fm.jar converter (FileMaker -> SQL) on the XML
        configuration file named fileName inside the product's updateSQL
        directory; the converter's output is logged. Always returns 1.
        """
        fmJarPath=os.path.join(package_home(globals()), 'updateSQL/fm.jar')
        xmlPath=os.path.join(package_home(globals()), "updateSQL/%s"%fileName)
        logger("MPIWG Web",logging.INFO,"java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath))
        # blocking call; reads the converter's stdout to completion
        ret=os.popen("java -classpath %s -Djava.awt.headless=true Convert %s"%(fmJarPath,xmlPath),"r").read()
        logger("MPIWG Web",logging.INFO,"result convert: %s"%ret)
        return 1
489: return 1
490:
    def patchProjects(self,RESPONSE):
        """One-off patch: strip the "/CD/projects/" prefix from every
        project's WEB_project_description, reporting progress to RESPONSE."""
        projects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject'])
        for project in projects:
            tmp=project[1].WEB_project_description[0].replace("/CD/projects/","")[0:]
            setattr(project[1],'WEB_project_description',[tmp[0:]])
            RESPONSE.write("<p>%s</p>\n"%project[0])
498:
499: def replaceNotEmpty(self,format,field):
500: """replace not empty"""
501: if field and (not field.lstrip()==''):
1.1.2.3 dwinter 502: return self.decode(format%field)
1.1.2.1 dwinter 503: else:
504: return ""
505:
506:
    def isActiveMember(self,key):
        """Test whether the staff member identified by key is active, i.e.
        has a personal_www row with publish_the_data='yes'."""
        key=utf8ify(key)
        # first matching row or None (getat returns element 0 or default)
        ret=self.getat(self.ZSQLInlineSearch(_table='personal_www',
                                             _op_key='eq',key=key,
                                             _op_publish_the_data='eq',
                                             publish_the_data='yes'))

        logging.info("ACTIVE_MEMBER %s"%ret)
        if ret:
            return True
        else:
            return False
520:
521: def isActual(self,project):
522: """checke if project is actual"""
523: actualTime=time.localtime()
524:
525: if hasattr(project,'getObject'): #obj ist aus einer catalogTrefferList
526: obj=project.getObject()
527: else:
528: obj=project
529:
530: if getattr(obj,'archiveTime',actualTime)< actualTime:
531: return False
532: else:
533: return True
534:
    def redirectIndex_html(self,request):
        """Return the body of index_html below the request's URL1.

        NOTE(review): this fetches the page over HTTP and returns its text
        instead of issuing a client redirect (see the commented alternative).
        """
        #return request['URL1']+'/index_html'

        return urllib.urlopen(request['URL1']+'/index_html').read()
539:
540:
    def formatBibliography(self,here,found):
        """Format a bibliography entry: thin wrapper delegating to the
        star-imported module-level formatBibliography helper."""
        return formatBibliography(here,found)
544:
545: def getValue(self,fieldStr):
546: """Inhalt des Feldes"""
547:
548: if type(fieldStr)==StringType:
549: field=fieldStr
550: else:
551: field=fieldStr[0]
552: try:
553: if field[len(field)-1]==";":
554: field=field[0:len(field)-1]
555: except:
556:
557: """nothing"""
558: field=re.sub(r';([^\s])','; \g<1>',field)
559: return field.encode('utf-8')
560:
561:
562:
563: def sortedNames(self,list):
564: """sort names"""
565:
566: def sortLastName(x_c,y_c):
567: try:
568: x=urllib.unquote(x_c).encode('utf-8','ignore')
569: except:
570: x=urllib.unquote(x_c)
571:
572: try:
573: y=urllib.unquote(y_c).encode('utf-8','ignore')
574: except:
575: x=urllib.unquote(y_c)
576:
577:
578:
579: try:
580: last_x=x.split()[len(x.split())-1]
581: last_y=y.split()[len(y.split())-1]
582:
583: except:
584:
585: last_x=""
586: last_y=""
587:
588:
589:
590: if last_x<last_y:
591: return 1
592: elif last_x>last_y:
593: return -1
594: else:
595: return 0
596:
597: list.sort(sortLastName)
598: list.reverse()
599:
600: return list
601:
602: def __init__(self, id, title):
603: """init"""
604: self.id=id
605: self.title=title
606:
607: def removeStopWords(self,xo):
608: """remove stop words from xo"""
609: if not hasattr(self,'_v_stopWords'):
610: self._v_stopWords=self.stopwords_en.data.split("\n")
611:
612: x=str(xo)
613:
614: strx=x.split(" ")
615:
616: for tmp in strx:
617:
618: if tmp.lower() in self._v_stopWords:
619: del strx[strx.index(tmp)]
620:
621: return " ".join(strx)
622:
    def urlQuote(self,str):
        """URL-quote str (thin wrapper around urllib.quote for templates)."""
        return urllib.quote(str)
626:
    def urlUnQuote(self,str):
        """URL-unquote str (thin wrapper around urllib.unquote for templates)."""
        return urllib.unquote(str)
630:
631:
632:
    def getProjectsByFieldContent(self,fieldName,fieldContentsEntry, date=None):
        """Return all visible projects whose field fieldName matches one of
        the entries in fieldContentsEntry.

        fieldContentsEntry may be a single string or a list of strings; the
        words of each entry are AND-combined for the catalog query. Each hit
        is resolved to its version valid at 'date' and invisible projects
        are skipped. Result is sorted by WEB_title.
        """
        def sort(x,y):
            return cmp(x.WEB_title[0],y.WEB_title[0])

        if type(fieldContentsEntry) is StringType:
            fieldContentsTmp=[fieldContentsEntry]
        else:
            fieldContentsTmp=fieldContentsEntry

        fieldContents=[]
        for x in fieldContentsTmp:
            fieldContents.append(" AND ".join(x.split()))
        projects=self.ProjectCatalog({fieldName:string.join(fieldContents,' AND')})
        #print projects
        #ret=[x for x in projects]
        ret=[]
        for x in projects:
            obj=x.getObject()
            # resolve to the version valid at 'date'; skip invisible ones
            obj=obj.getActualVersion(date)
            if obj and (not getattr(obj,'invisible',None)):
                #if not (x in ret):
                ret.append(x)

        ret.sort(sort)
        return ret
659:
    def changeMPIWGRootForm(self):
        """Render the ZPT form for editing the main configuration (ZMI tab)."""
        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','changeMPIWGRootForm')).__of__(self)
        return pt()
664:
1.1.2.35 dwinter 665: def changeMPIWGRoot(self,title,connection_id,coneServiceURL,harvestPort,harvestServer,lang=None,autocommit=None,RESPONSE=None):
1.1.2.1 dwinter 666: """change"""
667: self.title=title
668: self.connection_id=connection_id
1.1.2.32 dwinter 669: #self.disciplineList=disciplineList
670: #self.themesList=themesList
671: self.coneServiceURL=coneServiceURL
1.1.2.35 dwinter 672: self.harvestServer=harvestServer
673: try:
674: self.harvestPort=int(harvestPort)
675: except:
676: logging.error("couldn't change port!: no number:"+harvestPort)
1.1.2.32 dwinter 677:
1.1.2.18 casties 678: if lang is not None:
679: self.lang = lang
1.1.2.1 dwinter 680:
1.1.2.30 casties 681: self.autocommit = (autocommit == "on")
1.1.2.1 dwinter 682: if RESPONSE is not None:
683: RESPONSE.redirect('manage_main')
684:
685:
    def getContexts(self,childs=None,parents=None,depth=None,date=None,onlyActive=True):
        """Return context projects based on the dotted sorting numbers in
        xdata_05 (e.g. "2.1.3"): the ancestors of 'parents' and/or the
        sub-projects of 'childs'.

        @param childs: sorting number whose sub-projects are collected
        @param parents: sorting number whose ancestors are collected
        @param depth: limit on the number of parents / the child nesting depth
        @param date: resolve project versions at this date
        @param onlyActive: when True, only active projects count as children
        """
        ret=[]

        if parents:
            # walk up the dotted number: "1.2.3" -> "1.2" -> "1"
            pnums=parents.split(".")
            while len(pnums) > 1:
                pnums.pop()
                parentId=string.join(pnums,".")

                for project in self.getProjectFields('xdata_05',sort='int',date=date):
                    if project[1]==parentId:
                        ret.append(project)

                if (depth is not None) and (len(ret) >= depth):
                    break

        if childs:
            for project in self.getProjectFields('xdata_05',sort='int',date=date):
                # match any dotted number below childs
                searchStr=childs+"(\..*)"

                if (onlyActive and project[0].isActiveProject()) or (not onlyActive):
                    if re.match(searchStr,project[1]):

                        if depth:

                            # keep only children within 'depth' extra levels
                            if int(depth)>=len(project[1].split("."))-len(childs.split(".")):

                                ret.append(project)
                        else:
                            ret.append(project)

        #logging.debug("getContexts: childs=%s parents=%s depth=%s => %s"%(childs,parents,depth,repr(ret)))

        return ret
1.1.2.6 casties 721:
1.1.2.39! dwinter 722: def getAllProjectsAndTagsAsCSV(self,archived=1,RESPONSE=None):
1.1.2.38 dwinter 723: """alle projekte auch die nicht getaggten"""
724: retList=[]
1.1.2.39! dwinter 725: headers=['projectName','scholars','lastChange thesaurus','last change project','persons','places','objects']
! 726: headers.extend(list(self.thesaurus.tags.keys()))
! 727: retList.append("\t".join(headers))
! 728: if not hasattr(self,'thesaurus'):
! 729: return "NON thesaurus (there have to be a MPIWGthesaurus object, with object ID thesaurus)"
! 730:
! 731: projectTags = self.thesaurus.getProjectsAndTags()
1.1.2.38 dwinter 732: for project in self.getProjectFields('WEB_title_or_short'):
733: proj = project[0]
734: p_name = project[1]
735: retProj=[]
736: if (not proj.isArchivedProject() and archived==1) or (proj.isArchivedProject() and archived==2):
737: retProj.append(self.utf8ify(p_name))
1.1.2.39! dwinter 738: retProj.append(self.utf8ify(proj.getContent('xdata_01')))
! 739: retProj.append(self.thesaurus.lastChangeInThesaurus.get(proj.getId(),''))
1.1.2.38 dwinter 740: retProj.append(self.utf8ify(getattr(proj,'creationTime','20050101000000')))
1.1.2.39! dwinter 741: retProj.append(";".join([person[1] for person in self.thesaurus.getPersonsFromProject(proj.getId())]))
! 742: retProj.append(";".join([person[1] for person in self.thesaurus.getHistoricalPlacesFromProject(proj.getId())]))
! 743: retProj.append(";".join([person[1] for person in self.thesaurus.getObjectsFromProject(proj.getId())]))
! 744: retProj+=self.thesaurus.getTags(proj.getId(),projectTags)
1.1.2.38 dwinter 745: retList.append("\t".join(retProj))
1.1.2.39! dwinter 746:
! 747: if RESPONSE:
! 748: RESPONSE.setHeader('Content-Type', "application/octet-stream")
! 749:
1.1.2.38 dwinter 750: return "\n".join(retList);
1.1.2.1 dwinter 751:
    def getProjectFields(self,fieldName,date=None,folder=None,sort=None):
        """Return (project, field value) tuples for all visible projects
        directly inside self.projects.

        fieldName "WEB_title_or_short" picks xdata_07 (short title) when it
        has real content, otherwise WEB_title. sort="int" sorts numerically,
        sort="stopWords" ignores leading stop words, default is alphabetic.
        Projects are resolved to the version valid at 'date'; invisible
        projects are skipped.
        """
        ret=[]

        objects=self.ZopeFind(self.projects,obj_metatypes=['MPIWGProject'],search_sub=0)


        for object in objects:
            obj=object[1]
            obj=obj.getActualVersion(date)
            if obj and (not getattr(obj,'invisible',None)):
                if fieldName=="WEB_title_or_short":

                    if len(obj.getContent('xdata_07'))<3: # hack: the short-title field sometimes still holds a stray separator (';' or ',') instead of being empty
                        fieldNameTmp="WEB_title"
                    else:
                        fieldNameTmp="xdata_07"
                else:
                    fieldNameTmp=fieldName

                ret.append((obj,obj.getContent(fieldNameTmp)))


        if sort=="int":
            ret.sort(sortI)
        elif sort=="stopWords":

            ret.sort(sortStopWords(self))

        else:
            ret.sort(sortF)

        return ret
785:
786: def showNewProjects(self):
787: projects=[]
788: for objs in self.getProjectFields('WEB_title_or_short'): # Get all Projets
789: if objs[0].xdata_05 and (objs[0].xdata_05[0] == ""):
790:
791: projects.append(objs)
792:
793: return projects
794:
795:
    def updatePublicationDB(self,personId=None):
        """updates the publication db, i.e. copy year and type into the main table

        With personId given only that person's publications are processed,
        otherwise all rows. Year and reference type are copied from the
        institute bibliography or the general bibliography, depending on
        which id column is filled. Always returns True.
        """

        if personId:
            founds = self.ZSQLInlineSearch(_table="publications",key_main=personId)
        else:
            founds = self.ZSQLInlineSearch(_table="publications")

        for found in founds:

            if found.id_institutsbibliographie and (not found.id_institutsbibliographie =="") and (not found.id_institutsbibliographie =="0"):

                entries = self.ZSQLInlineSearch(_table="institutsbiblio",id=found.id_institutsbibliographie)
                for entry in entries:
                    self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type)

            if found.id_gen_bib and (not found.id_gen_bib ==""):
                entries = self.ZSQLInlineSearch(_table="bibliography",id=found.id_gen_bib)
                for entry in entries:
                    self.ZSQLChange(_table='publications',_identify='oid=%s' % found.oid,year=entry.year,referencetype=entry.reference_type)

        return True
818:
    def showNewDBEntries(self):
        """Show new rows in the staff database that have no e-mail address
        yet, i.e. for which no Zope staff object has been created."""

        qstr="select * from personal_www where web_object_created='no' and not key=''"
        res=self.ZSQLQuery(qstr)

        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','showNewDBEntries.zpt')).__of__(self)
        return pt(newEntries=res)
827:
    def createNewStaffObjects(self,RESPONSE):
        """Create MPIWGStaff objects in the members folder for the e-mail
        addresses entered in the request form, and mark the matching
        personal_www rows as created. Returns a plain-text report.
        """

        memberFolder=getattr(self,'members')
        args=self.REQUEST.form
        arg_k=args.keys()
        arg_k.remove("submit")
        ret=""
        for key in arg_k:
            # form keys are URL-quoted staff keys
            k=self.urlUnQuote(key)

            qstr="select * from personal_www where key=%s"%self.ZSQLQuote(k)
            res=self.ZSQLQuery(qstr)[0]
            if args[key]!="": # an e-mail address was entered
                #create the object
                e_mail=args[key]
                try:
                    newObj=MPIWGStaff.MPIWGStaff(e_mail,res.last_name,res.first_name,k)
                    memberFolder._setObject(e_mail,newObj)
                    obj=getattr(memberFolder,e_mail)
                    obj.reindex_object()
                    ret+="Created %s \n"%e_mail
                    created=True
                except:
                    msg="Cannot create new user %s (%s %s)"%(e_mail,sys.exc_info()[0],sys.exc_info()[1])
                    logging.error(msg)
                    ret+=msg+"\n"
                    created=False

                if created:
                    # record the address and flag the row as created
                    qstr="update personal_www set web_object_created='yes',e_mail='%s@mpiwg-berlin.mpg.de' where key=%s"%(e_mail,self.ZSQLQuote(k))
                    self.ZSQLQuery(qstr)

        return ret
862:
863:
    def generateNewPersonEntry(self,data):
        """Generate a new person entry from data; new persons are initially
        created in the database only (no Zope staff object yet).

        Returns the (result, msg) pair from MPIWGStaff.createNewDBEntry.
        """

        #memberFolder=getattr(self,'members')
        #create the object

        # try:
        #     newObj=MPIWGStaff.MPIWGStaff(urllib.quote(data['key']),data['last_name'].encode('utf-8'),data['first_name'].encode('utf-8'))
        #     memberFolder._setObject(urllib.quote(data['key']),newObj)
        # except:
        #     return False, "Cannot create new user %s (%s %s)"%(data['key'],sys.exc_info()[0],sys.exc_info()[1])
        #

        #create the new entry in the database


        result,msg=MPIWGStaff.createNewDBEntry(self,data['publish_the_data'],data['key'],data['last_name'],
                                               data['first_name'],data['titles_new'],data['status'],"",
                                               "",data['date_from'],data['date_to'],
                                               data['department'],'',data['funded_by'],
                                               data['e_mail2'],data['current_work'],"yes",data['date_stay_at_mpiwg'],data['group'],"no",data['current_work'])

        return result,msg
887:
1.1.2.28 casties 888: def updatePersonEntry(self,data,ignoreEntries=None):
1.1.2.1 dwinter 889: """update an person entry from data. but ignore all fields in ignore Entries"""
1.1.2.28 casties 890: if ignoreEntries is None:
891: ignoreEntries = []
892:
1.1.2.31 dwinter 893: #ignoreEntries.append('current_work') # TODO:updatecurrent work
1.1.2.28 casties 894: logging.debug("updatePersonEntry: data=%s ignoreEntries=%s"%(repr(data),repr(ignoreEntries)))
1.1.2.1 dwinter 895:
896: if data['date_to']=="": # wenn date_to leer
897: data['date_to']="date_none"
898:
899: if data['date_from']=="": # wenn date_fromleer
900: data['date_from']="date_none"
901: msg=""
902:
903:
904: #eintragen
905:
906: columns=data.keys()
907: for x in ignoreEntries:
1.1.2.28 casties 908: logging.debug("updatePersonEntry: ignoring %s"%x)
1.1.2.1 dwinter 909: try: #falls in ignore entries felder sind, die nicht in columns sind, fange den fehler ab
1.1.2.28 casties 910: columns.remove(x)
1.1.2.1 dwinter 911: except:
912: pass
913:
914:
915: insert=[]
916: for key in columns:
917: if data[key]=="date_none": # date_none eintrag wird zu null uebersetzt
918: insert.append('%s=null'%key)
919: else:
920: insert.append(""" "%s"=%s"""%(key,self.ZSQLQuote(data[key])))
921:
922: insertStr=",".join(insert)
923: queryStr="update personal_www SET %s where key='%s'"%(insertStr,data['key'])
924: self.ZSQLQuery("SET DATESTYLE TO 'German'")
925: self.ZSQLQuery(queryStr)
926:
927: #currentwork
928: #if not (txt==""):
929: # queryStr="INSERT INTO current_work (id_main,current,publish) VALUES ('%s','%s','%s')"%(id,txt,txt_p)
930: #
931: # self.ZSQLQuery(queryStr)
932:
933: return True,msg
934:
935:
    def updatePersonalwww_doIt(self):
        """Apply the staff-database update prepared in the session.

        Reads resultSet/news/conflicts from REQUEST.SESSION (placed there by
        updatePersonalwww): creates the selected new person entries, applies
        the user's per-field conflict resolutions, then updates all
        remaining rows. Returns an HTML report (German UI text).
        """
        args=self.REQUEST.form
        resultSet=self.REQUEST.SESSION['personal_www']['resultSet']
        news=self.REQUEST.SESSION['personal_www']['news']
        conflicts=self.REQUEST.SESSION['personal_www']['conflicts']
        logging.debug("updatePersonalwww_doIt: args=%s\n resultSet=%s\n news=%s\n conflicts=%s"%(args,resultSet,news,conflicts))

        ret="<html><body>"
        # generate the new entry

        if news and (len(news)>0):
            ret+="<h2>Hinzugefügt</h2>"
            ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>"
            ret+="<ul>"

            for new in news:
                if args.has_key(self.urlQuote(new.encode('utf-8'))): # entry was selected
                    result,msg=self.generateNewPersonEntry(resultSet[new])
                    if not result:
                        logging.error("Error (generateNewPersonEntry) %s"%msg)
                        ret+="<li>ERROR: %s %s"%(new.encode('utf-8'),msg)
                    else:
                        ret+="<li>OK: %s"%(new.encode('utf-8'))

        if news and (len(news)>0):
            ret+="<p>Neueinträge erscheinen erst auf der Homepage, wenn ihnen eine e-mail Adresse zugeordnet wurde.</p>"
            ret+="</ul>"

        # update

        if len(conflicts.keys())>0:
            ret+="<h2>Änderung des Benutzers übernehmen</h2>"
            ret+="<p>Wenn nötig in Filemaker-db ändern:</p>"

        # conflicts: apply the per-field choice made in the form
        for conflict in conflicts.keys():
            ignoreEntries=[]
            displayIgnored=[]
            for cf in conflicts[conflict]:
                if args[conflict.encode('utf-8')+'_'+cf[0]]=="stored": #use the stored one
                    ignoreEntries.append(cf[0]) #so ignore field cf[0]
                    displayIgnored.append(cf)

            if len(displayIgnored)>0:
                ret+="<h3>%s</h3>"%conflict.encode('utf-8')
                ret+="<table border='1'>"
                for iE in displayIgnored:
                    ret+="<tr><td>%s</td><td>%s</td><td>%s</td>"%(iE[0].encode('utf-8'),iE[1].encode('utf-8'),iE[2].encode('utf-8'))
                ret+="</table>"

            self.updatePersonEntry(resultSet[conflict],ignoreEntries=ignoreEntries)

        # the rest: rows without conflicts
        cl=list(conflicts.keys())

        for key in resultSet.keys():
            if key not in cl:
                self.updatePersonEntry(resultSet[key])
        return ret+"</body></html>"
996:
997:
998: def updateInstitutsbiliography(self):
999: """update the Institutsbibliogrpahy"""
1000: self.upDateSQL('personalwww.xml')
1001: return "<html><body>DONE</body></html>"
1002:
1003:
1.1.2.11 dwinter 1004:
1005:
1.1.2.1 dwinter 1006: def updatePersonalwww_html(self):
1007: """update form for the homepages web form"""
1008: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww.zpt')).__of__(self)
1009: return pt()
1010:
1011:
1012: def updatePersonalwww(self,uploadfile):
1013: """update personalwww
1014: @param uploadfile: file handle auf das file
1015: """
1016: dsn=self.getConnectionObj().connection_string
1017: #dsn="dbname=personalwww"
1018: resultSet=updatePersonalWWW.importFMPXML(uploadfile)
1019: news,conflicts=updatePersonalWWW.checkImport(dsn, resultSet)
1020:
1021: self.REQUEST.SESSION['personal_www']={}
1022: self.REQUEST.SESSION['personal_www']['resultSet']=resultSet
1023: self.REQUEST.SESSION['personal_www']['news']=news
1024: self.REQUEST.SESSION['personal_www']['conflicts']=conflicts
1025:
1026: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','updatePersonalwww_check.zpt')).__of__(self)
1027: return pt()
1028:
1029:
1030:
1031: def reindexCatalogs(self,RESPONSE=None):
1032: """reindex members and project catalog"""
1033:
1034:
1035: try:
1036:
1037: self.ProjectCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1.1.2.22 dwinter 1038: logger("MPIWG Root (reindexCatalog: projects)",logging.INFO,"DONE")
1039: except:
1040: logger("MPIWG Root (reindexCatalog: projects)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1041:
1042: try:
1043:
1044: self.MembersCatalog.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1045: logger("MPIWG Root (reindexCatalog: members)",logging.INFO,"DONE")
1046: except:
1047: logger("MPIWG Root (reindexCatalog: members)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1048:
1049: try:
1050:
1051: self.fulltextProjectsMembers.manage_catalogReindex(self.REQUEST,RESPONSE,self.REQUEST['URL1'])
1052: logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.INFO,"DONE")
1.1.2.1 dwinter 1053: except:
1.1.2.22 dwinter 1054: logger("MPIWG Root (reindexCatalog: fulltextProjectsMembers)",logging.WARNING," %s %s"%sys.exc_info()[:2])
1.1.2.1 dwinter 1055:
1056:
1057:
1.1.2.35 dwinter 1058:
1059:
1060:
1.1.2.1 dwinter 1061: if RESPONSE:
1062: RESPONSE.redirect('manage_main')
1063:
1064:
1065:
1066:
1067: def getAllMembers(self):
1068: #ret=[]
1069:
1070: def sorter(x,y):
1071: return cmp(x[0],y[0])
1072:
1073: results=self.MembersCatalog({'isPublished':True})
1.1.2.12 dwinter 1074:
1.1.2.25 casties 1075: ret=[(unicodify(", ".join([proj.lastName, proj.firstName])), proj.getKey) for proj in results]
1.1.2.1 dwinter 1076:
1077: ret.sort(sorter)
1078: return ret
1079:
1080:
1081: def printAllMembers(self):
1082: """print"""
1083: members=self.getAllMembers()
1084: ret=""
1085: for x in members:
1086: ret+="<p>%s</p>"%x
1087: return ret
1088:
1089:
1090: def makeList(self,entry):
1091: """makes a list out of one entry or repeat a list"""
1092: if type(entry) is StringType:
1093: return [entry]
1094: else:
1095: return entry
1096:
    def getTreeRSS(self,dep=None,date=None,onlyActive=1,onlyArchived=0):
        """Return the project tree as a minimal RSS 2.0 feed.

        One <item> per project link; projects that have a publicationList
        attribute get an additional item pointing at that list. The
        parameters are passed straight through to getTree().
        """
        rss="""<?xml version="1.0" encoding="utf-8"?>
        <rss version="2.0">
          <channel>"""

        for obj in self.getTree(dep, date, onlyActive, onlyArchived):
            linkStr="""<link>http://www.mpiwg-berlin.mpg.de/en/research/projects/%s</link>"""
            rss+="""<item>"""
            rss+=linkStr%obj[3].getId()
            rss+="""</item>"""
            if hasattr(obj[3],'publicationList'):
                rss+="""<item>"""
                rss+=linkStr%(obj[3].getId()+"/publicationList");
                rss+="""</item>"""
        rss+="""</channel>
        </rss>"""


        return rss
1.1.2.1 dwinter 1117:
    def getTree(self,dep=None,date=None,onlyActive=0,onlyArchived=0):
        """Generate a tree from the project list.

        Returns a flat list; each entry is a tuple
        (depth, projectNumber, [title], projectObject), where depth is the
        number of dot-separated components of the tree number.

        onlyActive = 0 : all projects
        onlyActive = 1 : only active projects
        onlyActive = 2 : only inactive projects

        onlyArchived = 0 : all projects
        onlyArchived = 1 : only current projects
        onlyArchived = 2 : only archived projects

        dep: department (top-level tree-number prefix) to restrict the tree to
        """

        returnListTmp=[]
        returnList=[]

        for project in self.getProjectFields('xdata_05',sort="int",date=date): # get Projects sorted by xdata_05

            for idNr in project[1].split(";"): # a project can carry more than one tree number
                if not idNr=="":
                    splittedId=idNr.split(".")
                    depth=len(splittedId)
                    nr=idNr
                    title=[project[0].getContent('WEB_title')]

                    if idNr[0]=="x": # compatibility with the old convention: a leading "x" marks the project inactive
                        project[0].setActiveFlag(False)

                    if (not dep) or (splittedId[0]==dep): # if dep is set, only include projects of that department

                        if (onlyActive==0):
                            returnListTmp.append((depth,nr,title,project[0]))
                        elif (onlyActive==1) and project[0].isActiveProject(): # only active projects
                            returnListTmp.append((depth,nr,title,project[0]))
                        elif (onlyActive==2) and (not project[0].isActiveProject()): # only inactive projects
                            returnListTmp.append((depth,nr,title,project[0]))


        # second pass: filter the collected list by archived state
        for entry in returnListTmp:
            if (onlyArchived==0):
                returnList.append(entry)
            elif (onlyArchived==1) and (not entry[3].isArchivedProject()): # only current projects
                returnList.append(entry)
            elif (onlyArchived==2) and (entry[3].isArchivedProject()): # only archived projects
                returnList.append(entry)


        return returnList
1170:
1171:
1172:
    def changePosition(self,treeId,select,RESPONSE=None):
        """Move the selected new projects below the tree node treeId.

        Each selected project is appended as a direct child of treeId,
        numbered in steps of 10 after the highest existing child number.

        @param treeId: dotted tree number of the parent node
        @param select: index or list of indices into showNewProjects()
        @param RESPONSE: optional; redirects to showTree when given
        """
        numbers=[]

        # find the highest position number used directly below treeId
        projects=self.getProjectFields('xdata_05') # get Projects sorted by xdata_05
        for project in projects: # scan all subtrees of treeId

            # NOTE(review): treeId is not re.escape()d, so its dots match any
            # character here — presumably harmless for numeric tree ids
            founds=re.match(treeId+"\.(.*)",project[1].split(";")[0])
            if founds:
                if len(founds.group(0).split("."))==len(treeId.split("."))+1: # exactly one dot more, i.e. a direct child
                    try:
                        numbers.append(int(founds.group(0).split(".")[len(founds.group(0).split("."))-1]))
                    except:
                        numbers.append(int(0)) # non-numeric suffix counts as 0
        
        try:
            highest=max(numbers)
        except:
            highest=0 # no children yet -> start numbering at 10
        projects=self.showNewProjects()
        for i in self.makeList(select):
            highest+=10
            projects[int(i)][0].xdata_05=treeId+"."+str(highest)
        
        
        if RESPONSE is not None:
            RESPONSE.redirect('showTree')
1204:
    def changeTree(self,RESPONSE=None):
        """Apply the tree edit form: numbers, active flags and dates.

        Reads the request form; fields named "<nr>_runningNumber" mark the
        edited rows, where <nr> indexes into the list returned by getTree()
        with the same dep/onlyActive/onlyArchived filter values as the form.
        Entries whose number or dates changed are archived via
        copyObjectToArchive().

        @param RESPONSE: optional; redirects to showTree when given
        """
        form=self.REQUEST.form
        hashList={}
        onlyArchived=int(form.get("onlyArchived",0))
        onlyActive=int(form.get("onlyActive",0))
        dep=form.get("dep",None)

        fields=self.getTree(dep=dep,onlyArchived=onlyArchived,onlyActive=onlyActive)

        logging.info("GOT TREE!----------------------------------------------------")
        for field in form.keys():

            splitted=field.split('_')
            if (len(splitted)>1) and (splitted[1]=="runningNumber"): # field has the form <number>_runningNumber


                nr=int(splitted[0]) # row number of the edited record
                currentEntry = fields[nr]

                if form.has_key(str(nr)+'_active'): # active flag is set
                    fields[nr][3].setActiveFlag(True)
                else:
                    fields[nr][3].setActiveFlag(False)

                # did the tree number change?

                entryChanged = False;

                if isinstance(fields[nr][3].xdata_05,list): #for some reasons somtimes the content of the field is a list with one entry.
                    fields[nr][3].xdata_05=fields[nr][3].xdata_05[0]

                if not (fields[nr][3].xdata_05==form[str(nr)+'_number']):
                    logging.info("Changed!Number+++++++++++++++++++++++++++++++++")
                    logging.info(repr(fields[nr][3].xdata_05)+" ---> "+ repr(form[str(nr)+'_number']))
                    fields[nr][3].xdata_05=form[str(nr)+'_number']
                    entryChanged = True

                # did the completed date change?

                td = fields[nr][3].transformDate # date normalization helper used for the comparison

                if not (td(fields[nr][3].getCompletedAt())==td(form[str(nr)+'_completed'])):
                    fields[nr][3].setCompletedAt(form[str(nr)+'_completed'])
                    logging.info(repr(td(fields[nr][3].getCompletedAt()))+" ---> "+ repr(td(form[str(nr)+'_completed'])))
                    logging.info("Changed!Completed+++++++++++++++++++++++++++++++++")
                    entryChanged = True

                # did the started date change?
                if not (td(fields[nr][3].getStartedAt())==td(form[str(nr)+'_started'])):
                    fields[nr][3].setStartedAt(form[str(nr)+'_started'])

                    logging.info(repr(td(fields[nr][3].getStartedAt()))+" ---> "+ repr(td(form[str(nr)+'_started'])))
                    logging.info("Changed!Started+++++++++++++++++++++++++++++++++")
                    entryChanged = True


                if entryChanged:
                    # keep the previous version of the project in the archive
                    logging.info("Changed!+++++++++++++++++++++++++++++++++")
                    fields[nr][3].copyObjectToArchive()


        if RESPONSE is not None:
            RESPONSE.redirect('showTree')
1268:
1269: def getProjectWithId(self,id):
1270: fields=self.getProjectFields('xdata_05')
1271: for field in fields:
1272: if field[1]==id:
1273: return field[0]
1274:
1275: return None
1276:
1277:
1278:
1279:
1280: def getRelativeUrlFromPerson(self,list):
1281: """get urls to person list"""
1282: ret=[]
1283: persons=list.split(";")
1284: for person in persons:
1285:
1286: if len(person)>1: #nicht nur Trennzeichen
1287: splitted=person.split(",")
1288: if len(splitted)==1:
1289: splitted=person.split(" ")
1290: splittedNew=[re.sub(r'\s(.*)','$1',split) for split in splitted]
1291: if splittedNew[0]=='':
1292: del splittedNew[0]
1293: search=string.join(splittedNew,' AND ')
1294:
1295: if not search=='':
1296:
1297: try:
1298: proj=self.MembersCatalog({'title':search})
1299: except:
1300: proj=None
1301:
1302: if proj:
1303: #ret.append("<a href=%s >%s</a>"%(proj[0].absolute_url,person.encode('utf-8')))
1304: ret.append("<a href=%s >%s</a>"%('members/'+proj[0].id+'/index.html',person))
1305: else:
1306: #ret.append("%s"%person.encode('utf-8'))
1307: ret.append("%s"%person)
1308: return string.join(ret,";")
1309:
1310: def getMemberIdFromKey(self,key):
1311: """gibt die ensprechende id im members Ordner zum key"""
1312:
1313: if key=="":
1314: return ""
1.1.2.14 dwinter 1315: try:
1316: key=utf8ify(key)
1317: catalogged=self.MembersCatalog({'getKey':key})
1318: if len(catalogged)==0:
1319: return ""
1320: else:
1321: return catalogged[0].getObject().getId()
1.1.2.1 dwinter 1322:
1.1.2.14 dwinter 1323: except:
1324: return ""
1.1.2.1 dwinter 1325:
1326:
1.1.2.2 dwinter 1327:
1.1.2.1 dwinter 1328: def getProjectsOfMembers(self,date=None):
1329: """give tuple member /projects"""
1330: ret=[]
1331: members=self.getAllMembers()
1.1.2.37 dwinter 1332: logging.debug("X %s"%repr(members))
1.1.2.1 dwinter 1333: #return str(members)
1334: for x in members:
1.1.2.37 dwinter 1335: #logging.debug("X %s"%repr(x))
1.1.2.1 dwinter 1336: projects=self.getProjectsOfMember(key=x[1],date=date)
1337: if len(projects)>0:
1338: ret.append((x[0],projects))
1339:
1340: return ret
1341:
1342: def getProjectsOfMember(self,key=None,date=None,onlyArchived=1,onlyActive=1):
1343: """get projects of a member
1344:
1345: @param key: (optional) Key zur Idenfikation des Benutzer
1346: @param date: (optional) Version die zum Zeitpunkt date gueltig war
1347: @param onlyArchived:
1348: onlyArchived=0: alle Projekte
1349: onlyArchived= 1 : nur aktuelle Projekte
1350: onlyArchived = 2: nur archivierte Projekte
1351: """
1352: # TODO: Die ganze Loesung
1353: def sortP(x,y):
1354: """sort by sorting number"""
1355: return cmp(x.WEB_title,y.WEB_title)
1356:
1357: ret=[]
1358: if key:
1.1.2.35 dwinter 1359: logging.debug("MPIWGROOT (getProjectsOfMember):"+key)
1.1.2.12 dwinter 1360: proj=self.ProjectCatalog({'getPersonKeyList':utf8ify(key)})
1.1.2.1 dwinter 1361: else:
1362: return ret # key muss definiert sein
1363:
1.1.2.37 dwinter 1364: #logging.debug("MPIWGROOT (getProjectsOfMember):"+repr(proj))
1.1.2.1 dwinter 1365: if proj:
1366: proj2=[]
1367: for x in proj:
1.1.2.10 dwinter 1368: #logging.error("proj:%s"%repr(x.getPath()))
1.1.2.1 dwinter 1369: if (not getattr(x.getObject(),'invisible',None)) and (getattr(x.getObject(),'archiveTime','')==''):
1370: proj2.append(x)
1371:
1372: else:
1373: proj2=[]
1374:
1375:
1376:
1377: proj2.sort(sortP)
1378:
1379: projectListe=[]
1.1.2.10 dwinter 1380: #logging.error("getprojectsofmember proj2: %s"%repr(proj2))
1.1.2.1 dwinter 1381: for proj in proj2:
1382: obj=proj.getObject()
1383: add=False
1384: if onlyArchived==1: #nur aktuell projecte
1385: if not obj.isArchivedProject():
1386: add=True
1387: elif onlyArchived==2: #nur archivierte
1388: if obj.isArchivedProject():
1389: add=True
1390: else: #alle
1.1.2.10 dwinter 1391: add=True
1.1.2.1 dwinter 1392:
1393: if onlyActive==1: #nur active projecte
1394: if obj.isActiveProject():
1395: add=add & True
1396: else:
1397: add=add & False
1398:
1399: elif onlyArchived==2: #nur nicht aktvive
1400: if not obj.isActiveProject():
1401: add=add & True
1402: else: #alle
1403: add=add & True
1404:
1405: if add:
1406: projectListe.append(obj)
1407:
1.1.2.10 dwinter 1408: #logging.error("getprojectsofmember projectliste: %s"%repr(projectListe))
1.1.2.1 dwinter 1409: return projectListe
1410:
1411: def givePersonList(self,name):
1412: """check if person is in personfolder and return list of person objects"""
1413:
1414: splitted=name.split(",")
1415: if len(splitted)==1:
1416: splitted=name.lstrip().rstrip().split(" ")
1417: splittedNew=[split.lstrip() for split in splitted]
1418:
1419: if splittedNew[0]=='':
1420: del splittedNew[0]
1421: search=string.join(splittedNew,' AND ')
1422:
1423: if not search=='':
1424: proj=self.MembersCatalog({'title':search})
1425:
1426: if proj:
1427: return [[x.lastName,x.firstName] for x in proj]
1428: else:
1429: return []
1430:
1431: ## splitted=name.split(",") # version nachname, vorname...
1432: ## if len(splitted)>1:
1433: ## lastName=splitted[0]
1434: ## firstName=splitted[1]
1435: ## else:
1436: ## splitted=name.split(" ") #version vorname irgenwas nachnamae
1437:
1438: ## lastName=splitted[len(splitted)-1]
1439: ## firstName=string.join(splitted[0:len(splitted)-1])
1440:
1441: ## objs=[]
1442:
1443: #print self.members
1444: ## for x in self.members.__dict__:
1445: ## obj=getattr(self.members,x)
1446: ## if hasattr(obj,'lastName') and hasattr(obj,'firstName'):
1447:
1448: ## if (re.match(".*"+obj.lastName+".*",lastName) or re.match(".*"+lastName+".*",obj.lastName)) and (re.match(".*"+obj.firstName+".*",firstName) or re.match(".*"+firstName+".*",obj.firstName)):
1449:
1450: ## objs.append((obj,lastName+", "+firstName))
1451:
1452:
1453: ## return objs
1454:
1455:
1456: def personCheck(self,names):
1457: """all persons for list"""
1458: #print "names",names
1459: splitted=names.split(";")
1460: ret={}
1461: for name in splitted:
1462:
1463: if not (name==""):
1464: try:
1465: ret[name]=self.givePersonList(name)
1466: except:
1467: """NOTHIHN"""
1468: #print "RET",ret
1469: return ret
1470:
1471: def giveCheckList(self,person,fieldname):
1472: """return checklist"""
1473: #print "GCL",fieldname
1474: if fieldname=='xdata_01':
1475: x=self.personCheck(person.getContent(fieldname))
1476: #print "GCLBACKX",x
1477: return x
1478:
1479:
1480: def isCheckField(self,fieldname):
1481: """return chechfield"""
1482:
1483: return (fieldname in checkFields)
1484:
1485:
    def generateNameIndex(self):
        """Build an index of historical person names used in the projects.

        For every name in the fulltext historicalNames lexicon the
        authorities database is queried for matching last names; the text
        surrounding each fulltext hit is then scanned for a known first name
        to disambiguate. The result maps "lastname,firstname" to the list of
        project ids where the person occurs; it is stored in self.nameIndex
        and also returned.

        NOTE(review): connects to a local postgres "authorities" db with
        hard-coded credentials and prints progress to stdout — confirm
        before running in production.
        """
        import psycopg
        o = psycopg.connect('dbname=authorities user=dwinter password=3333',serialize=0)
        results={}
        print self.fulltext.historicalNames.items()
        for nameItem in self.fulltext.historicalNames.items(): # iterate over all names of the lexicon

            c = o.cursor()
            name=nameItem[0]
            print "check",name
            c.execute("select lastname,firstname from persons where lower(lastname) = '%s'"%quote(name))
            tmpres=c.fetchall()
            firstnames=[result[1] for result in tmpres] # find all firstnames
            if tmpres:
                lastname=tmpres[0][0]

            for found in self.fulltext({'names':name}):
                if found.getObject().isActual():
                    for nh in found.getObject().getGetNeighbourhood(name, length=50,tagging=False): # get the text surrounding the hit
                        # check whether a known first name appears before or after the last name
                        position=nh.find(lastname)
                        # words before the name
                        bevorS=nh[0:position].split()
                        if len(bevorS)>1:
                            try:
                                bevor=[bevorS[-1],bevorS[-2]]
                            except:
                                bevor=[bevorS[0]]
                        else:
                            bevor=[]
                        # words after the name
                        behindS= re.split("[,|;| ]",nh[position:])
                        if len(behindS)>2:
                            try:
                                behind=behindS[1:3]
                            except:
                                behind=[bevorS[1]]
                        else:
                            behind=[]
                        for firstname in firstnames:
                            if firstname in bevor+behind: # names with nobility particles are not found this way yet
                                id="%s,%s"%(lastname,firstname)
                                if not results.has_key(id):
                                    results[id]=[]
                                objId=found.getObject().getId()
                                if not (objId in results[id]):
                                    print "d %s for %s"%(id,objId)
                                    results[id].append(objId)
        self.nameIndex=results
        return results
1540:
    def editNameIndexHTML(self):
        """Show the edit form for the historical-names index.

        On first use the editable copy (nameIndexEdited) is initialized from
        the automatically generated nameIndex.
        """
        if not hasattr(self,'nameIndexEdited'): # copy the generated index if no edited one exists yet
            self.nameIndexEdited=copy.copy(self.nameIndex)
            print "huh"

        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','editHistoricalNames.zpt')).__of__(self)
        return pt()
1550:
1551: def getNamesInProject(self,projectId):
1552: """get all names ofnameIndexEdited which are references in projec with projectId"""
1553:
1554: ret=[]
1555: for name in self.nameIndexEdited.keys():
1556: if projectId in self.nameIndexEdited[name]:
1557: ret.append(name)
1558:
1559: return ret
1560:
    def editNameIndex(self,RESPONSE=None,name=None,occurrances=None,submit=None):
        """Edit the historical-names index (add / change / delete an entry).

        @param name: the index key ("lastname,firstname")
        @param occurrances: newline-separated list of project ids
        @param submit: one of "delete", "change", "add"
        @param RESPONSE: optional; redirects back to the edit form when given
        """
        nI=self.nameIndexEdited # nI introduced to make sure that changes to nameIndexEdited are known to the ZODB
        if submit=="delete":

            # remember deleted entries in deletedHistoricalNames
            dh=getattr(self,'deletedHistoricalNames',{})

            if type(dh) is ListType: # legacy format: replace an old list with a dict
                dh={}
            if not dh.has_key(name):
                dh[name]=occurrances.split("\n")
            else:
                dh[name]+=occurrances.split("\n")

            self.deletedHistoricalNames=dh

            del self.nameIndexEdited[name]


        elif (submit=="change"):

            nI[name]=occurrances.split("\n")[0:]

        elif (submit=="add"):
            if not nI.has_key(name):
                nI[name]=occurrances.split("\n")
            else:
                nI[name]+=occurrances.split("\n")

        self.nameIndexEdited=nI # reassign so the ZODB registers the mutation


        if RESPONSE is not None:
            RESPONSE.redirect('editNameIndexHTML')
1597:
1598:
1599: def restoreIndex(self):
1600: """restore"""
1601: self.nameIndexEdited=self.nameIndex
1602: return "done"
1603:
1604:
1.1.2.33 dwinter 1605: def sortResults(self,results):
1606: """search the catalog and give results back sorted by meta_type"""
1607: ret = {}
1608: logging.debug(results())
1609: for result in results():
1610: metaType = result.meta_type
1611: resultList= ret.get(metaType,[])
1612: resultList.append(result)
1613: ret[metaType]=resultList
1614:
1615: logging.debug(ret)
1616: return ret
1617:
1.1.2.1 dwinter 1618:
1619: def manage_addMPIWGRootForm(self):
1620: """form for adding the root"""
1621: pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addMPIWGRootForm.zpt')).__of__(self)
1622: return pt()
1623:
1624: def manage_addMPIWGRoot(self,id,title,connection_id="",RESPONSE=None):
1625: """add a root folder"""
1626: newObj=MPIWGRoot(id,title)
1627: self._setObject(id,newObj)
1628: ob=getattr(self,id)
1629: setattr(ob,'connection_id',connection_id)
1630: if RESPONSE is not None:
1631: RESPONSE.redirect('manage_main')
1.1.2.25 casties 1632:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>