Return to archive.py CVS log | Up to [Repository] / OSAS / OSA_system |
1.1 dwinter 1: """ TO DO generell falls noch ein File, das nicht index.meta -> archivierung ausgeben """
2: from types import *
3: import urllib
4: import os
5: import sys
6: import re
7: from AccessControl import ClassSecurityInfo
8: from AccessControl.Role import RoleManager
9: from Acquisition import Implicit
10: from Globals import Persistent
11: from time import strptime
12: from time import strftime
13: import time
14: import os.path
15: import dircache
16: import xml.dom.minidom
17: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
18: from Products.PageTemplates.PageTemplate import PageTemplate
19: import tempfile
# Directory used for the temporary files written by the archiver helpers below.
tempfile.tempdir="/var/tmp/archiver"

# Folder names that are never shown when browsing the repository
# (Mac OS / network filesystem artefacts).
exclusion=[".HSResource","lost+found","Network Trash Folder","TheFindByContentFolder","TheVolumeSettingsFolder"]
class fsentry(Implicit, Persistent, RoleManager):
    """A single filesystem entry (built from one line of `find` output).

    Stores the absolute path and the file's modification time so entries
    can be listed and sorted by date.
    """
    path = ""
    user = ""
    month = ""
    date = ""
    time = ""

    security = ClassSecurityInfo()

    def __init__(self, extpath):
        """Initialize from one line of `find` output (trailing newline required)."""
        extpath = os.path.abspath(re.search(r"(.*)\n", extpath).group(1))
        self.all = extpath
        self.path = extpath
        self.user = ""
        self.mtime = os.path.getmtime(extpath)

    security.declarePublic('getPath')
    def getPath(self):
        """Return the absolute path of the entry."""
        return self.path

    security.declarePublic('getUser')
    def getUser(self):
        """Return the owner of the entry (currently always empty)."""
        return self.user

    security.declarePublic('getDate')
    def getDate(self):
        """Return the modification time formatted as YYYYMMDDHHMM (GMT)."""
        return strftime("%Y%m%d%H%M", time.gmtime(self.mtime))

    # BUG FIX: this declaration previously read declarePublic('getDate'),
    # which left getID without its own public security declaration.
    security.declarePublic('getID')
    def getID(self):
        """Return a unique, sortable ID (date string followed by the path)."""
        return self.getDate() + self.getPath()

    security.declarePublic('getTime')
    def getTime(self):
        """Return the stored time string (unused, always empty)."""
        return self.time

    security.declarePublic('getAll')
    def getAll(self):
        """Return the full path as stored at construction time."""
        return self.all
69:
class filesystem(Implicit, Persistent, RoleManager):
    """Scan of a directory tree: maps each directory to the file names it
    contains and records which directories carry an index.meta/meta file."""
    # directory path (with trailing slash) -> list of file names inside it
    node={}
    # directory path (with trailing slash) -> "1" if index.meta/meta present
    hasindex={}
    security=ClassSecurityInfo()

    def getfs(self,start):
        """Return every path below *start* as the raw output lines of `find`.

        NOTE(review): *start* is interpolated into a shell command unquoted —
        paths containing spaces or shell metacharacters will break or be unsafe.
        """
        f = os.popen("find "+ start+" -name '*' ","r")
        lines = f.readlines()

        return lines

    def loadfs(self,start):
        """Fill self.node and self.hasindex from a scan of *start*."""
        for line in self.getfs(start):

            # split "<dirpath>/<filename>\n" into directory and file name
            g=re.search(r"(.*/)(.*)\n",line)
            if not g==None:
                path=g.group(1)
                file=g.group(2)
                if self.node.has_key(path):
                    elements=self.node[path]
                    elements.append(file)
                    self.node[path]=elements
                else:
                    self.node[path]=[file]
                # remember directories that carry metadata
                if (file=="index.meta") | (file=="meta"):
                    self.hasindex[path]="1"

    def __init__(self,start,reload=0):
        # node/hasindex are class attributes; reload=1 replaces them with
        # fresh per-instance dicts before scanning.
        if reload==1:
            self.node={}
            self.hasindex={}
            self.loadfs(start)


    security.declarePublic('getNode')
    def getNode(self):
        """Return the directory -> file-list mapping."""
        return self.node

    security.declarePublic('getKeys')
    def getKeys(self):
        """Return all scanned directory paths."""
        return self.node.keys()

    security.declarePublic('clearnode')
    def clearnode(self):
        """Drop the scanned tree; returns 0."""
        self.node={}
        return 0

    security.declarePublic('hasIndex')
    def hasIndex(self,path):
        """True if *path* (expected with trailing slash) has index.meta/meta."""
        return self.hasindex.has_key(path)


    def onlyIndex_old(self):
        """return only files with archive material

        Old variant that judges by file counts instead of index.meta content.
        """
        j={}
        for k in self.node:
            if self.hasindex.has_key(k):
                if len(self.node[k])>1:
                    if (len(self.node[k])==2) & ('meta' not in self.node[k]):
                        j[k]=self.node[k]
                    elif (len(self.node[k])==2) & ('meta' in self.node[k]):
                        """ nothing """
                    else:
                        j[k]=self.node[k]
        return j

    def archive_the_path(self,path):
        """Decide whether *path* should be archived, from its index.meta.

        Returns 1 when index.meta exists, the content is not a folder, and
        archive-storage-date is empty or missing; returns 0 otherwise.
        """
        try:
            #f = os.popen("cat "+path+"/index.meta","r")
            f =file(path+"/index.meta","r")

            lines = f.read()

            try:
                dom = xml.dom.minidom.parseString(lines)
                if getText(dom.getElementsByTagName("content-type")[0].childNodes)=="folder":
                    """folders are not archived"""
                    return 0
                else:
                    archive_storage_date=getText(dom.getElementsByTagName("archive-storage-date")[0].childNodes)

                    if archive_storage_date=="":

                        """empty, so archive it"""
                        return 1
                    else:
                        """already archived, skip it"""
                        return 0
            except:
                """tag missing, so archive it"""
                return 1
        except:
            """no index.meta, so do not archive"""
            return 0

    security.declarePublic('onlyIndex')
    def onlyIndex(self):
        """return only files with archive material (archive-storage-date not set)"""
        j={}

        for k in self.node:
            if self.archive_the_path(k):
                j[k]=self.node[k]
        return j

    security.declarePublic('getImageDirs')
    def getImageDirs(self,dom,path):
        """Return the <dir> names of an index.meta DOM, rewritten relative
        to the /mpiwg/online/ root when the combined path lies below it."""
        dirs=dom.getElementsByTagName("dir")
        dirback=[]
        for dir in dirs:
            temp=getText(dir.getElementsByTagName("name")[0].childNodes)
            # strip the ".../mpiwg/online/" prefix if present
            temp2=re.search(r"(.*)/mpiwg/online/(.*)",path+"/"+temp)
            if not temp2==None:
                try:
                    dirback.append(temp2.group(2))
                except:
                    """nothing"""
            else:
                dirback.append(temp)
        return dirback




    security.declarePublic('digilib')
    def digilib(self, path):
        """check if folder is a container for digilib files

        Returns the image-directory list, or the string "NO" when *path*
        carries no metadata.
        """
        if self.hasindex.has_key(path+"/"):
            return(self.parseIndexMeta(path))
        else:
            return "NO"




    security.declarePublic('isdigilib')
    def isdigilib(self, path):
        """return number of possible image directories usefull for digilib"""
        if self.hasindex.has_key(path+"/"):
            return(len(self.parseIndexMeta(path)))
        else:
            return 0

    security.declarePublic('parseIndexMeta')
    def parseIndexMeta(self,k):
        """parse indexmeta and return digilib path

        Returns the image directories for scanned documents, [] on parse
        errors, and None (implicitly) for other content types.
        """
        f = os.popen("cat "+k+"/index.meta","r")
        lines = f.read()

        try:
            dom = xml.dom.minidom.parseString(lines)
            content_type=getText(dom.getElementsByTagName("content-type")[0].childNodes)
            if (content_type=="scanned-document") or (content_type=="scanned document"):
                dirs=self.getImageDirs(dom,k)

                return dirs
        except:
            return []
232:
233:
class filesystem2(Implicit, Persistent, RoleManager):
    """Variant of `filesystem` that performs no scan at construction time;
    digilib()/isdigilib() check index.meta directly on disk instead of
    consulting the cached hasindex mapping."""
    node={}
    hasindex={}
    security=ClassSecurityInfo()

    def getfs(self,start):
        """Return every path below *start* as the raw output lines of `find`."""
        f = os.popen("find "+ start+" -name '*' ","r")
        lines = f.readlines()

        return lines

    def loadfs(self,start):
        """Fill self.node and self.hasindex from a scan of *start*."""
        for line in self.getfs(start):

            # split "<dirpath>/<filename>\n" into directory and file name
            g=re.search(r"(.*/)(.*)\n",line)
            if not g==None:
                try:
                    path=g.group(1)
                    file=g.group(2)
                except:
                    """nothing"""
                if self.node.has_key(path):
                    elements=self.node[path]
                    elements.append(file)
                    self.node[path]=elements
                else:
                    self.node[path]=[file]
                # remember directories that carry metadata
                if (file=="index.meta") | (file=="meta"):
                    self.hasindex[path]="1"

    def __init__(self,start,reload=0):
        """nothing"""

    security.declarePublic('getImageDirs')
    def getImageDirs(self,dom,path):
        """Return the <dir> names of an index.meta DOM, rewritten relative
        to the /mpiwg/online/ root when the combined path lies below it."""
        dirs=dom.getElementsByTagName("dir")
        dirback=[]
        for dir in dirs:
            temp=getText(dir.getElementsByTagName("name")[0].childNodes)
            temp2=re.search(r"(.*)/mpiwg/online/(.*)",path+"/"+temp)
            if not temp2==None:
                try:
                    dirback.append(temp2.group(2))
                except:
                    """nothing"""
            else:
                dirback.append(temp)
        return dirback


    security.declarePublic('digilib')
    def digilib(self, path):
        """check if folder is a container for digilib files"""
        if os.path.exists(path+"/index.meta"):
            return(self.parseIndexMeta(path))
        else:
            return "NO"

    security.declarePublic('isdigilib')
    def isdigilib(self, path):
        """Return the number of usable image directories below *path*."""
        if os.path.exists(path+"/index.meta"):
            return(len(self.parseIndexMeta(path)))
        else:
            return 0
    security.declarePublic('parseIndexMeta')
    def parseIndexMeta(self,k):
        """parse indexmeta and return digilib path

        NOTE(review): unlike filesystem.parseIndexMeta, this accepts only
        "scanned-document", not "scanned document" — confirm intended.
        """
        f = os.popen("cat "+k+"/index.meta","r")
        lines = f.read()

        try:
            dom = xml.dom.minidom.parseString(lines)
            content_type=getText(dom.getElementsByTagName("content-type")[0].childNodes)
            if content_type=="scanned-document":
                dirs=self.getImageDirs(dom,k)

                return dirs
        except:
            return []
316:
317:
class browse(Implicit, Persistent, RoleManager):
    """Cached directory browser with per-path expand/collapse state."""

    security=ClassSecurityInfo()
    # path -> cached (filtered) directory listing
    tree={}
    # path -> toggle state used by the tree views
    toggledict={}

    def filterExcluded(self,dir):
        """Return *dir* without the names listed in the global `exclusion`."""
        ret=[]
        for item in dir:
            if not item in exclusion:
                ret.append(item)
        return ret

    def __init__(self,startpath):
        self.tree={}
        self.tree[startpath]=self.filterExcluded(dircache.listdir(startpath))

    security.declarePublic('getTree')
    def getTree(self,path):
        """Return the cached listing of *path*, reading it on first access."""
        if self.tree.has_key(path):
            return self.tree[path]
        else:
            self.tree[path]=self.filterExcluded(dircache.listdir(path))
            return self.tree[path]

    security.declarePublic('isDirectory')
    def isDirectory(self,path,file):
        """True if *file* inside *path* is a directory."""
        return os.path.isdir(os.path.abspath(path+"/"+file))

    security.declarePublic('toggle')
    def toggle(self,tmppath,file):
        """Flip the stored toggle state of path/file between 0 and 1.

        NOTE(review): the very first toggle stores 4, not 1 — presumably a
        distinct "freshly opened" marker; confirm against the templates.
        """
        path=tmppath+"/"+file

        if self.toggledict.has_key(path):
            if self.toggledict[path]==0:
                self.toggledict[path]=1

            else:
                self.toggledict[path]=0

        else:
            self.toggledict[path]=4


    security.declarePublic('isToggle')
    def isToggle(self,tmppath,file):
        """Return the toggle state of path/file (0 when never toggled)."""
        path=tmppath+"/"+file

        if self.toggledict.has_key(path):

            return self.toggledict[path]
        else:

            return 0
372:
373:
def getfs(start):
    """Run `find` over *start* and return its raw output lines."""
    pipe = os.popen("find "+ start+" -name '*'","r")
    return pipe.readlines()
379:
def showall(start):
    """Print every path below *start* (one per line) and return 0."""
    lines = getfs(start)
    for line in lines:
        print line
    return 0
385:
def entries(start):
    """Return a list of fsentry objects, one per existing path below *start*."""
    found = []
    for raw in getfs(start):
        try:
            # only keep lines whose path still exists on disk
            if os.path.exists(os.path.abspath(re.search(r"(.*)\n", raw).group(1))):
                found.append(fsentry(raw))
        except:
            """nothing"""
    return found
399:
def getfilesystem(start,reload=0):
    """Build and return a freshly loaded filesystem scan of *start*."""
    return filesystem(start, 1)
405:
def getfilesystem2(start,reload=0):
    """Build and return a filesystem2 instance for *start* (lazy variant)."""
    return filesystem2(start, 1)
411:
def tree(start):
    """Return a browse object rooted at *start* for the file-tree views."""
    return browse(start)
416:
def sort_by_date(fs):
    """Sort a list of file entries by their ID (date+path), newest first."""
    by_id = {}
    ids = []
    for entry in fs:
        ident = entry.getID()
        ids.append(ident)
        by_id[ident] = entry
    ids.sort()
    ids.reverse()
    return [by_id[ident] for ident in ids]
430:
def _path_to_link(path, target):
    """Build an HTML breadcrumb navigation bar for *path*.

    Each path component becomes one link of the form
    <a href=TARGET?path=PREFIX>NAME</a>/ ordered from the root downwards.
    Depth is capped at ~20 ancestors; previously a deeper path made the
    emit loop index one past the end of the collected list (IndexError).
    """
    pathes = [[path, os.path.basename(path)]]
    tmppath = os.path.dirname(path)
    i = 0
    # collect ancestor prefixes up to the filesystem root ("/")
    while len(tmppath) != 1 and i <= 20:
        i = i + 1
        pathes.append([tmppath, os.path.basename(tmppath)])
        tmppath = os.path.dirname(tmppath)
    string = ""
    while i >= 0:
        string = string + "<a href=" + target + "?path=" + pathes[i][0] + ">" + pathes[i][1] + "</a>/"
        i = i - 1
    return string

def path_to_link(path):
    """generates navigation bar for showfiles"""
    return _path_to_link(path, "showfiles")

def path_to_link_view(path):
    """generates navigation bar for viewfiles"""
    return _path_to_link(path, "viewfiles")

def path_to_link_store(path):
    """generates navigation bar for storefiles"""
    return _path_to_link(path, "storefiles")
496:
497:
class Error(Implicit, Persistent, RoleManager):
    """Wrapper that stores one error/status record (a list) so page
    templates can read it via getError()."""
    # class-level default; __init__ replaces it with a per-instance copy
    error=[]
    security=ClassSecurityInfo()
    def __init__(self,initerror):
        """Store a copy of *initerror* (typically [path, [text, status]])."""
        self.error=initerror[0:]

    security.declarePublic('getError')
    def getError(self):
        """Return the stored error record."""
        return self.error
508:
class metacheck(Implicit, Persistent, RoleManager):
    """Run the external metacheck tool on one or more paths and keep the
    parsed results as a list of Error objects."""
    lines=[]
    security=ClassSecurityInfo()
    def parsearchive(self,str):
        """parse for error

        Turns the tool's output lines into [html_text, status].  The status
        comes from the "TAG: message" prefix of each line — ABORT maps to
        "error", DONE to "ok", anything else to "running"; the last line
        wins.  Empty output means the tool is still running.
        """
        retstr=''

        if not len(str)==0:
            for line in str:
                retstr=retstr+line+"<br>"
                check=re.search(r"(.*):(.*)",line)
                if check.group(1)=='ABORT':
                    error="error"
                elif check.group(1)=='DONE':
                    error="ok"
                else:
                    error="running"

            return [retstr,error]
        else:
            return ['','running']

    def __init__(self,path):
        """archive the documents in path

        *path* may be a single path string or a list of paths; one Error
        result is collected per path.
        """
        self.lines=[]

        if type(path)==StringType:
            f = os.popen("/usr/local/mpiwg/archive/metacheck "+path,"r")
            self.lines.append(Error([path,self.parsearchive(f.readlines())]))
        else:
            for singlepath in path:
                f = os.popen("/usr/local/mpiwg/archive/metacheck "+singlepath,"r")
                self.lines.append(Error([singlepath,self.parsearchive(f.readlines())]))
    security.declarePublic('messages')

    def messages(self):
        """Return the list of Error results, one per checked path."""
        return self.lines
546:
547:
548:
549:
class archive(Implicit, Persistent, RoleManager):
    """Start the external archiver in the background for one or more paths;
    progress is written to per-path temp files read back by messages()."""
    lines=[]
    security=ClassSecurityInfo()
    def parsearchive(self,str):
        """parse for error

        Same contract as metacheck.parsearchive: returns [html_text, status]
        with status derived from the last "TAG: message" line.
        """
        retstr=''

        if not len(str)==0:
            for line in str:
                retstr=retstr+line+"<br>"
                check=re.search(r"(.*):(.*)",line)
                if check.group(1)=='ABORT':
                    error="error"
                elif check.group(1)=='DONE':
                    error="ok"
                else:
                    error="running"

            return [retstr,error]
        else:
            return ['','running']

    def __init__(self,path,session):
        """archive the documents in path

        *path* may be a single path string or a list; registers itself in
        the Zope *session* under 'archiver'.
        """
        self.lines=[]
        # path -> temp file capturing that archiver run's output
        self.filenames={}
        session['archiver']=self


        if type(path)==StringType:
            # NOTE(review): tempfile.mktemp is race-prone; the output file is
            # created by the shell redirection of the background process.
            self.filenames[path]=tempfile.mktemp()
            f = os.popen("/usr/local/mpiwg/archive/archiver "+path+" > "+self.filenames[path]+" &","r")
        else:
            for singlepath in path:
                self.filenames[singlepath]=tempfile.mktemp()
                f = os.popen("/usr/local/mpiwg/archive/archiver "+singlepath+" > "+self.filenames[singlepath]+" &","r")

    security.declarePublic('messages')
    def messages(self):
        """Re-read all output files and return the current Error results."""
        self.lines=[]
        for path in self.filenames.keys():

            self.lines.append(Error([path,self.parsearchive(open(self.filenames[path],"r").readlines())]))
        return self.lines
594:
595:
def evalext(str):
    """Evaluate *str* as a Python expression and return the result.

    SECURITY: eval() on externally supplied text executes arbitrary code —
    only ever call this with trusted input.
    """
    return eval(str)
598:
def storeerror(ret,path,context,i):
    """Stash an error record and its path in the session under slot *i*,
    then return the relative URL of the error page for that slot."""
    sess = context.REQUEST.SESSION
    sess['error%i' % i] = ret
    sess['path%i' % i] = path
    return 'error?number=%i' % i
605:
def geterror(str,context):
    """Return the session value stored under the key *str*."""
    return context.REQUEST.SESSION[str]
609:
def readfile(path):
    """Return the entire contents of the file at *path*.

    Fixes: the file handle is now closed (it used to leak), and the content
    is read in one call instead of quadratic line-by-line concatenation.
    """
    f = open(path, 'r')
    try:
        return f.read()
    finally:
        f.close()
617:
def writefile(self,path,txt,REQUEST):
    """Write *txt* to the file at *path*, then render the acquired
    'archive2' object (Zope acquisition) as the response."""
    f=open(path,'w')
    f.write(txt)
    f.close()
    rval=self.aq_acquire('archive2')
    return rval()
624:
625:
def metachecker(self,path):
    """check the metadata the documents in path"""
    # remember the requested path for the result page
    self.REQUEST.SESSION['path']=self.REQUEST['path']
    return metacheck(path)
630:
def archiver(self,path):
    """archive the documents in path"""
    # the archive object registers itself in the session under 'archiver'
    tmp=archive(path,self.REQUEST.SESSION)
    return self.REQUEST.RESPONSE.redirect('archive4')
635:
def getText(nodelist):
    """Concatenate the character data of all text nodes in *nodelist*."""
    pieces = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            pieces.append(node.data)
    return "".join(pieces)
644:
def getBib(nodelist):
    """Render the child nodes of a <bib> element as an HTML table with one
    name/value row per node."""
    rc= "<table border='0'>"
    # NOTE(review): leftover debug output
    print "HI"
    for node in nodelist:

        # NOTE(review): this test is a no-op — the row below appears to be
        # emitted for every node, including text nodes; confirm intended.
        if node.nodeType == node.ELEMENT_NODE:
            """nothing"""
        rc = rc+"<tr><td valign='right'>"+str(node.nodeName)+":</td><td> "+getText(node.childNodes)+"</td></tr>"
        #print rc
    return rc+"</table>"
655:
def getMetafile(path):
    """get index.meta

    Renders a short HTML summary (name, creator, description, optional bib
    table) of path/index.meta; returns "NO_METADATA" when no file exists.
    """
    html=[]
    if not os.path.exists(path+"/index.meta"):

        return "NO_METADATA"
    else:
        f = os.popen("cat "+path+"/index.meta","r")
        lines = f.read()
        dom = xml.dom.minidom.parseString(lines)
        name=getText(dom.getElementsByTagName("name")[0].childNodes)
        creator=getText(dom.getElementsByTagName("creator")[0].childNodes)
        creation_date=getText(dom.getElementsByTagName("archive-creation-date")[0].childNodes)
        description=getText(dom.getElementsByTagName("description")[0].childNodes)
        try:
            type=getText(dom.getElementsByTagName("content-type")[0].childNodes)
        except:
            type=""
        # headline depends on the content type; unknown types read "Document"
        if type=="scanned document":
            html="<h3>Document: "+name+"</h3>"
        elif type=="folder":
            html="<h3>Folder: "+name+"</h3>"
        else:
            html="<h3>Document: "+name+"</h3>"

        html=html+"<p><i>created by: "+creator+" at: "+creation_date+"</i></p>"
        html=html+"<h4>Description</h4><p>"+description+"</p>"
        try:
            # optional bibliographic block: <meta><bib type="...">...</bib>
            bib = dom.getElementsByTagName("meta")[0].getElementsByTagName("bib")[0]
            if bib.attributes.has_key('type'):
                html=html+"<h4>Info ("+bib.attributes['type'].value+")</h4>"
            else:
                html=html+"<h4>Info</h4>"
            html=html+getBib(bib.childNodes)
            # NOTE(review): leftover debug output
            print html
        except:
            """none"""

        # html=html.encode('utf-8','replace')+getBib(bib.childNodes).encode('utf-8','replace')

        return html
697:
def hasMetafile(path):
    """Return whether *path* contains an index.meta file."""
    metapath = path + "/index.meta"
    return os.path.exists(metapath)
702:
def isdigilib2(path):
    """check if folder is candidate for digilib without metadata

    A directory qualifies (returns 1) when it directly contains an image
    file, or when the parent directory's index.meta declares it as an
    image directory; every failure mode returns 0.
    """
    try:
        dir=os.listdir(path)

        imagesuffixes=['.gif','.jpg','.jpeg','.png','.tiff','.tif','.JPG','.TIFF','.TIF']
        ret=""
        for a in dir:

            suffix=os.path.splitext(a)

            if suffix[1] in imagesuffixes:
                return 1

        # no image file found: consult the parent directory's index.meta
        try:
            dom=xml.dom.minidom.parse(os.path.split(path)[0]+"/index.meta")
            for node in dom.getElementsByTagName("dir"):

                if getText(node.getElementsByTagName("content-type")[0].childNodes)=="images":

                    if getText(node.getElementsByTagName("name")[0].childNodes)==os.path.split(path)[1]:
                        return 1
            return 0
        except:

            return 0




    except:
        return 0
736:
def isFullText(path,folder_name):
    """Return 1 when path/index.meta declares *folder_name* as a fulltext
    directory, else 0 (including missing/unparsable metadata)."""
    def _text(nodes):
        # concatenation of all text-node data (inline copy of getText)
        collected = ""
        for child in nodes:
            if child.nodeType == child.TEXT_NODE:
                collected = collected + child.data
        return collected

    try:
        dom = xml.dom.minidom.parse(path + "/index.meta")
    except:
        """ nothing"""
        return 0
    for node in dom.getElementsByTagName("dir"):
        try:
            if _text(node.getElementsByTagName("content-type")[0].childNodes) == "fulltext":
                if _text(node.getElementsByTagName("name")[0].childNodes) == folder_name:
                    return 1
        except:
            """nothing"""
    return 0
761:
1.1 dwinter 762:
763:
def isPresentation(path,folder_name):
    """Return 1 when path/index.meta declares *folder_name* as a
    presentation directory, else 0 (including missing/unparsable metadata)."""
    def _text(nodes):
        # concatenation of all text-node data (inline copy of getText)
        collected = ""
        for child in nodes:
            if child.nodeType == child.TEXT_NODE:
                collected = collected + child.data
        return collected

    try:
        dom = xml.dom.minidom.parse(path + "/index.meta")
        for dirnode in dom.getElementsByTagName("dir"):
            try:
                if _text(dirnode.getElementsByTagName('content-type')[0].childNodes) == 'presentation':
                    if _text(dirnode.getElementsByTagName("name")[0].childNodes) == folder_name:
                        return 1
            except:
                """nothing"""
        return 0
    except:
        return 0
781:
782:
783:
784:
785:
def changeName(path):
    """Map an absolute repository path to its digilib-relative form.

    Paths below .../mpiwg/online/ come back relative to that prefix; paths
    below .../mpiwg/production/docuserver/ get a "digifiles/" prefix;
    anything else yields "NO".
    """
    try:
        online = re.search(r"(.*)/mpiwg/online/(.*)", path)
        if online is not None:
            return online.group(2)
        return "digifiles/" + re.search(r"(.*)/mpiwg/production/docuserver/(.*)", path).group(2)
    except:  # archive area with no online representation -> "NO"
        return "NO"
795:
796:
def test(self):
    """Debug helper: create a DTML method called 'neu' and edit it."""
    self.i=1
    #newtemplate=PageTemplateFile('/usr/local/mpiwg/Zope/Extensions/test').__of__(self)
    self.manage_addProduct['OFSP'].manage_addDTMLMethod('neu','neu')
    self.getattr('neu').manage_edit('HELLO','neu')
    return "ok"
803:
804:
class ls(Implicit, Persistent, RoleManager):
    """Run `ls -R /` in the background and expose the growing output file."""
    path = ""
    user = ""
    month = ""
    date =""
    time = ""

    security=ClassSecurityInfo()

    def __init__(self,start):
        """*start* is a session-like mapping; registers self under 'outfile'."""
        self.outfile=tempfile.mktemp()
        start['outfile']=self
        os.popen("ls -R / >"+self.outfile+" &","r")


    security.declarePublic('read')
    def read(self):
        # NOTE(review): self.f is never assigned anywhere in this class —
        # read()/all() would raise AttributeError; confirm before use.
        return self.f.read()
    security.declarePublic('retself')
    def retself(self):
        """Return self (handy for page templates)."""
        return self
    security.declarePublic('all')
    def all(self):
        """Concatenate everything readable from self.f (see note on read)."""
        ret=""
        for g in self.f:
            ret=ret+g
        return ret

    security.declarePublic('printOutfile')
    def printOutfile(self):
        """Busy-wait until the output file exists, then return its lines."""
        while not os.path.exists(self.outfile):
            """nothing"""
        return open(self.outfile).readlines()
839:
class overview(Implicit,Persistent, RoleManager):
    """Collect resource log files from a directory: maps the resource line
    found in each file to that file's path."""
    dir=[]
    resources={}
    security=ClassSecurityInfo()

    def __init__(self,path):
        dir=os.listdir(path)

        for file in dir:
            self.resources[self.getResource(path,file)]=path+"/"+file


    def getResource(self,path,filename):
        """Return the first "INFO: resource ..." line of the file, or "error"."""
        f=file(path+"/"+filename,'r')

        for line in f.readlines():

            if line[0:4]=="INFO":
                if line[6:14]=="resource":
                    return line
        return "error"

    def parsearchive(self,str):
        """parse for error

        Same contract as metacheck.parsearchive: returns [html_text, status]
        with status derived from the last "TAG: message" line.
        """
        retstr=''

        if not len(str)==0:
            for line in str:
                retstr=retstr+line+"<br>"
                check=re.search(r"(.*):(.*)",line)
                if check.group(1)=='ABORT':
                    error="error"
                elif check.group(1)=='DONE':
                    error="ok"
                else:
                    error="running"

            return [retstr,error]
        else:
            return ['','running']

    security.declarePublic('messages')
    def messages(self):
        """Re-read every collected log file and return Error results."""
        self.lines=[]
        for name in self.resources.keys():
            path=self.resources[name]

            self.lines.append(Error([name,self.parsearchive(open(path,"r").readlines())]))
        return self.lines

    security.declarePublic('printResource')
    def printResource(self):
        """Return the resource-line -> file-path mapping."""
        return self.resources
893:
def getoverview(path):
    """Return an overview object built from the log files in *path*."""
    return overview(path)
897:
898:
def ls_test(self):
    """Start a background `ls` run and redirect to the 'next' page."""
    tmp=ls(self.REQUEST.SESSION)
    return self.REQUEST.RESPONSE.redirect('next')
902:
def storeFile(self,something):
    """Keep *something* in the session under 'something'; returns 1."""
    session = self.REQUEST.SESSION
    session['something'] = something
    return 1
906:
def getFile(self):
    """Return the value previously stored by storeFile()."""
    return self.REQUEST.SESSION['something']
909:
def isFolder(self,path):
    """Return TRUE when *path* may contain further folders or documents.

    For now every document that is not a scanned document counts as a
    folder (translated from the original German note).
    """
    return not isScannedDocument(self, path)

def isScannedDocument(self,path):
    """Return 1 when *path* is the root folder of a scanned document.

    Decided by the content-type element of path/index.meta; missing or
    unparsable metadata yields 0.  Fixes: uses open() instead of the
    non-idiomatic file() builtin and closes the handle (it used to leak).
    """
    def _text(nodes):
        # concatenation of all text-node data (inline copy of getText)
        collected = ""
        for child in nodes:
            if child.nodeType == child.TEXT_NODE:
                collected = collected + child.data
        return collected

    try:
        f = open(path + "/index.meta", "r")
        try:
            lines = f.read()
        finally:
            f.close()
        try:
            dom = xml.dom.minidom.parseString(lines)
            content_type = _text(dom.getElementsByTagName("content-type")[0].childNodes)
            if (content_type == "scanned-document") or (content_type == "scanned document"):
                return 1
            else:
                return 0
        except:
            return 0
    except:
        return 0
931:
from time import localtime,strftime

def date(self):
    """Return the current date formatted as DD.MM.YYYY."""
    now = localtime()
    return strftime("%d.%m.%Y", now)
936:
937:
938:
def EditIndex(self,path):
    """Load path/index.meta into the session and render the editindex
    page template for editing."""
    try:
        dom=xml.dom.minidom.parse(path+"/index.meta")
        indexmeta=dom.toxml()
    except:
        # missing or unparsable metadata -> start with an empty document
        indexmeta=""
    self.REQUEST.SESSION['indexmeta']=indexmeta
    self.REQUEST.SESSION['path']=path
    newtemplate=PageTemplateFile('/usr/local/mpiwg/Zope/Extensions/editindex').__of__(self)
    return newtemplate()
949:
def EditIndex2(self):
    """Write the edited metadata back to index.meta and redirect to the
    store-files view of the same path."""
    if not self.REQUEST.has_key('fileupload'):
        #newtext=urllib.unquote(self.REQUEST['indexmeta'])
        newtext=self.REQUEST['indexmeta']
        # NOTE(review): leftover debug output
        print newtext
    else:
        # NOTE(review): the uploaded file's content is ignored; only its
        # name is kept and the textarea content is written instead.
        self.file_name=self.REQUEST['fileupload'].filename
        #newtext=self.REQUEST.form['fileupload'].read()
        # HACK DW
        newtext=self.REQUEST['indexmeta']

    indexmeta=file(self.REQUEST.SESSION['path']+"/index.meta","w")
    indexmeta.writelines(newtext)
    return self.REQUEST.response.redirect("storage/storefiles?path="+self.REQUEST.SESSION['path'])
963: return self.REQUEST.response.redirect("storage/storefiles?path="+self.REQUEST.SESSION['path'])