1: #Neue Version Begin 5.4.2004
2:
3:
4: """Methoden zum hinzufügen von Dokumenten ins Archiv"""
5: from OSAS_helpers import readArchimedesXML
6: try:
7: import archive
8: except:
9: print "archive not imported"
10:
11: import os
12:
13: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
14: from Products.PageTemplates.PageTemplate import PageTemplate
15: import string
16: import urllib
17: import xml.dom.minidom
18: from time import localtime,strftime
19: from Globals import package_home
20:
21: import re
def showHelp(helptext):
    """Return *helptext* wrapped in a minimal HTML page.

    Bug fix: the template used a bare ``%`` as the placeholder, which
    raises ``ValueError: unsupported format character`` as soon as the
    string is formatted; the placeholder must be ``%s``.
    """
    return """<html>
<body>
%s
</body>
</html>"""%helptext
def add(self, no_upload=0):
    """Add metadata (or metadata plus documents) to the repository.

    no_upload=0: metadata only (no document upload); any other value
    enables the document-upload workflow.  Renders the OSAS_add_new form.
    """

    #self.referencetypes=self.ZopeFind(self,obj_metatypes=['OSAS_MetadataMapping'])
    # Collect all objects below self; add2() later filters them by title.
    self.referencetypes=self.ZopeFind(self)

    newtemplate=PageTemplateFile('Products/OSA_system/zpt/OSAS_add_new').__of__(self)
    # Remember the target path for the rest of the multi-step add dialog.
    self.REQUEST.SESSION['path']=self.REQUEST['path']
    if no_upload==0:
        # Flag the session as "metadata only"; later steps test this key.
        self.REQUEST.SESSION['no_upload']='yes'
    else:
        # Upload mode: clear a stale flag left over from a previous run.
        if self.REQUEST.SESSION.has_key('no_upload'):
            del self.REQUEST.SESSION['no_upload']

    return newtemplate()
45:
46:
47:
48: def getISO():
49: """ISO"""
50: try:
51: f=file(os.path.join(package_home(globals()),'iso639-1.inc'),'r').readlines()
52:
53: ret={}
54: for lineraw in f:
55: line=lineraw.encode('ascii','replace')
56: value=string.split(line,'\t')[0].encode('ascii','replace')
57: key=string.split(line,'\t')[1].encode('ascii','replace')
58: ret[key]=value
59: except:
60: ret={}
61: return ret
62:
63:
def add2(self):
    """Second step of the add dialog: enter bibliographical metadata.

    Stores the selected reference type, looks up its field definition in
    self.referencetypes and renders the OSAS_add_bibdata form.
    """
    self.reftype=self.REQUEST['Reference Type']
    self.REQUEST.SESSION['reftype']=self.reftype
    self.bibdata={}
    for referenceType in self.referencetypes:
        if referenceType[1].title == self.reftype:
            self.bibdata[referenceType[1].title]=referenceType[1].fields
            self.bibdata['data']=referenceType[1]
    self.fields=self.bibdata[self.reftype]

    # Language list for the form.  Read the ISO table once and reuse it
    # (the previous version called getISO() a second time for the keys).
    self.isolist=getISO()
    tmp=self.isolist.keys()
    tmp.sort()
    self.isokeys=tmp

    newtemplate=PageTemplateFile('Products/OSA_system/zpt/OSAS_add_bibdata').__of__(self)
    return newtemplate()
86: #return self.fields
87:
88:
def parse_query_string(query_string):
    """Parse a raw query string into a dict of key/value pairs.

    The whole string is URL-unquoted first; ``+`` is then mapped to
    ``-`` inside keys and to a space inside values (form encoding).
    The parameter was renamed from ``str`` so it no longer shadows the
    builtin; all callers in this file pass it positionally.
    """
    queries={}
    key=""
    tmp=""
    toggle="key" # are we currently collecting a key or a value?
    query_string=urllib.unquote(query_string)
    for ch in query_string:
        if ch=="=":
            key=tmp
            toggle="value"
            tmp=""
        elif ch=="&":
            queries[key]=tmp
            tmp=""
            toggle="key"
        else:
            if toggle=="key":
                if ch=="+" : ch="-"
            else:
                if ch=="+" : ch=" "
            tmp=tmp+ch
    queries[key]=tmp # flush the trailing (last) pair
    return queries
113:
def add3(self):
    """Third step: propose a folder name from the entered metadata.

    Builds a suggestion of the form ``autho_title_year`` (author and
    title truncated to 5 characters).  In metadata-only mode the current
    path is reused and the index.meta step (add5) follows directly;
    otherwise the naming form is rendered.
    """
    metadata=parse_query_string(self.REQUEST['QUERY_STRING'])
    self.REQUEST.SESSION['metadata']=metadata
    vorschlag=[]  # "vorschlag" = suggestion (parts of the folder name)
    if metadata.has_key('author'):
        vorschlag.append(metadata['author'][:5])
    if metadata.has_key('title'):
        vorschlag.append(metadata['title'][:5])
    if metadata.has_key('year'):
        vorschlag.append(metadata['year'])


    vorschlag_naming=string.join(vorschlag,"_")


    # Drop non-ASCII characters so the suggestion is a safe folder name.
    self.vorschlag_naming=unicode(vorschlag_naming,'ascii','ignore')
    if self.REQUEST.SESSION.has_key('no_upload'):
        # Metadata-only mode: keep the existing path, skip the naming form.
        self.REQUEST.SESSION['folder_name']=self.REQUEST.SESSION['path']
        return add5(self)
    else:
        newtemplate=PageTemplateFile('Products/OSA_system/zpt/OSAS_add_naming').__of__(self)
        return newtemplate()
137:
138:
def add4(self):
    """Fourth step: create the document folder and branch on submit type.

    For "upload images" the folder becomes the new session path and the
    image-upload form is rendered; otherwise the folder is created and
    the fulltext-upload dialog (addText) follows.
    """
    # Path relative to the /mpiwg archive root (e.g. "/online/...").
    self.path=re.search(r"/mpiwg(.*)",self.REQUEST.SESSION['path']).group(1)

    self.folder_name=self.REQUEST['folder_name']
    # next has to be changed -> error if back button is used!!
    self.REQUEST.SESSION['folder_name']=self.folder_name
    if self.REQUEST['submit']=="upload images":
        self.REQUEST.SESSION['path']=os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name'])


        try:
            os.mkdir(self.REQUEST.SESSION['path'])
            os.chmod(self.REQUEST.SESSION['path'],0774)
        except:
            """nothing"""  # best effort: the folder may already exist

        self.image_folder_name="pageimg"
        newtemplate=PageTemplateFile('Products/OSA_system/zpt/OSAS_upload').__of__(self)
        return newtemplate()
    else:
        os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
        return addText(self,os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
161:
162:
def add5(self):
    """Render the form for entering the index.meta metadata."""
    return PageTemplateFile('Products/OSA_system/zpt/OSAS_add_metadata').__of__(self)()
167:
def add6(self):
    """Final step: write index.meta and finish the add workflow.

    Renders the index_meta template with the collected metadata, writes
    it to <archive-path>/<folder_name>/index.meta (or straight into the
    session path in metadata-only mode), fixes permissions, and either
    redirects back to the archive view or shows the "saved" page.
    """
    metadata=parse_query_string(self.REQUEST['QUERY_STRING'])
    metadata['archive-path']=os.path.split(self.REQUEST.SESSION['path'])[0]
    #metadata['folder_name']=self.REQUEST.SESSION['folder_name']
    metadata['folder_name']=os.path.split(self.REQUEST.SESSION['path'])[1]
    metadata['content-type']="scanned document"
    self.reftype=self.REQUEST.SESSION['reftype']
    self.REQUEST.SESSION['add_metadata']=metadata
    self.add_metadata=metadata
    self.metadata=self.REQUEST.SESSION['metadata']
    # Serialize the bibliographic metadata as flat <tag>value</tag>
    # lines for inclusion in the index_meta template.
    self.metadataprint=""
    for tag in self.metadata.keys():
        if tag!="":
            self.metadataprint=self.metadataprint+"<"+tag+">"+self.metadata[tag]+"</"+tag+">\n"

    newtemplate=PageTemplateFile('Products/OSA_system/zpt/index_meta').__of__(self)
    newtemplate.content_type="text/plain"
    renderxml = newtemplate()
    if self.REQUEST.SESSION.has_key('no_upload'):
        metapath=self.REQUEST.SESSION['path']+"/index.meta"
    else:
        metapath=self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name']+"/index.meta"

    f=open(metapath,'w')
    f.writelines(renderxml)
    f.close()
    os.chmod(metapath,0664)
    # % binds before +, so the folder name is appended after formatting;
    # the resulting command is still "chmod -R 0775 <archive-path>/<folder>".
    os.popen('chmod -R 0775 %s'%self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name'])
    if self.REQUEST.SESSION.has_key('no_upload'):

        #newtemplate2=PageTemplateFile('/usr/local/mpiwg/Zope/Extensions/done',"text/html").__of__(self)
        return self.REQUEST.response.redirect(self.REQUEST['URL2']+"?path="+self.REQUEST.SESSION['path'])
    else:
        #print self.add_metadata['archive-path']
        # Path below /mpiwg/online/ used by the "saved" confirmation page.
        self.viewpath=re.search(r"/mpiwg/online/(.*)",self.add_metadata['archive-path']).group(1)

    # NOTE(review): when 'no_upload' is set the branch above already
    # returned, so this "text upload" case looks unreachable from here
    # -- confirm the intended control flow.
    if (self.REQUEST.SESSION.has_key('no_upload')) and (self.REQUEST.SESSION['no_upload']=="text"):
        """text upload"""
        return 1
    else:
        newtemplate2=PageTemplateFile('Products/OSA_system/zpt/OSAS_saved').__of__(self)
        newtemplate2.content_type="text/html"
        self.REQUEST.response.setHeader('Content-Type','text/html')
        return newtemplate2()
212:
213:
214:
def date(self):
    """Return the current local date formatted as DD.MM.YYYY."""
    # strftime defaults to localtime() when no time tuple is given.
    return strftime("%d.%m.%Y")
217:
218:
def addPresentation(self,path):
    """Render the form for adding a presentation folder below *path*.

    Reads author/title/date from <path>/index.meta (falling back through
    several tag spellings) and prefills an info.xml suggestion.  The new
    folder name is the first free "NN-presentation" slot inside *path*.

    Bug fix: the free-slot check previously tested "NN-presentation"
    relative to the Zope process working directory instead of *path*,
    so it practically always chose "01-presentation".
    """

    dom=xml.dom.minidom.parse(path+"/index.meta")

    # Tag names vary between documents; try the variants in turn.
    try:
        author=archive.getText(dom.getElementsByTagName('author')[0].childNodes)
    except:
        try:
            author=archive.getText(dom.getElementsByTagName('Author')[0].childNodes)
        except:
            try:
                author=archive.getText(dom.getElementsByTagName('Editor')[0].childNodes)
            except:
                author=""
    try:
        title=archive.getText(dom.getElementsByTagName('title')[0].childNodes)
    except:
        title=""

    try:
        date=archive.getText(dom.getElementsByTagName('year')[0].childNodes)
    except:
        try:
            date=archive.getText(dom.getElementsByTagName('Year')[0].childNodes)
        except:
            try:
                date=archive.getText(dom.getElementsByTagName('date')[0].childNodes)
            except:
                date=""
    # First unused NN-presentation folder *inside path*.
    i=1
    while os.path.exists(os.path.join(path,"%02d-presentation"%i)):
        i+=1
    self.REQUEST.SESSION['presentationname']="%02d-presentation"%i
    self.REQUEST.SESSION['path']=path
    self.REQUEST.SESSION['xmlvorschlag']="""<info>
<author>%s</author>
<title>%s</title>
<date>%s</date>
<display>yes</display>
</info>"""%(author,title,date)

    newtemplate=PageTemplateFile('Products/OSA_system/zpt/addPresentation').__of__(self)
    return newtemplate()
264:
def addPresentation2(self):
    """Write the presentation info.xml and register it in index.meta.

    Creates <path>/<folder_name>/info.xml from the submitted XML text,
    sets permissions and records the new directory in index.meta with
    content-type 'presentation', then redirects back to the archive view.
    """
    folder_name=self.REQUEST['folder_name']
    #print self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']

    path=self.REQUEST.SESSION['path']

    if not self.REQUEST.has_key('fileupload'):
        # No file uploaded: take the XML from the textarea.
        xmlinfo=self.REQUEST['xmltext']
        file_name="info.xml"

    else:
        file_name=self.REQUEST['fileupload'].filename
        xmlinfo=self.REQUEST.form['fileupload'].read()
        # hack: multipart parsing only works with Mozilla, so the
        # uploaded file is ignored and the textarea content is used instead
        file_name="info.xml"
        xmlinfo=self.REQUEST['xmltext']
    try:
        os.mkdir(path+"/"+folder_name)
    except:
        """nothing"""  # folder may already exist
    #print "NAME:",file_name
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(xmlinfo)
    f.close()
    try:
        os.chmod(path+"/"+folder_name,0755)
    except:
        """NO"""

    os.chmod(path+"/"+folder_name+"/"+file_name,0644)
    addDirsToIndexMeta(path,folder_name,content_description,'presentation')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
300:
def addText(self,path,folder=None):
    """Render the fulltext-upload form for *path*."""
    session=self.REQUEST.SESSION
    # TODO: generate the list of existing text folders instead of
    # hard-coding 'pageimg'.
    session['existing_names']=['pageimg']
    session['pathnew']=path
    return PageTemplateFile('Products/OSA_system/zpt/addText').__of__(self)()
307:
def addText2(self):
    """Store an uploaded fulltext file and register it in index.meta.

    Writes the uploaded file into a new <path>/<folder_name>/ directory,
    sets permissions, records the folder in index.meta with content-type
    'fulltext' and redirects back to the archive view.
    """
    folder_name=self.REQUEST['folder_name']
    #print self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']
    path=self.REQUEST.SESSION['pathnew']
    file_name=self.REQUEST['fileupload'].filename
    filedata=self.REQUEST.form['fileupload'].read()
    os.mkdir(path+"/"+folder_name)
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(filedata)
    f.close()
    os.chmod(path+"/"+folder_name,0755)
    os.chmod(path+"/"+folder_name+"/"+file_name,0644)
    addDirsToIndexMeta(path,folder_name,content_description,'fulltext')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
325:
def addTextExternal(self,path,texturl,version):
    """Add an external (Archimedes) fulltext to *path*.

    Downloads *texturl*, stores it in a newly created fulltextNN folder,
    registers that folder in index.meta, rewires <texttool><text> to the
    new file and re-registers the document with the TOC service.
    Returns an error string (or tuple) on failure, otherwise the lines
    returned by the registration CGI.
    """
    try: # read the new text version
        texttemp=urllib.urlopen(texturl).readlines()
        text=""
        for line in texttemp:
            text=text+line
    except: # reading the text failed
        return "ERROR: cannot read: %s"%texturl
    if TextExternalError(text): # no xml header
        return "ERROR: cannot read: %s"%texturl, "received:",text
    textpath=getNewTextPath(path) # create a new folder for the text
    splitted=string.split(texturl,"/")
    name=splitted[len(splitted)-1] # name of the XML file
    try:
        writefile=file(path+"/"+textpath+"/"+name,"w")
    except:
        return"ERROR: cannot write: %s"%path+"/"+textpath+"/"+name
    writefile.write(text)
    writefile.close()
    os.chmod(path+"/"+textpath+"/"+name,0644)

    # add the new folder to the index.meta XML
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('resource')[0] #getNode

    subnode=dom.createElement('dir')

    namenode=dom.createElement('name')
    namenodetext=dom.createTextNode(textpath)
    namenode.appendChild(namenodetext)
    subnode.appendChild(namenode)

    descriptionnode=dom.createElement('description')
    descriptionnodetext=dom.createTextNode('archimedes text:'+version)
    descriptionnode.appendChild(descriptionnodetext)
    subnode.appendChild(descriptionnode)

    contentnode=dom.createElement('content-type')
    contentnodetext=dom.createTextNode('fulltext')
    contentnode.appendChild(contentnodetext)
    subnode.appendChild(contentnode)

    node.appendChild(subnode)

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml().encode('utf-8'))
    writefile.close()

    # change the texttool tag (re-parse the file just written)
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode

    try: # texttool tag already exists
        subnode=node.getElementsByTagName('texttool')[0]
    except: # if not, report an error
        return "ERROR:no presentation configured yet, user Web Front End to do so!"


    try:
        texttoolnodelist=subnode.getElementsByTagName('text')

        if not len(texttoolnodelist)==0: # a text tag exists already: delete it
            subsubnode=subnode.removeChild(texttoolnodelist[0])
            subsubnode.unlink()
    except:
        """nothing"""
    # create the text tag anew
    textfoldernode=dom.createElement('text')
    textfoldernodetext=dom.createTextNode(textpath+"/"+name)
    textfoldernode.appendChild(textfoldernodetext)
    subnode.appendChild(textfoldernode)

    # write out index.meta
    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml().encode('utf-8'))
    writefile.close()

    # register with the TOC service
    return urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()
406:
407:
408:
def TextExternalError(text):
    """Return 1 if *text* does not look like an XML document, else 0.

    Checks for an ``<?xml`` declaration within the first 10 characters.
    Bug fix: the original wrapped ``re.search`` in try/except, but a
    failed search returns None rather than raising, and the pattern
    ``.*<?xml.*`` made the ``<`` optional -- so the function always
    returned 0.  The match result itself now decides.
    """
    firsts=text[0:10]
    if re.search(r"<\?xml",firsts):
        return 0
    return 1
417:
def getNewTextPath(path):
    """Create the first free "fulltextNN" folder below *path*.

    The folder is created with mode 0755; only the relative folder name
    (e.g. "fulltext2") is returned.
    """
    i=1
    while os.path.exists(path+"/fulltext%i"%i):
        i+=1
    os.mkdir(path+"/fulltext%i"%i)
    os.chmod(path+"/fulltext%i"%i,0755)
    return "fulltext%i"%i
425:
def addImages(self,path):
    """Render the form for adding an image folder to *path*."""
    session=self.REQUEST.SESSION
    # TODO: derive the list of existing image folders instead of
    # hard-coding 'pageimg'.
    session['existing_names']=['pageimg']
    session['path']=path
    return PageTemplateFile('Products/OSA_system/zpt/OSAS_addImages').__of__(self)()
432:
def addImages2(self):
    """Register a new image folder in index.meta and show the upload form.

    Records <folder_name> with content-type 'images' in index.meta, then
    rewrites the session path relative to the /mpiwg root for the
    second-stage upload template.
    """

    self.image_folder_name=self.REQUEST['folder_name']
    #print self.REQUEST['folder_name']
    self.content_description=self.REQUEST['content_description']
    #self.path=self.REQUEST.SESSION['path']


    self.content_type='images'
    addDirsToIndexMeta(self.REQUEST.SESSION['path'],self.image_folder_name,self.content_description,self.content_type)
    # Path relative to the /mpiwg archive root, as expected by the template.
    self.REQUEST.SESSION['path']=re.search(r"/mpiwg(.*)",self.REQUEST.SESSION['path']).group(1)
    newtemplate=PageTemplateFile('Products/OSA_system/zpt/OSAS_upload2').__of__(self)
    return newtemplate()
446:
447:
448:
def addDirsToIndexMeta(path,folder_name,content_description,content_type):
    """Append a <dir> entry describing a new folder to <path>/index.meta."""
    metafile=path+"/index.meta"
    dom=xml.dom.minidom.parse(metafile)
    resource=dom.getElementsByTagName('resource')[0]

    dirnode=dom.createElement('dir')
    # <name>, <description> and <content-type> children, in this order.
    for tag,value in (('name',folder_name),
                      ('description',content_description),
                      ('content-type',content_type)):
        child=dom.createElement(tag)
        child.appendChild(dom.createTextNode(value))
        dirnode.appendChild(child)
    resource.appendChild(dirnode)

    out=open(metafile,"w")
    out.write(dom.toxml().encode('utf-8'))
    out.close()
476:
def readArchimedesXML(folder):
    """Return the Archimedes corpus XML file registered for *folder*.

    Fetches the corpus manifest from the Archimedes TOC service, builds
    a dir -> xml mapping from its <item> elements and returns the entry
    for *folder* ("" when unknown).

    NOTE(review): this redefines the name imported from OSAS_helpers at
    the top of the file -- confirm which implementation is intended.
    """
    XML=urllib.urlopen("http://archimedes.mpiwg-berlin.mpg.de/cgi-bin/toc/toc.cgi?step=xmlcorpusmanifest").read()
    dom=xml.dom.minidom.parseString(XML)
    items=dom.getElementsByTagName('item')
    mapping={}  # renamed from "dict" so the builtin is not shadowed

    for item in items:
        try:
            mapping[item.attributes['dir'].value]=item.attributes['xml'].value
        except:
            """nothing"""  # item without dir/xml attribute: skip it

    return mapping.get(folder,"")
497:
498:
499:
500:
def combineTextImage2(self,path):
    """Create or replace the <texttool> meta tag in <path>/index.meta.

    Collects the submitted form fields (image folder, text, external
    text, pagebreak, presentation, xslt, templates, start page, project,
    language) into a fresh <texttool> element, updates the <lang> tag,
    writes index.meta back, registers the document with the TOC service
    and, if images are present, triggers thumbnail scaling on the remote
    image server.
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode


    subnodelist=node.getElementsByTagName('texttool')
    if not len(subnodelist)==0: # texttool tag already exists: delete it
        subnode=node.removeChild(subnodelist[0])
        subnode.unlink()

    subnode=dom.createElement('texttool') # create it anew


    # NOTE(review): raises KeyError/IndexError when no 'presentation'
    # field was submitted or its folder is empty -- confirm the form
    # always supplies one.
    presentfile=os.listdir(path+"/"+self.REQUEST['presentation'])[0]


    displaynode=dom.createElement('display')
    displaynodetext=dom.createTextNode('yes')
    displaynode.appendChild(displaynodetext)
    subnode.appendChild(displaynode)

    if self.REQUEST.has_key('image'):
        namenode=dom.createElement('image')
        namenodetext=dom.createTextNode(self.REQUEST['image'])
        namenode.appendChild(namenodetext)
        subnode.appendChild(namenode)

    if self.REQUEST.has_key('text'):
        textfile=os.listdir(path+"/"+self.REQUEST['text'])[0]
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(path+"/"+self.REQUEST['text']+"/"+textfile)
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('external'):#USE CVS instead of local text
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(self.REQUEST.SESSION['externxml'])
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('pagebreak'):
        pagebreaknode=dom.createElement('pagebreak')
        pagebreaknodetext=dom.createTextNode(self.REQUEST['pagebreak'])
        pagebreaknode.appendChild(pagebreaknodetext)
        subnode.appendChild(pagebreaknode)

    if self.REQUEST.has_key('presentation'):
        presentationnode=dom.createElement('presentation')
        presentationnodetext=dom.createTextNode(self.REQUEST['presentation']+"/"+presentfile)
        presentationnode.appendChild(presentationnodetext)
        subnode.appendChild(presentationnode)


    if self.REQUEST.has_key('xslt'):
        if not self.REQUEST['xslt']=="":
            xsltnode=dom.createElement('xslt')
            xsltnodetext=dom.createTextNode(self.REQUEST['xslt'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)


    if self.REQUEST.has_key('thumbtemplate'):
        if not self.REQUEST['thumbtemplate']=="":
            xsltnode=dom.createElement('thumbtemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['thumbtemplate'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('topbar'):
        if not self.REQUEST['topbar']=="":
            xsltnode=dom.createElement('toptemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['topbar'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('startpage'):
        if not self.REQUEST['startpage']=="":
            xsltnode=dom.createElement('startpage')
            xsltnodetext=dom.createTextNode(self.REQUEST['startpage'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('project'):
        if not self.REQUEST['project']=="":
            xsltnode=dom.createElement('project')
            xsltnodetext=dom.createTextNode(self.REQUEST['project'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    node.appendChild(subnode)

    # Remove any existing <lang> tags below <bib>.
    try:
        node2=node.getElementsByTagName('bib')[0]
        subs=node2.getElementsByTagName('lang')
        for sub in subs:
            node2.removeChild(sub)
    except:
        """nothing"""
    try:
        # Preferred location: <lang> below <bib>.
        main=dom.getElementsByTagName('bib')[0]
        node=dom.createElement('lang')
        textnode=dom.createTextNode(self.REQUEST['lang'])
        node.appendChild(textnode)
        main.appendChild(node)
    except:
        # No <bib> tag: fall back to <lang> below <resource>,
        # removing any existing <lang> tags first.
        try:
            subs=dom.getElementsByTagName('lang')
            main=dom.getElementsByTagName('resource')[0]
            for sub in subs:
                main.removeChild(sub)
        except:
            """nothing"""

        try:
            main=dom.getElementsByTagName('resource')[0]
            node=dom.createElement('lang')
            textnode=dom.createTextNode(self.REQUEST['lang'])
            #print "LANG:",self.REQUEST['lang']
            node.appendChild(textnode)
            main.appendChild(node)
        except:
            """nothing"""

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml().encode('utf-8'))
    writefile.close()



    # Register the document with the TOC service.
    urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()

    if self.REQUEST.has_key('image'): # if there are images
        # scale thumbnails on the remote image server
        os.popen("ssh archive@nausikaa2.rz-berlin.mpg.de /usr/local/mpiwg/scripts/scaleomat.pl %s /mpiwg/temp/online/scaled/thumb 90 >> /tmp/sc.out &"% re.sub('/mpiwg/online/','',self.REQUEST['path']+"/"+self.REQUEST['image']))

    else: # no images (bug in reg.cgi: replace the info file)
        f=file("/tmp/tmp_info.xml","w")
        f.write(patchedInfoXML(self.REQUEST['path']))
        f.close()
        splitted=path.split("/")
        fn=splitted[len(splitted)-1]
        remotePath="archive@nausikaa2.rz-berlin.mpg.de:/usr/local/share/archimedes/web/docs/proj/echo/1/docs/"+fn+"/info.xml"
        os.popen("scp /tmp/tmp_info.xml %s"%remotePath)
644:
def patchedInfoXML(path):
    """Build a replacement info.xml string from <path>/index.meta.

    Mandatory tags (text, pagebreak, display, lang) are copied verbatim;
    optional tags (toptemplate, thumbtemplate, startpage, author, title)
    are included only when present -- the bare excepts deliberately skip
    missing ones.  Raises if a mandatory tag is absent.
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")

    ret="<info>\n"
    ret+="<remotetext>%s</remotetext>\n"%archive.getText(dom.getElementsByTagName('text')[0].childNodes)
    ret+="<pagebreak>%s</pagebreak>\n"%archive.getText(dom.getElementsByTagName('pagebreak')[0].childNodes)
    ret+="<display>%s</display>\n"%archive.getText(dom.getElementsByTagName('display')[0].childNodes)
    try:
        ret+="<toptemplate>%s</toptemplate>\n"%archive.getText(dom.getElementsByTagName('toptemplate')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<thumbtemplate>%s</thumbtemplate>\n"%archive.getText(dom.getElementsByTagName('thumbtemplate')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<startpage>%s</startpage>\n"%archive.getText(dom.getElementsByTagName('startpage')[0].childNodes)
    except:
        """not"""

    ret+="<lang>%s</lang>\n"%archive.getText(dom.getElementsByTagName('lang')[0].childNodes)
    try:
        ret+="<author>%s</author>\n"%archive.getText(dom.getElementsByTagName('author')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<title>%s</title>\n"%archive.getText(dom.getElementsByTagName('title')[0].childNodes)
    except:
        """not"""

    ret+="</info>"

    return ret
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>