Annotation of OSAS/OSA_system/OSAS_add.py, revision 1.53
1.18 dwinter 1: #Neue Version Begin 5.4.2004
1.1 dwinter 2:
3:
1.53 ! casties 4: """Methoden zum hinzufuegen von Dokumenten ins Archiv"""
1.13 dwinter 5: from OSAS_helpers import readArchimedesXML
1.17 dwinter 6: try:
1.46 dwinter 7: import archive
1.17 dwinter 8: except:
1.46 dwinter 9: print "archive not imported"
10:
1.1 dwinter 11: import os
1.45 dwinter 12: import os.path
1.1 dwinter 13: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
14: from Products.PageTemplates.PageTemplate import PageTemplate
15: import string
16: import urllib
1.50 dwinter 17: import logging
18:
19: #ersetzt logging
def logger(txt, method, txt2):
    """Compatibility shim for the old zLOG-style logging call.

    Logs the concatenation of txt and txt2 at INFO level; the 'method'
    argument (the old severity) is accepted for call compatibility but
    ignored -- see the "#ersetzt logging" note in the original.
    """
    logging.info("%s%s" % (txt, txt2))
23:
24:
1.1 dwinter 25: import xml.dom.minidom
1.12 dwinter 26: from time import localtime,strftime
1.18 dwinter 27: from Globals import package_home
1.44 dwinter 28: from types import *
1.1 dwinter 29:
30: import re
def showHelp(helptext):
    """Return a minimal HTML page wrapping *helptext*.

    Bug fix: the template used a bare '%' before the newline, which
    raises "unsupported format character" at runtime when the %-operator
    is applied; it must be '%s'.
    """
    return """<html>
<body>
%s
</body>
</html>""" % helptext
def add(self, no_upload=0):
    """Add metadata, or metadata plus documents, to the repository.

    no_upload=0 means metadata only; any other value enables the
    document upload workflow (the 'no_upload' session flag is cleared).
    """
    # every mapping object found below this one is offered as a reference type
    self.referencetypes = self.ZopeFind(self)

    session = self.REQUEST.SESSION
    session['path'] = self.REQUEST['path']
    if no_upload == 0:
        session['no_upload'] = 'yes'
    elif session.has_key('no_upload'):
        del session['no_upload']

    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'OSAS_add_new')).__of__(self)
    return form()
1.1 dwinter 54:
55:
56:
def getISO():
    """Return a dict mapping ISO 639-1 language names to their codes.

    Reads the bundled 'iso639-1.inc' file (tab-separated: code<TAB>name)
    and returns {name: code}.  On a read or parse error an empty dict is
    returned, preserving the old best-effort behaviour.

    Fixes over the original: the file handle is closed (it leaked), and
    the bare 'except' is narrowed so programming errors are no longer
    silently swallowed.
    """
    ret = {}
    f = None
    try:
        try:
            f = open(os.path.join(package_home(globals()), 'iso639-1.inc'), 'r')
            for lineraw in f.readlines():
                line = lineraw.encode('ascii', 'replace').strip()
                parts = string.split(line, '\t')
                # parts[0] is the ISO code, parts[1] the language name
                ret[parts[1].encode('ascii', 'replace')] = parts[0].encode('ascii', 'replace')
        except (IOError, OSError, IndexError, UnicodeError):
            # unreadable or malformed file: fall back to an empty mapping
            ret = {}
    finally:
        if f is not None:
            f.close()
    return ret
1.1 dwinter 71:
1.20 dwinter 72:
def add2(self):
    """Second step: collect the bibliographic fields for the chosen type.

    Reads the selected 'Reference Type' from the request, exposes its
    field list and the ISO language list to the OSAS_add_bibdata
    template, and renders that template.
    """
    self.reftype = self.REQUEST['Reference Type']
    self.REQUEST.SESSION['reftype'] = self.reftype

    # map the selected reference type title to its metadata fields
    self.bibdata = {}
    for referenceType in self.referencetypes:
        if referenceType[1].title == self.reftype:
            self.bibdata[referenceType[1].title] = referenceType[1].fields
            self.bibdata['data'] = referenceType[1]
    self.fields = self.bibdata[self.reftype]

    # ISO 639 language list for the template; read the include file once
    # (the original called getISO() a second time, re-reading the file)
    self.isolist = getISO()
    tmp = self.isolist.keys()
    tmp.sort()
    self.isokeys = tmp

    newtemplate = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'OSAS_add_bibdata')).__of__(self)
    return newtemplate()
1.1 dwinter 96:
97:
def parse_query_string(str):
    """Parse a raw query string into a {key: value} dict.

    Minimal hand-rolled parser kept for backward compatibility: the
    string is URL-unquoted as a whole first, then scanned for '=' and
    '&'.  A '+' becomes '-' inside keys and ' ' inside values.  The
    fragment after the last '&' is always stored, so "a=1" yields
    {'a': '1'} and "" yields {'': ''}.

    (The parameter name 'str' shadows the builtin but is kept for
    interface compatibility; the builtin is not used here.)
    """
    # Py2/Py3 compatible access: urllib.unquote moved to
    # urllib.parse.unquote in Python 3
    try:
        unquote = urllib.unquote
    except AttributeError:
        from urllib.parse import unquote

    queries = {}
    key = ""
    tmp = ""
    toggle = "key"
    qs = unquote(str)
    for ch in qs:
        if ch == "=":
            key = tmp
            toggle = "value"
            tmp = ""
        elif ch == "&":
            queries[key] = tmp
            tmp = ""
            toggle = "key"
        else:
            if toggle == "key":
                if ch == "+":
                    ch = "-"
            else:
                if ch == "+":
                    ch = " "
            tmp = tmp + ch
    # flush the trailing key/value pair
    queries[key] = tmp
    return queries
122:
def add3(self):
    """Third step: suggest a folder name derived from the entered metadata.

    Builds a suggestion like "autho_title_1901" from the first five
    characters of author and title plus the year, then either jumps
    straight to the index.meta step (metadata-only mode) or shows the
    naming form.
    """
    metadata = parse_query_string(self.REQUEST['QUERY_STRING'])
    self.REQUEST.SESSION['metadata'] = metadata

    parts = []
    if metadata.has_key('author'):
        parts.append(metadata['author'][:5])
    if metadata.has_key('title'):
        parts.append(metadata['title'][:5])
    if metadata.has_key('year'):
        parts.append(metadata['year'])

    suggestion = string.join(parts, "_")
    # templates expect a unicode value; non-ascii bytes are dropped
    self.vorschlag_naming = unicode(suggestion, 'ascii', 'ignore')

    if self.REQUEST.SESSION.has_key('no_upload'):
        # metadata-only mode: the target folder already exists
        self.REQUEST.SESSION['folder_name'] = self.REQUEST.SESSION['path']
        return add5(self)

    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'OSAS_add_naming')).__of__(self)
    return form()
147:
1.1 dwinter 148:
def add4(self):
    """Fourth step: create the target folder and branch on upload type.

    Creates <session path>/<folder_name> (best effort), then depending
    on the submit button shows the image upload form or hands off to
    the PDF / fulltext upload handlers.
    """
    # path relative to the /mpiwg prefix, used by the templates
    self.path=re.search(r"/mpiwg(.*)",self.REQUEST.SESSION['path']).group(1)

    self.folder_name=self.REQUEST['folder_name']
    # next has to be changed -> error if back button is used!!
    self.REQUEST.SESSION['folder_name']=self.folder_name
    #return self.REQUEST['submit']

    try:
        #os.popen('mkdir '+self.REQUEST.SESSION['path'])
        # best effort: the folder may already exist
        os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
        os.chmod(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']),0775)

    except:

        """nothing"""

    if self.REQUEST['submit']=="upload images":
        # images go into a fixed "pageimg" subfolder of the new document
        self.REQUEST.SESSION['path']=os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name'])


        self.image_folder_name="pageimg"
        newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_upload')).__of__(self)

        return newtemplate()

    elif self.REQUEST['submit']=="upload pdf":
        # NOTE(review): this mkdir repeats the one in the try-block above
        # and raises OSError when that one succeeded -- looks like a bug,
        # confirm before changing
        os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
        return addPdf(self,os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
    else:
        # NOTE(review): same duplicated mkdir as in the pdf branch
        os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
        return addText(self,os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
182:
183:
1.1 dwinter 184: def add5(self):
1.46 dwinter 185: """ADD INDEX.META"""
186: try:
187: os.chmod(self.REQUEST.SESSION['path'],0775)
188: except:
189: pass
1.38 dwinter 190:
1.46 dwinter 191: newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_add_metadata')).__of__(self)
192: return newtemplate()
1.1 dwinter 193:
def add6(self):
    """Final step: render and write the index.meta file for the document.

    Collects the form metadata from the query string, renders the
    'index_meta' template to XML, writes it to <path>/index.meta and
    either redirects back (metadata-only mode) or shows the 'saved'
    page.
    """
    metadata=parse_query_string(self.REQUEST['QUERY_STRING'])
    # archive-path is the parent directory, folder_name its last component
    metadata['archive-path']=os.path.split(self.REQUEST.SESSION['path'])[0]
    #metadata['folder_name']=self.REQUEST.SESSION['folder_name']
    metadata['folder_name']=os.path.split(self.REQUEST.SESSION['path'])[1]
    metadata['content-type']="scanned document"
    self.reftype=self.REQUEST.SESSION['reftype']
    self.REQUEST.SESSION['add_metadata']=metadata
    self.add_metadata=metadata
    self.metadata=self.REQUEST.SESSION['metadata']
    # serialize the bibliographic metadata as one <tag>value</tag> per line
    self.metadataprint=""
    for tag in self.metadata.keys():
        if tag!="":
            self.metadataprint=self.metadataprint+"<"+tag+">"+self.metadata[tag]+"</"+tag+">\n"

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','index_meta')).__of__(self)
    newtemplate.content_type="text/plain"
    renderxml = newtemplate(encode='utf-8')


    if self.REQUEST.SESSION.has_key('no_upload'):
        metapath=self.REQUEST.SESSION['path']+"/index.meta"
    else:
        metapath=self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name']+"/index.meta"

    f=open(metapath,'w')
    try:
        f.write(renderxml.encode('utf-8'))
    except:
        # rendered text was already a byte string; assume latin-1 and re-encode
        f.write(unicode(renderxml,'latin-1').encode('utf-8'))
    #f.write(renderxml)

    f.close()
    os.chmod(metapath,0664)
    # make the whole document folder group-writable
    os.popen('chmod -R 0775 %s'%self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name'])
    if self.REQUEST.SESSION.has_key('no_upload'):

        #newtemplate2=PageTemplateFile('/usr/local/mpiwg/Zope/Extensions/done',"text/html").__of__(self)
        return self.REQUEST.response.redirect(self.REQUEST['URL2']+"?path="+self.REQUEST.SESSION['path'])
    else:
        #print self.add_metadata['archive-path']
        self.viewpath=re.search(r"/mpiwg/online/(.*)",self.add_metadata['archive-path']).group(1)

        # NOTE(review): 'no_upload' cannot be in the session in this else
        # branch, so the "text upload" case below looks unreachable -- confirm
        if (self.REQUEST.SESSION.has_key('no_upload')) and (self.REQUEST.SESSION['no_upload']=="text"):
            """text upload"""
            return 1
        else:
            newtemplate2=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_saved')).__of__(self)
            newtemplate2.content_type="text/html"
            self.REQUEST.response.setHeader('Content-Type','text/html')
            return newtemplate2()
245:
1.12 dwinter 246:
1.1 dwinter 247:
def date(self):
    """Return today's date formatted as DD.MM.YYYY."""
    now = localtime()
    return strftime("%d.%m.%Y", now)
1.1 dwinter 250:
251:
def addPresentation(self,path):
    """Show the form for adding a presentation folder to *path*.

    Reads author/title/date from the existing index.meta (trying
    several tag spellings), picks the next free 'NN-presentation'
    folder name and prefills an info.xml suggestion in the session.
    """

    dom=xml.dom.minidom.parse(path+"/index.meta")


    # author: try 'author', then 'Author', then 'Editor'; default ""
    try:
        author=archive.getText(dom.getElementsByTagName('author')[0].childNodes)
    except:
        try:
            author=archive.getText(dom.getElementsByTagName('Author')[0].childNodes)
        except:
            try:
                author=archive.getText(dom.getElementsByTagName('Editor')[0].childNodes)
            except:
                author=""
    try:
        title=archive.getText(dom.getElementsByTagName('title')[0].childNodes)
    except:
        title=""

    # date: try 'year', then 'Year', then 'date'; default ""
    try:
        date=archive.getText(dom.getElementsByTagName('year')[0].childNodes)
    except:
        try:
            date=archive.getText(dom.getElementsByTagName('Year')[0].childNodes)
        except:
            try:
                date=archive.getText(dom.getElementsByTagName('date')[0].childNodes)
            except:
                date=""
    # find the first unused NN-presentation folder name
    i=1
    while os.path.exists(path+"/%02d-presentation"%i):
        i+=1
    self.REQUEST.SESSION['presentationname']="%02d-presentation"%i
    self.REQUEST.SESSION['path']=path

    # suggested content for the presentation's info.xml
    tmpTxt=u"""<?xml version="1.0" encoding="UTF-8"?>
<info>
<author>%s</author>
<title>%s</title>
<date>%s</date>
<display>yes</display>
</info>"""%(author,title,date)

    self.REQUEST.SESSION['xmlvorschlag']=tmpTxt.encode('utf-8')

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addPresentation')).__of__(self)
    return newtemplate()
1.1 dwinter 301:
def addPresentation2(self):
    """Write the presentation's info.xml and register the folder.

    Creates <session path>/<folder_name>/info.xml from the submitted
    form, records the folder in index.meta and redirects back to the
    archive view.
    """
    folder_name=self.REQUEST['folder_name']
    #print self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']

    path=self.REQUEST.SESSION['path']

    if not self.REQUEST.has_key('fileupload'):
        xmlinfo=self.REQUEST['xmltext']
        file_name="info.xml"

    else:
        file_name=self.REQUEST['fileupload'].filename
        xmlinfo=self.REQUEST.form['fileupload'].read()
        # HACK: multipart parsing only works with Mozilla, so the
        # uploaded file read above is deliberately discarded and the
        # form's xmltext field is used instead
        file_name="info.xml"
        xmlinfo=self.REQUEST['xmltext']
    try:
        # best effort: the folder may already exist
        os.mkdir(path+"/"+folder_name)
    except:
        """nothing"""
    #print "NAME:",file_name
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(xmlinfo.encode('utf-8'))
    f.close()
    try:
        os.chmod(path+"/"+folder_name,0775)
    except:
        """NO"""

    os.chmod(path+"/"+folder_name+"/"+file_name,0664)
    addDirsToIndexMeta(path,folder_name,content_description,'presentation')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
1.32 dwinter 337:
338:
def addPdf(self, path, folder=None):
    """Show the PDF upload form for the given archive path."""
    # TODO: generate the list of folders that actually exist
    self.REQUEST.SESSION['existing_names'] = ['pageimg']
    self.REQUEST.SESSION['pathnew'] = path
    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'addPdf')).__of__(self)
    return form()
1.32 dwinter 345:
def addPdf2(self):
    """Store an uploaded PDF in a fixed 'pdf' subfolder.

    Writes the uploaded data to <pathnew>/pdf/<file_name>, records the
    folder in index.meta and redirects back to the archive view.
    """
    folder_name="pdf" # foldername fixed

    # use the uploaded file's own name unless the form supplied one
    if self.REQUEST['file_name']=="":
        file_name=self.REQUEST['fileupload'].filename
    else:
        file_name=self.REQUEST['file_name']

    #print self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']
    path=self.REQUEST.SESSION['pathnew']

    filedata=self.REQUEST.form['fileupload'].read()
    try:
        # best effort: the folder may already exist
        os.mkdir(path+"/"+folder_name)
    except:
        """nothing"""
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(filedata)
    f.close()
    os.chmod(path+"/"+folder_name,0755)
    os.chmod(path+"/"+folder_name+"/"+file_name,0644)
    addDirsToIndexMeta(path,folder_name,content_description,'pdf')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
1.1 dwinter 372:
def addText(self, path, folder=None):
    """Show the fulltext upload form for the given archive path."""
    # TODO: generate the list of existing text folders
    self.REQUEST.SESSION['existing_names'] = ['pageimg']
    self.REQUEST.SESSION['pathnew'] = path
    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'addText')).__of__(self)
    return form()
1.1 dwinter 379:
def addText2(self):
    """Store an uploaded fulltext file in a new subfolder.

    Writes the upload to <pathnew>/<folder_name>/<file_name>, records
    the folder in index.meta as 'fulltext' and redirects back.
    Note: unlike addPdf2, the mkdir here is NOT wrapped in try/except,
    so an existing folder raises OSError.
    """
    folder_name=self.REQUEST['folder_name']
    #print self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']
    path=self.REQUEST.SESSION['pathnew']
    file_name=self.REQUEST['fileupload'].filename
    filedata=self.REQUEST.form['fileupload'].read()
    os.mkdir(path+"/"+folder_name)
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(filedata)
    f.close()
    os.chmod(path+"/"+folder_name,0755)
    os.chmod(path+"/"+folder_name+"/"+file_name,0644)
    addDirsToIndexMeta(path,folder_name,content_description,'fulltext')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
1.1 dwinter 397:
def addTextExternal(self,path,texturl,version):
    """Fetch an external (archimedes) fulltext and register it.

    Downloads texturl, stores it in a new fulltextN folder below path,
    appends a matching <dir> entry to index.meta and replaces the
    <text> element inside the existing <texttool> tag.  Finally the
    document is re-registered with the archimedes server.
    Returns an error string on failure, otherwise the registration
    response lines.
    """
    try: # read the new text version
        texttemp=urllib.urlopen(texturl).readlines()
        text=""
        for line in texttemp:
            text=text+line
    except: # error while reading the text
        return "ERROR: cannot read: %s"%texturl
    if TextExternalError(text): # no xml header
        return "ERROR: cannot read: %s"%texturl, "received:",text
    textpath=getNewTextPath(path) # create a new folder for the text
    splitted=string.split(texturl,"/")
    name=splitted[len(splitted)-1] # name of the XML file
    try:
        writefile=file(path+"/"+textpath+"/"+name,"w")
    except:
        return"ERROR: cannot write: %s"%path+"/"+textpath+"/"+name
    writefile.write(text)
    writefile.close()
    os.chmod(path+"/"+textpath+"/"+name,0644)

    #add new file to XML
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('resource')[0] #getNode

    subnode=dom.createElement('dir')

    namenode=dom.createElement('name')
    namenodetext=dom.createTextNode(textpath)
    namenode.appendChild(namenodetext)
    subnode.appendChild(namenode)

    descriptionnode=dom.createElement('description')
    descriptionnodetext=dom.createTextNode('archimedes text:'+version)
    descriptionnode.appendChild(descriptionnodetext)
    subnode.appendChild(descriptionnode)

    contentnode=dom.createElement('content-type')
    contentnodetext=dom.createTextNode('fulltext')
    contentnode.appendChild(contentnodetext)
    subnode.appendChild(contentnode)

    node.appendChild(subnode)

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()

    # change the texttool tag (re-parse the file just written)
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode

    try: # texttool element must already exist
        subnode=node.getElementsByTagName('texttool')[0]
    except: # if not, report the error
        return "ERROR:no presentation configured yet, user Web Front End to do so!"


    try:
        texttoolnodelist=subnode.getElementsByTagName('text')

        # a text element already exists -> remove it first
        if not len(texttoolnodelist)==0:
            subsubnode=subnode.removeChild(texttoolnodelist[0])
            subsubnode.unlink()
    except:
        """nothing"""
    # create the new text element
    textfoldernode=dom.createElement('text')
    textfoldernodetext=dom.createTextNode(textpath+"/"+name)
    textfoldernode.appendChild(textfoldernodetext)
    subnode.appendChild(textfoldernode)

    # write index.meta back out
    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()

    # register with the archimedes server
    return urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()
478:
479:
1.1 dwinter 480:
def TextExternalError(text):
    """Return 1 if *text* does not look like an XML document, else 0.

    Only the first 10 characters are inspected for an '<?xml' header.

    Bug fixes: the original ignored the result of re.search (it only
    returned 1 when re.search itself raised, so errors were never
    detected), and '<?' in the pattern was an unescaped regex
    quantifier matching almost anything containing 'xml'.
    """
    firsts = text[0:10]
    try:
        match = re.search(r"<\?xml", firsts)
    except TypeError:
        # not string-like at all
        return 1
    if match is None:
        return 1
    return 0
1.1 dwinter 489:
def getNewTextPath(path):
    """Create and return the next free 'fulltext<i>' folder name below path."""
    i = 1
    while os.path.exists(path + "/fulltext%i" % i):
        i += 1
    newdir = path + "/fulltext%i" % i
    os.mkdir(newdir)
    os.chmod(newdir, int("0755", 8))  # rwxr-xr-x, same value as the old octal literal
    return "fulltext%i" % i
1.1 dwinter 497:
def addImages(self, path):
    """Show the image-folder upload form for the given archive path."""
    # TODO: generate the list of existing page image folders
    self.REQUEST.SESSION['existing_names'] = ['pageimg']
    self.REQUEST.SESSION['path'] = path
    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'OSAS_addImages')).__of__(self)
    return form()
1.1 dwinter 504:
def addImages2(self):
    """Register the new image folder in index.meta and show the upload form."""
    self.image_folder_name = self.REQUEST['folder_name']
    self.content_description = self.REQUEST['content_description']
    self.content_type = 'images'

    addDirsToIndexMeta(self.REQUEST.SESSION['path'],
                       self.image_folder_name,
                       self.content_description,
                       self.content_type)
    # templates need the path relative to the /mpiwg prefix
    self.REQUEST.SESSION['path'] = re.search(r"/mpiwg(.*)", self.REQUEST.SESSION['path']).group(1)

    form = PageTemplateFile(os.path.join(package_home(globals()), 'zpt', 'OSAS_upload2')).__of__(self)
    return form()
518:
1.1 dwinter 519:
520:
def addDirsToIndexMeta(path, folder_name, content_description, content_type):
    """Append a <dir> entry to the <resource> element of path/index.meta.

    The new entry carries <name>, <description> and <content-type>
    children (in that order) and the rewritten file is UTF-8 encoded.
    """
    dom = xml.dom.minidom.parse(path + "/index.meta")
    resource = dom.getElementsByTagName('resource')[0]

    dirnode = dom.createElement('dir')
    # child order matters for downstream consumers: name, description, content-type
    for tagname, text in (('name', folder_name),
                          ('description', content_description),
                          ('content-type', content_type)):
        child = dom.createElement(tagname)
        child.appendChild(dom.createTextNode(text))
        dirnode.appendChild(child)
    resource.appendChild(dirnode)

    outfile = open(path + "/index.meta", "w")
    outfile.write(dom.toxml(encoding='UTF-8'))
    outfile.close()
1.1 dwinter 548:
def readArchimedesXML(folder):
    """Look up the archimedes XML text URL for *folder*.

    Fetches the corpus manifest from the archimedes server, builds a
    dir -> xml mapping from its <item> elements and returns the entry
    for folder, or "" when there is none.

    NOTE(review): this definition shadows readArchimedesXML imported
    from OSAS_helpers at the top of the file -- confirm which one is
    intended.
    """
    manifest = urllib.urlopen("http://archimedes.mpiwg-berlin.mpg.de/cgi-bin/toc/toc.cgi?step=xmlcorpusmanifest").read()
    dom = xml.dom.minidom.parseString(manifest)

    mapping = {}
    for item in dom.getElementsByTagName('item'):
        try:
            mapping[item.attributes['dir'].value] = item.attributes['xml'].value
        except:
            # items without dir/xml attributes are skipped
            pass

    if mapping.has_key(folder):
        return mapping[folder]
    return ""
569:
1.5 dwinter 570:
1.46 dwinter 571:
1.1 dwinter 572:
def combineTextImage2(self,path):
    """Create or replace the <texttool> meta tag in path/index.meta.

    Rebuilds the texttool element from the request form fields (image,
    text, pagebreak, presentation, xslt, thumbtemplate, topbar,
    startpage, project, digiliburlprefix) and updates the <lang>
    element inside <bib> (or, as a fallback, inside <resource>).
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode


    subnodelist=node.getElementsByTagName('texttool')
    # a texttool element already exists -> remove it first
    if not len(subnodelist)==0:
        subnode=node.removeChild(subnodelist[0])
        subnode.unlink()

    subnode=dom.createElement('texttool') # create it afresh


    # pick the presentation info file: last non-hidden entry wins
    # NOTE(review): 'presentfile' stays undefined when the folder is
    # empty or the 'presentation' field is missing -- confirm callers
    # always supply a populated presentation folder
    presentfiles=os.listdir(path+"/"+self.REQUEST['presentation'])
    for presentfileTmp in presentfiles:
        if (presentfileTmp[0]!="."): # skip hidden files
            presentfile=presentfileTmp


    displaynode=dom.createElement('display')
    displaynodetext=dom.createTextNode('yes')
    displaynode.appendChild(displaynodetext)
    subnode.appendChild(displaynode)

    if self.REQUEST.has_key('image'):
        namenode=dom.createElement('image')
        namenodetext=dom.createTextNode(self.REQUEST['image'])
        namenode.appendChild(namenodetext)
        subnode.appendChild(namenode)

    if self.REQUEST.has_key('text'):
        # local text: reference the first file in the given folder
        textfile=os.listdir(path+"/"+self.REQUEST['text'])[0]
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(path+"/"+self.REQUEST['text']+"/"+textfile)
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('external'):#USE CVS instead of local text
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(self.REQUEST.SESSION['externxml'])
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('pagebreak'):
        pagebreaknode=dom.createElement('pagebreak')
        pagebreaknodetext=dom.createTextNode(self.REQUEST['pagebreak'])
        pagebreaknode.appendChild(pagebreaknodetext)
        subnode.appendChild(pagebreaknode)

    if self.REQUEST.has_key('presentation'):
        presentationnode=dom.createElement('presentation')
        presentationnodetext=dom.createTextNode(self.REQUEST['presentation']+"/"+presentfile)
        presentationnode.appendChild(presentationnodetext)
        subnode.appendChild(presentationnode)


    if self.REQUEST.has_key('xslt'):
        if not self.REQUEST['xslt']=="":
            xsltnode=dom.createElement('xslt')
            xsltnodetext=dom.createTextNode(self.REQUEST['xslt'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)


    if self.REQUEST.has_key('thumbtemplate'):
        if not self.REQUEST['thumbtemplate']=="":
            xsltnode=dom.createElement('thumbtemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['thumbtemplate'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('topbar'):
        # note the form field is 'topbar' but the element is 'toptemplate'
        if not self.REQUEST['topbar']=="":
            xsltnode=dom.createElement('toptemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['topbar'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('startpage'):
        if not self.REQUEST['startpage']=="":
            xsltnode=dom.createElement('startpage')
            xsltnodetext=dom.createTextNode(self.REQUEST['startpage'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('project'):
        if not self.REQUEST['project']=="":
            xsltnode=dom.createElement('project')
            xsltnodetext=dom.createTextNode(self.REQUEST['project'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('digiliburlprefix'):
        if not self.REQUEST['digiliburlprefix']=="":
            xsltnode=dom.createElement('digiliburlprefix')
            xsltnodetext=dom.createTextNode(self.REQUEST['digiliburlprefix'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    node.appendChild(subnode)

    # remove any existing lang elements from <bib>
    try:
        node2=node.getElementsByTagName('bib')[0]
        subs=node2.getElementsByTagName('lang')
        for sub in subs:
            node2.removeChild(sub)
    except:
        """nothing"""
    # append the new lang to <bib>; if that fails, fall back to <resource>
    try:
        main=dom.getElementsByTagName('bib')[0]
        node=dom.createElement('lang')
        textnode=dom.createTextNode(self.REQUEST['lang'])
        node.appendChild(textnode)
        main.appendChild(node)
    except:
        try:
            subs=dom.getElementsByTagName('lang')
            main=dom.getElementsByTagName('resource')[0]
            for sub in subs:
                main.removeChild(sub)
        except:
            """nothing"""

        try:
            main=dom.getElementsByTagName('resource')[0]
            node=dom.createElement('lang')
            textnode=dom.createTextNode(self.REQUEST['lang'])
            #print "LANG:",self.REQUEST['lang']
            node.appendChild(textnode)
            main.appendChild(node)
        except:
            """nothing"""

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()
710:
711:
712:
1.51 casties 713: # urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()
714: #
715: # if self.REQUEST.has_key('image'): # falls bilder
716: # path=re.sub('//','/',self.REQUEST['path']) # falls '//' im Pfad
717: # dlpath = re.sub('/mpiwg/online/','',path)+"/"+self.REQUEST['image']
718: #
719: # logger('OSas',logging.INFO,"ssh archive@nausikaa2.rz-berlin.mpg.de /usr/local/mpiwg/scripts/scaleomat -src=/mpiwg/online -dest=/mpiwg/temp/online/scaled/thumb -dir=%s -scaleto=90 -sync >> /tmp/sc.out &"%dlpath )
720: # ret=os.popen("ssh archive@nausikaa2.rz-berlin.mpg.de /usr/local/mpiwg/scripts/scaleomat -src=/mpiwg/online -dest=/mpiwg/temp/online/scaled/thumb -dir=%s -scaleto=90 -sync >> /tmp/sc.out &"%dlpath ).read()
721: # logger('OSAS (combine)',logging.INFO,ret)
1.46 dwinter 722:
723:
724:
1.49 dwinter 725: #else: # falls keine Bilder (bug in reg.cgi info file ersetzen)
726: # f=file("/tmp/tmp_info.xml","w")
727: # tmp=patchedInfoXML(self.REQUEST['path'])
728: # f.write(tmp.encode('utf-8'))
729: # f.close()
730: # splitted=path.split("/")
731: # fn=splitted[len(splitted)-1]
732: # remotePath="archive@nausikaa2.rz-berlin.mpg.de:/usr/local/share/archimedes/web/docs/proj/echo/1/docs/"+fn+"/info.xml"
733: # os.popen("scp /tmp/tmp_info.xml %s"%remotePath)
1.24 dwinter 734:
def patchedInfoXML(path):
    """Build a replacement info.xml string from path/index.meta.

    Used to work around a reg.cgi bug (see the commented-out caller
    above): copies text/pagebreak/display/lang plus the optional
    template, startpage, author and title elements into a fresh
    <info> document and returns it as a string.
    Note: the mandatory elements (text, pagebreak, display, lang) are
    read without a try/except and raise IndexError when missing.
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")

    ret="<info>\n"
    ret+="<remotetext>%s</remotetext>\n"%archive.getText(dom.getElementsByTagName('text')[0].childNodes)
    ret+="<pagebreak>%s</pagebreak>\n"%archive.getText(dom.getElementsByTagName('pagebreak')[0].childNodes)
    ret+="<display>%s</display>\n"%archive.getText(dom.getElementsByTagName('display')[0].childNodes)
    # the remaining elements are optional and skipped when absent
    try:
        ret+="<toptemplate>%s</toptemplate>\n"%archive.getText(dom.getElementsByTagName('toptemplate')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<thumbtemplate>%s</thumbtemplate>\n"%archive.getText(dom.getElementsByTagName('thumbtemplate')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<startpage>%s</startpage>\n"%archive.getText(dom.getElementsByTagName('startpage')[0].childNodes)
    except:
        """not"""

    ret+="<lang>%s</lang>\n"%archive.getText(dom.getElementsByTagName('lang')[0].childNodes)
    try:
        ret+="<author>%s</author>\n"%archive.getText(dom.getElementsByTagName('author')[0].childNodes)
    except:
        """not"""
    try:
        ret+="<title>%s</title>\n"%archive.getText(dom.getElementsByTagName('title')[0].childNodes)
    except:
        """not"""

    ret+="</info>"

    return ret
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>