1: #Neue Version Begin 5.4.2004
2:
3:
4: """Methoden zum hinzufügen von Dokumenten ins Archiv"""
5: from OSAS_helpers import readArchimedesXML
6: try:
7: import archive
8: except:
9: print "archive not imported"
10:
11: import os
12: import os.path
13: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
14: from Products.PageTemplates.PageTemplate import PageTemplate
15: import string
16: import urllib
17: import zLOG
18: import xml.dom.minidom
19: from time import localtime,strftime
20: from Globals import package_home
21: from types import *
22:
23: import re
def showHelp(helptext):
    """Return *helptext* wrapped in a minimal HTML page.

    BUGFIX: the template previously contained a bare '%' instead of
    '%s', so the trailing '% helptext' raised ValueError
    ("unsupported format character") on every call.
    """
    return """<html>
<body>
%s
</body>
</html>""" % helptext
def add(self, no_upload=0):
    """Add metadata or metadata and documents to the repository.

    no_upload=0: metadata only, no document upload; any other value
    enables the document-upload workflow.  Renders the OSAS_add_new
    form.
    """
    #self.referencetypes=self.ZopeFind(self,obj_metatypes=['OSAS_MetadataMapping'])
    self.referencetypes=self.ZopeFind(self)

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_add_new')).__of__(self)
    self.REQUEST.SESSION['path']=self.REQUEST['path']
    if no_upload==0:
        # remember in the session that this run is metadata-only
        self.REQUEST.SESSION['no_upload']='yes'
    else:
        # upload mode: make sure a stale flag from an earlier run is gone
        if self.REQUEST.SESSION.has_key('no_upload'):
            del self.REQUEST.SESSION['no_upload']

    return newtemplate()
47:
48:
49:
def getISO():
    """Load the ISO 639-1 language table shipped with the product.

    Reads 'iso639-1.inc' (two tab-separated columns) from the package
    directory and returns a dict mapping column 2 -> column 1
    (presumably language name -> code; verify against the .inc file).
    Any read or parse error yields an empty dict (best effort).
    """
    try:
        f=file(os.path.join(package_home(globals()),'iso639-1.inc'),'r').readlines()

        ret={}
        for lineraw in f:
            # force plain ascii; non-ascii bytes are replaced by '?'
            line=lineraw.encode('ascii','replace').strip()
            value=string.split(line,'\t')[0].encode('ascii','replace')
            key=string.split(line,'\t')[1].encode('ascii','replace')
            ret[key]=value
    except:
        # best effort: missing file or malformed line -> empty table
        ret={}
    return ret
64:
65:
def add2(self):
    """Step 2: pick the field list for the chosen reference type and
    render the bibliographic-data entry form (OSAS_add_bibdata).
    """
    self.reftype=self.REQUEST['Reference Type']
    self.REQUEST.SESSION['reftype']=self.reftype
    self.bibdata={}
    for referenceType in self.referencetypes:
        if referenceType[1].title == self.reftype:
            # remember both the field list and the mapping object itself
            self.bibdata[referenceType[1].title]=referenceType[1].fields
            self.bibdata['data']=referenceType[1]
    self.fields=self.bibdata[self.reftype]

    # ISO language table plus a sorted key list for the form's selector
    self.isolist=getISO()
    tmp=getISO().keys()
    tmp.sort()
    self.isokeys=tmp

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_add_bibdata')).__of__(self)
    return newtemplate()
89:
90:
def parse_query_string(str):
    """Parse a raw query string into a {key: value} dict.

    The whole string is URL-unquoted first, then scanned character by
    character: '=' switches from key to value, '&' finishes a pair.
    '+' becomes '-' inside keys and ' ' inside values.
    NOTE: because unquoting happens *before* splitting, encoded '&'
    or '=' inside a value will confuse the parse.
    """
    queries = {}
    key = ""
    buf = ""
    mode = "key"
    str = urllib.unquote(str)
    for ch in str:
        if ch == "=":
            # key finished, start collecting the value
            key = buf
            buf = ""
            mode = "value"
        elif ch == "&":
            # pair finished, start the next key
            queries[key] = buf
            buf = ""
            mode = "key"
        else:
            if mode == "key":
                if ch == "+":
                    ch = "-"
            elif ch == "+":
                ch = " "
            buf = buf + ch
    # flush the trailing pair (also stores {"": ""} for an empty input,
    # exactly like the original)
    queries[key] = buf
    return queries
115:
def add3(self):
    """Ask for a folder name (upload mode) or jump straight to the
    metadata form (metadata-only mode).

    Builds a folder-name proposal ("Vorschlag") from author, title and
    year of the submitted bibliographic data.
    """
    metadata=parse_query_string(self.REQUEST['QUERY_STRING'])
    self.REQUEST.SESSION['metadata']=metadata
    vorschlag=[]

    # proposal: first 5 chars of author and title plus the year
    if metadata.has_key('author'):
        vorschlag.append(metadata['author'][:5])
    if metadata.has_key('title'):
        vorschlag.append(metadata['title'][:5])
    if metadata.has_key('year'):
        vorschlag.append(metadata['year'])


    vorschlag_naming=string.join(vorschlag,"_")


    # non-ascii characters are silently dropped from the proposal
    self.vorschlag_naming=unicode(vorschlag_naming,'ascii','ignore')
    if self.REQUEST.SESSION.has_key('no_upload'):
        # metadata only: the current path itself is the target folder
        self.REQUEST.SESSION['folder_name']=self.REQUEST.SESSION['path']
        return add5(self)
    else:
        newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_add_naming')).__of__(self)
        return newtemplate()
140:
141:
142: def add4(self):
143:
144: self.path=re.search(r"/mpiwg(.*)",self.REQUEST.SESSION['path']).group(1)
145:
146: self.folder_name=self.REQUEST['folder_name']
147: # next has to be changed -> error if back button is used!!
148: self.REQUEST.SESSION['folder_name']=self.folder_name
149: #return self.REQUEST['submit']
150:
151: try:
152: #os.popen('mkdir '+self.REQUEST.SESSION['path'])
153: os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
154: os.chmod(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']),0775)
155:
156: except:
157:
158: """nothing"""
159:
160: if self.REQUEST['submit']=="upload images":
161: self.REQUEST.SESSION['path']=os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name'])
162:
163:
164: self.image_folder_name="pageimg"
165: newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_upload')).__of__(self)
166:
167: return newtemplate()
168:
169: elif self.REQUEST['submit']=="upload pdf":
170: os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
171: return addPdf(self,os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
172: else:
173: os.mkdir(os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
174: return addText(self,os.path.join(self.REQUEST.SESSION['path'],self.REQUEST['folder_name']))
175:
176:
def add5(self):
    """Render the index.meta entry form for the current folder (ADD INDEX.META)."""
    try:
        # make the target folder group-writable; ignore failures
        # (e.g. folder owned by another user)
        os.chmod(self.REQUEST.SESSION['path'],0775)
    except:
        pass

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_add_metadata')).__of__(self)
    return newtemplate()
186:
def add6(self):
    """Write index.meta for the new document and finish the workflow.

    Renders the index_meta template with the collected metadata,
    writes the result to <path>/index.meta (utf-8), makes the tree
    group-writable and either redirects back (metadata-only mode) or
    shows a confirmation page.
    """
    metadata=parse_query_string(self.REQUEST['QUERY_STRING'])
    # archive path and folder name are derived from the session path
    metadata['archive-path']=os.path.split(self.REQUEST.SESSION['path'])[0]
    metadata['folder_name']=os.path.split(self.REQUEST.SESSION['path'])[1]
    metadata['content-type']="scanned document"
    self.reftype=self.REQUEST.SESSION['reftype']
    self.REQUEST.SESSION['add_metadata']=metadata
    self.add_metadata=metadata
    self.metadata=self.REQUEST.SESSION['metadata']
    self.metadataprint=""
    for tag in self.metadata.keys():
        if tag!="":
            # serialize the bibliographic fields as xml elements
            self.metadataprint=self.metadataprint+"<"+tag+">"+self.metadata[tag]+"</"+tag+">\n"

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','index_meta')).__of__(self)
    newtemplate.content_type="text/plain"
    renderxml = newtemplate(encode='utf-8')


    if self.REQUEST.SESSION.has_key('no_upload'):
        metapath=self.REQUEST.SESSION['path']+"/index.meta"
    else:
        metapath=self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name']+"/index.meta"

    f=open(metapath,'w')
    try:
        f.write(renderxml.encode('utf-8'))
    except:
        # template output may already be a latin-1 byte string
        f.write(unicode(renderxml,'latin-1').encode('utf-8'))

    f.close()
    os.chmod(metapath,0664)
    # note: '%' binds before '+', so the folder name is appended to the
    # already-formatted command -> "chmod -R 0775 <archive-path>/<folder>"
    os.popen('chmod -R 0775 %s'%self.add_metadata['archive-path']+"/"+self.add_metadata['folder_name'])
    if self.REQUEST.SESSION.has_key('no_upload'):
        return self.REQUEST.response.redirect(self.REQUEST['URL2']+"?path="+self.REQUEST.SESSION['path'])
    else:
        # part of the path below /mpiwg/online, for the confirmation page
        self.viewpath=re.search(r"/mpiwg/online/(.*)",self.add_metadata['archive-path']).group(1)

    # NOTE(review): the 'no_upload' case already returned above, so this
    # branch looks unreachable; nesting reconstructed from mangled
    # indentation -- confirm against revision history
    if (self.REQUEST.SESSION.has_key('no_upload')) and (self.REQUEST.SESSION['no_upload']=="text"):
        """text upload"""
        return 1
    else:
        newtemplate2=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_saved')).__of__(self)
        newtemplate2.content_type="text/html"
        self.REQUEST.response.setHeader('Content-Type','text/html')
        return newtemplate2()
238:
239:
240:
def date(self):
    """Return the current date formatted as DD.MM.YYYY.

    *self* is unused; kept so the function can be bound as a Zope method.
    """
    now = localtime()
    return strftime("%d.%m.%Y", now)
243:
244:
def addPresentation(self,path):
    """Prepare adding a presentation folder below *path*.

    Pre-fills an info.xml proposal from author/title/date found in
    index.meta and renders the addPresentation form.
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")

    # author: try <author>, <Author>, <Editor> in that order
    try:
        author=archive.getText(dom.getElementsByTagName('author')[0].childNodes)
    except:
        try:
            author=archive.getText(dom.getElementsByTagName('Author')[0].childNodes)
        except:
            try:
                author=archive.getText(dom.getElementsByTagName('Editor')[0].childNodes)
            except:
                author=""
    try:
        title=archive.getText(dom.getElementsByTagName('title')[0].childNodes)
    except:
        title=""

    # date: try <year>, <Year>, <date> in that order
    try:
        date=archive.getText(dom.getElementsByTagName('year')[0].childNodes)
    except:
        try:
            date=archive.getText(dom.getElementsByTagName('Year')[0].childNodes)
        except:
            try:
                date=archive.getText(dom.getElementsByTagName('date')[0].childNodes)
            except:
                date=""
    # first unused folder name of the form NN-presentation
    i=1
    while os.path.exists(path+"/%02d-presentation"%i):
        i+=1
    self.REQUEST.SESSION['presentationname']="%02d-presentation"%i
    self.REQUEST.SESSION['path']=path

    # proposal ("Vorschlag") shown in the form's textarea
    tmpTxt="""<?xml version="1.0" encoding="UTF-8"?>
<info>
<author>%s</author>
<title>%s</title>
<date>%s</date>
<display>yes</display>
</info>"""%(author,title,date)

    self.REQUEST.SESSION['xmlvorschlag']=tmpTxt.encode('utf-8')

    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addPresentation')).__of__(self)
    return newtemplate()
294:
def addPresentation2(self):
    """Store the presentation info.xml in a new folder and register it
    in index.meta, then redirect back to the folder view."""
    folder_name=self.REQUEST['folder_name']
    content_description=self.REQUEST['content_description']

    path=self.REQUEST.SESSION['path']

    if not self.REQUEST.has_key('fileupload'):
        # no file uploaded: take the xml from the form's textarea
        xmlinfo=self.REQUEST['xmltext']
        file_name="info.xml"

    else:
        file_name=self.REQUEST['fileupload'].filename
        xmlinfo=self.REQUEST.form['fileupload'].read()
        # HACK: multipart handling only works with mozilla --
        # fall back to the textarea content unconditionally
        file_name="info.xml"
        xmlinfo=self.REQUEST['xmltext']
    try:
        os.mkdir(path+"/"+folder_name)
    except:
        """nothing"""  # folder may already exist
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(xmlinfo)
    f.close()
    try:
        os.chmod(path+"/"+folder_name,0775)
    except:
        """NO"""

    os.chmod(path+"/"+folder_name+"/"+file_name,0664)
    addDirsToIndexMeta(path,folder_name,content_description,'presentation')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
330:
331:
def addPdf(self,path,folder=None):
    """Step 1 of the pdf upload: remember *path* in the session and
    render the addPdf upload form."""
    self.REQUEST.SESSION['pathnew']=path
    # to be done: generate list of existing text files
    self.REQUEST.SESSION['existing_names']=['pageimg']
    tmpl=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addPdf')).__of__(self)
    return tmpl()
338:
def addPdf2(self):
    """Store the uploaded pdf below <pathnew>/pdf and register the
    folder in index.meta, then redirect back to the folder view."""
    folder_name="pdf" # foldername fixed

    if self.REQUEST['file_name']=="":
        # no explicit name given: reuse the uploaded file's name
        file_name=self.REQUEST['fileupload'].filename
    else:
        file_name=self.REQUEST['file_name']

    content_description=self.REQUEST['content_description']
    path=self.REQUEST.SESSION['pathnew']

    filedata=self.REQUEST.form['fileupload'].read()
    try:
        os.mkdir(path+"/"+folder_name)
    except:
        """nothing"""  # folder may already exist
    f=open(path+"/"+folder_name+"/"+file_name,"w")
    f.write(filedata)
    f.close()
    os.chmod(path+"/"+folder_name,0755)
    os.chmod(path+"/"+folder_name+"/"+file_name,0644)
    addDirsToIndexMeta(path,folder_name,content_description,'pdf')

    return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
365:
def addText(self,path,folder=None):
    """Step 1 of the fulltext upload: remember *path* in the session
    and render the addText upload form."""
    self.REQUEST.SESSION['pathnew']=path
    # to be done: generate list of existing text files
    self.REQUEST.SESSION['existing_names']=['pageimg']
    tmpl=PageTemplateFile(os.path.join(package_home(globals()),'zpt','addText')).__of__(self)
    return tmpl()
372:
373: def addText2(self):
374: """addtext"""
375: folder_name=self.REQUEST['folder_name']
376: #print self.REQUEST['folder_name']
377: content_description=self.REQUEST['content_description']
378: path=self.REQUEST.SESSION['pathnew']
379: file_name=self.REQUEST['fileupload'].filename
380: filedata=self.REQUEST.form['fileupload'].read()
381: os.mkdir(path+"/"+folder_name)
382: f=open(path+"/"+folder_name+"/"+file_name,"w")
383: f.write(filedata)
384: f.close()
385: os.chmod(path+"/"+folder_name,0755)
386: os.chmod(path+"/"+folder_name+"/"+file_name,0644)
387: addDirsToIndexMeta(path,folder_name,content_description,'fulltext')
388:
389: return self.REQUEST.RESPONSE.redirect(self.REQUEST['URL2']+'?path='+path)
390:
def addTextExternal(self,path,texturl,version):
    """Fetch an external fulltext from *texturl* and attach it to the
    document at *path*.

    Stores the text in a new fulltextN folder, registers that folder
    in index.meta, points the <texttool><text> entry at the new file
    and finally re-registers the document with the toc cgi.
    Returns an error string/tuple on failure, otherwise the lines
    returned by the registration cgi.
    """
    try: # read the new text version
        texttemp=urllib.urlopen(texturl).readlines()
        text=""
        for line in texttemp:
            text=text+line
    except: # error while reading the text
        return "ERROR: cannot read: %s"%texturl
    if TextExternalError(text): # no xml header
        return "ERROR: cannot read: %s"%texturl, "received:",text
    textpath=getNewTextPath(path) # create a new folder for the text
    splitted=string.split(texturl,"/")
    name=splitted[len(splitted)-1] # name of the XML file
    try:
        writefile=file(path+"/"+textpath+"/"+name,"w")
    except:
        return"ERROR: cannot write: %s"%path+"/"+textpath+"/"+name
    writefile.write(text)
    writefile.close()
    os.chmod(path+"/"+textpath+"/"+name,0644)

    # register the new folder in index.meta
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('resource')[0] #getNode

    subnode=dom.createElement('dir')

    namenode=dom.createElement('name')
    namenodetext=dom.createTextNode(textpath)
    namenode.appendChild(namenodetext)
    subnode.appendChild(namenode)

    descriptionnode=dom.createElement('description')
    descriptionnodetext=dom.createTextNode('archimedes text:'+version)
    descriptionnode.appendChild(descriptionnodetext)
    subnode.appendChild(descriptionnode)

    contentnode=dom.createElement('content-type')
    contentnodetext=dom.createTextNode('fulltext')
    contentnode.appendChild(contentnodetext)
    subnode.appendChild(contentnode)

    node.appendChild(subnode)

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()

    # update the texttool tag (re-parse the file just written)
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode

    try: # texttool section must already exist
        subnode=node.getElementsByTagName('texttool')[0]
    except: # otherwise report an error
        return "ERROR:no presentation configured yet, user Web Front End to do so!"


    try:
        texttoolnodelist=subnode.getElementsByTagName('text')

        if not len(texttoolnodelist)==0: # text tag exists already -> delete it
            subsubnode=subnode.removeChild(texttoolnodelist[0])
            subsubnode.unlink()
    except:
        """nothing"""
    # create the text entry anew
    textfoldernode=dom.createElement('text')
    textfoldernodetext=dom.createTextNode(textpath+"/"+name)
    textfoldernode.appendChild(textfoldernodetext)
    subnode.appendChild(textfoldernode)

    # write index.meta
    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()

    # register the document
    return urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()
471:
472:
473:
def TextExternalError(text):
    """Return 1 if *text* does not look like an XML document, else 0.

    Only the first 10 characters are inspected for an XML declaration.
    BUGFIX: the previous version could never report an error --
    re.search returns None on a non-match instead of raising, so the
    'except' branch was unreachable, and the pattern '.*<?xml.*'
    treated '<' as optional anyway.
    """
    firsts = text[0:10]
    if re.search(r"<\?xml", firsts):
        return 0
    return 1
482:
483: def getNewTextPath(path):
484: i=1
485: while os.path.exists(path+"/fulltext%i"%i):
486: i+=1
487: os.mkdir(path+"/fulltext%i"%i)
488: os.chmod(path+"/fulltext%i"%i,0755)
489: return "fulltext%i"%i
490:
def addImages(self,path):
    """Step 1 of the image upload: remember *path* in the session and
    render the OSAS_addImages form."""
    self.REQUEST.SESSION['path']=path
    # to be done: generate list of existing pageimages folders
    self.REQUEST.SESSION['existing_names']=['pageimg']
    tmpl=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_addImages')).__of__(self)
    return tmpl()
497:
def addImages2(self):
    """Register the chosen image folder in index.meta and render the
    upload instructions page (OSAS_upload2)."""
    self.image_folder_name=self.REQUEST['folder_name']
    self.content_description=self.REQUEST['content_description']


    self.content_type='images'
    addDirsToIndexMeta(self.REQUEST.SESSION['path'],self.image_folder_name,self.content_description,self.content_type)
    # keep only the part of the path below /mpiwg for the template
    self.REQUEST.SESSION['path']=re.search(r"/mpiwg(.*)",self.REQUEST.SESSION['path']).group(1)
    newtemplate=PageTemplateFile(os.path.join(package_home(globals()),'zpt','OSAS_upload2')).__of__(self)
    return newtemplate()
511:
512:
513:
def addDirsToIndexMeta(path,folder_name,content_description,content_type):
    """Append a <dir> entry (name / description / content-type) to the
    <resource> section of <path>/index.meta and rewrite the file."""
    dom = xml.dom.minidom.parse(path + "/index.meta")
    resource = dom.getElementsByTagName('resource')[0]

    dirnode = dom.createElement('dir')
    # the three children of <dir>, in the order index.meta expects
    for tagname, text in (('name', folder_name),
                          ('description', content_description),
                          ('content-type', content_type)):
        child = dom.createElement(tagname)
        child.appendChild(dom.createTextNode(text))
        dirnode.appendChild(child)

    resource.appendChild(dirnode)

    out = file(path + "/index.meta", "w")
    out.write(dom.toxml(encoding='UTF-8'))
    out.close()
541:
def readArchimedesXML(folder):
    """Look up *folder* in the archimedes corpus manifest.

    Fetches the xmlcorpusmanifest from the toc cgi and returns the
    'xml' attribute of the matching <item>, or "" if not found.
    NOTE: this definition shadows the readArchimedesXML imported from
    OSAS_helpers at the top of the file.
    """
    manifest=urllib.urlopen("http://archimedes.mpiwg-berlin.mpg.de/cgi-bin/toc/toc.cgi?step=xmlcorpusmanifest").read()
    dom=xml.dom.minidom.parseString(manifest)

    mapping={}
    for item in dom.getElementsByTagName('item'):
        try:
            mapping[item.attributes['dir'].value]=item.attributes['xml'].value
        except:
            """nothing"""  # item without dir/xml attribute -> skip

    return mapping.get(folder,"")
562:
563:
564:
565:
def combineTextImage2(self,path):
    """Create or replace the <texttool> section in <path>/index.meta.

    Collects display options, image/text folders, presentation file
    and various template settings from the request, rewrites the
    <lang> tag, re-registers the document and finally triggers
    thumbnail scaling (images) or patches the remote info.xml (texts).
    """
    dom=xml.dom.minidom.parse(path+"/index.meta")
    node=dom.getElementsByTagName('meta')[0] #getNode


    subnodelist=node.getElementsByTagName('texttool')
    if not len(subnodelist)==0: # texttool tag already exists -> delete it
        subnode=node.removeChild(subnodelist[0])
        subnode.unlink()

    subnode=dom.createElement('texttool') # create it anew


    # first file inside the chosen presentation folder
    # NOTE(review): this raises KeyError/IndexError if 'presentation'
    # is missing from the request or the folder is empty -- confirm
    # the form always supplies it
    presentfile=os.listdir(path+"/"+self.REQUEST['presentation'])[0]


    displaynode=dom.createElement('display')
    displaynodetext=dom.createTextNode('yes')
    displaynode.appendChild(displaynodetext)
    subnode.appendChild(displaynode)

    if self.REQUEST.has_key('image'):
        namenode=dom.createElement('image')
        namenodetext=dom.createTextNode(self.REQUEST['image'])
        namenode.appendChild(namenodetext)
        subnode.appendChild(namenode)

    if self.REQUEST.has_key('text'):
        # local text: point at the first file in the text folder
        textfile=os.listdir(path+"/"+self.REQUEST['text'])[0]
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(path+"/"+self.REQUEST['text']+"/"+textfile)
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('external'): # use CVS instead of local text
        textfoldernode=dom.createElement('text')
        textfoldernodetext=dom.createTextNode(self.REQUEST.SESSION['externxml'])
        textfoldernode.appendChild(textfoldernodetext)
        subnode.appendChild(textfoldernode)

    if self.REQUEST.has_key('pagebreak'):
        pagebreaknode=dom.createElement('pagebreak')
        pagebreaknodetext=dom.createTextNode(self.REQUEST['pagebreak'])
        pagebreaknode.appendChild(pagebreaknodetext)
        subnode.appendChild(pagebreaknode)

    if self.REQUEST.has_key('presentation'):
        presentationnode=dom.createElement('presentation')
        presentationnodetext=dom.createTextNode(self.REQUEST['presentation']+"/"+presentfile)
        presentationnode.appendChild(presentationnodetext)
        subnode.appendChild(presentationnode)

    # optional template settings below are only written when non-empty
    if self.REQUEST.has_key('xslt'):
        if not self.REQUEST['xslt']=="":
            xsltnode=dom.createElement('xslt')
            xsltnodetext=dom.createTextNode(self.REQUEST['xslt'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)


    if self.REQUEST.has_key('thumbtemplate'):
        if not self.REQUEST['thumbtemplate']=="":
            xsltnode=dom.createElement('thumbtemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['thumbtemplate'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    # note: the request field 'topbar' is stored as <toptemplate>
    if self.REQUEST.has_key('topbar'):
        if not self.REQUEST['topbar']=="":
            xsltnode=dom.createElement('toptemplate')
            xsltnodetext=dom.createTextNode(self.REQUEST['topbar'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('startpage'):
        if not self.REQUEST['startpage']=="":
            xsltnode=dom.createElement('startpage')
            xsltnodetext=dom.createTextNode(self.REQUEST['startpage'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('project'):
        if not self.REQUEST['project']=="":
            xsltnode=dom.createElement('project')
            xsltnodetext=dom.createTextNode(self.REQUEST['project'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    if self.REQUEST.has_key('digiliburlprefix'):
        if not self.REQUEST['digiliburlprefix']=="":
            xsltnode=dom.createElement('digiliburlprefix')
            xsltnodetext=dom.createTextNode(self.REQUEST['digiliburlprefix'])
            xsltnode.appendChild(xsltnodetext)
            subnode.appendChild(xsltnode)

    node.appendChild(subnode)

    # drop any old <lang> entries below <bib>
    try:
        node2=node.getElementsByTagName('bib')[0]
        subs=node2.getElementsByTagName('lang')
        for sub in subs:
            node2.removeChild(sub)
    except:
        """nothing"""
    try:
        # preferred place for the new <lang>: below <bib>
        main=dom.getElementsByTagName('bib')[0]
        node=dom.createElement('lang')
        textnode=dom.createTextNode(self.REQUEST['lang'])
        node.appendChild(textnode)
        main.appendChild(node)
    except:
        # no <bib>: fall back to <resource>
        # NOTE(review): nesting reconstructed from mangled indentation --
        # confirm both fallback blocks belong to this except branch
        try:
            subs=dom.getElementsByTagName('lang')
            main=dom.getElementsByTagName('resource')[0]
            for sub in subs:
                main.removeChild(sub)
        except:
            """nothing"""

        try:
            main=dom.getElementsByTagName('resource')[0]
            node=dom.createElement('lang')
            textnode=dom.createTextNode(self.REQUEST['lang'])
            node.appendChild(textnode)
            main.appendChild(node)
        except:
            """nothing"""

    writefile=file(path+"/index.meta","w")
    writefile.write(dom.toxml(encoding="UTF-8"))
    writefile.close()


    # re-register the document with the toc cgi
    urllib.urlopen("http://nausikaa2.rz-berlin.mpg.de:86/cgi-bin/toc/admin/reg.cgi?path=%s"%path).readlines()

    if self.REQUEST.has_key('image'): # images present: trigger thumbnail scaling
        path=re.sub('//','/',self.REQUEST['path']) # collapse '//' in the path
        dlpath = re.sub('/mpiwg/online/','',path)+"/"+self.REQUEST['image']

        zLOG.LOG('OSas',zLOG.INFO,"ssh archive@nausikaa2.rz-berlin.mpg.de /usr/local/mpiwg/scripts/scaleomat -src=/mpiwg/online -dest=/mpiwg/temp/online/scaled/thumb -dir=%s -scaleto=90 -sync >> /tmp/sc.out &"%dlpath )
        ret=os.popen("ssh archive@nausikaa2.rz-berlin.mpg.de /usr/local/mpiwg/scripts/scaleomat -src=/mpiwg/online -dest=/mpiwg/temp/online/scaled/thumb -dir=%s -scaleto=100 -sync >> /tmp/sc.out &"%dlpath ).read()
        zLOG.LOG('OSAS (combine)',zLOG.INFO,ret)

    else: # no images (bug in reg.cgi: replace the info file by hand)
        f=file("/tmp/tmp_info.xml","w")
        tmp=patchedInfoXML(self.REQUEST['path'])
        f.write(tmp.encode('utf-8'))
        f.close()
        splitted=path.split("/")
        fn=splitted[len(splitted)-1]
        remotePath="archive@nausikaa2.rz-berlin.mpg.de:/usr/local/share/archimedes/web/docs/proj/echo/1/docs/"+fn+"/info.xml"
        os.popen("scp /tmp/tmp_info.xml %s"%remotePath)
724:
def patchedInfoXML(path):
    """Build a minimal info.xml string from <path>/index.meta.

    The tags text (emitted as <remotetext>), pagebreak, display and
    lang are mandatory -- a missing one propagates the exception, as
    before.  toptemplate, thumbtemplate, startpage, author and title
    are optional and skipped silently when absent.
    """
    dom = xml.dom.minidom.parse(path + "/index.meta")

    def tagtext(tagname):
        # text content of the first element with this tag name
        return archive.getText(dom.getElementsByTagName(tagname)[0].childNodes)

    parts = ["<info>\n"]
    parts.append("<remotetext>%s</remotetext>\n" % tagtext('text'))
    parts.append("<pagebreak>%s</pagebreak>\n" % tagtext('pagebreak'))
    parts.append("<display>%s</display>\n" % tagtext('display'))
    for opt in ('toptemplate', 'thumbtemplate', 'startpage'):
        try:
            parts.append("<%s>%s</%s>\n" % (opt, tagtext(opt), opt))
        except:
            pass

    parts.append("<lang>%s</lang>\n" % tagtext('lang'))
    for opt in ('author', 'title'):
        try:
            parts.append("<%s>%s</%s>\n" % (opt, tagtext(opt), opt))
        except:
            pass

    parts.append("</info>")
    return "".join(parts)
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>