import os.path
import re
import socket
import string
import urllib
import xml.dom.minidom
from types import *

from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Globals import package_home
8:
# Zope meta_types that count as displayable/navigable content objects
# (used by ZopeFind filters in checkOnlyOneInGroup and getSubCols).
displayTypes = ['ZSQLExtendFolder','ZSQLBibliography','ECHO_group','ECHO_collection','ECHO_resource','ECHO_link','ECHO_sqlElement','ECHO_pageTemplate','ECHO_externalLink','ImageCollectionIFrame','VLP_resource','VLP_essay','ECHO_ZCatalogElement','ImageCollection','versionedFileFolder']
10:
def content_html(self,type):
    """Render the content template for *type*.

    If an object named '<type>_template' is acquirable from *self*, that
    custom template is rendered; otherwise the product's standard template
    for the type is loaded from disk and rendered in self's context.
    """
    template_id = type + "_template"
    if hasattr(self, template_id):
        # a custom template exists in the acquisition context -- prefer it
        custom = getattr(self, template_id)
        return custom()
    # fall back to the product's standard template for this type
    pt = PageTemplateFile('Products/ECHO_content/zpt/ECHO_%s_template_standard.zpt'%type).__of__(self)
    pt.content_type = "text/html"
    return pt()
25:
26:
27:
class ECHO_basis:
    """Mixin class providing common behaviour for ECHO content objects:
    RDF export for the navigation tree, content templates, UTF-8 safe
    title/label access, and the ZMI form for navigation entries."""

    def showRDF(self):
        """Return the complete RDF/XML navigation tree (Zope-publishable)."""
        self.REQUEST.RESPONSE.setHeader('Content-Type','text/xml')
        ret="""<?xml version="1.0" encoding="utf-8"?>\n<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:ECHONAVIGATION="http://www.echo.eu/rdf#">\n"""
        # NOTE(review): 'colllectionroot' is spelled with three l's -- confirm
        # against what getRDF() expects before correcting the URN.
        ret+=self.getRDF(urn="echo:colllectionroot")+"\n"

        ret+="""</RDF:RDF>"""
        return ret

    def createSubElementRDF(self,urn=None):
        """Return an RDF fragment (an RDF:Seq plus RDF:Description elements)
        for the sub-services of this resource: fulltext, image view and
        metadata. *urn* defaults to the object's absolute URL."""
        if not urn:
            urn=self.absolute_url()
        ret=""

        rettemp="""<RDF:Seq RDF:about="%s">\n"""%urn
        flag=0   # becomes 1 as soon as at least one sub-service is available

        li="""<RDF:li RDF:resource="%s" />\n"""
        # A service counts as available when its response contains no '<error>'.
        # NOTE(review): getFullTextXML/getImageView are each called twice in
        # this method (probe here, description below) -- potentially slow.
        if not ('<error>' in self.getFullTextXML(noredirect='Yes')):
            nurn=self.absolute_url()+'/getFullTextXML'
            rettemp+=li%nurn
            flag=1
        if not ('<error>' in self.getImageView(noredirect='Yes')):
            nurn=self.absolute_url()+'/getImageView'
            rettemp+=li%nurn
            flag=1


        if not ('<error>' in self.showMetaDataXML()):
            nurn=self.absolute_url()+'/showMetaDataXML'
            rettemp+=li%nurn
            flag=1

        rettemp+="</RDF:Seq>"

        # only emit the sequence when it has at least one member
        if flag==1:
            ret+=rettemp

        if not ('<error>' in self.getFullTextXML(noredirect='Yes')):
            nurn=self.absolute_url()+'/getFullTextXML'
            ret+=getRDFDescription(self,self.absolute_url()+'/getFullTextXML',urn=nurn,nameDef="Fulltext",typeName="ECHO_fulltext")

        if not ('<error>' in self.getImageView(noredirect='Yes')):
            nurn=self.absolute_url()+'/getImageView'
            ret+=getRDFDescription(self,self.absolute_url()+'/getImageView',urn=nurn,nameDef="Image View",typeName="ECHO_imageview")

        if not ('<error>' in self.showMetaDataXML()):
            nurn=self.absolute_url()+'/showMetaDataXML'
            ret+=getRDFDescription(self,self.absolute_url()+'/showMetaDataXML',urn=nurn,nameDef="Metadata",typeName="ECHO_metaData")

        return ret

    def content_html(self,type="collection"):
        """Content template when embedded in the ECHO environment;
        delegates to the module-level content_html()."""

        return content_html(self,type)

    def getTitle(self):
        """Return the title as a UTF-8 encoded byte string (Python 2)."""
        try:
            return self.title.encode('utf-8','ignore')
        except:
            # legacy data: re-decode from latin-1 first, then encode as UTF-8
            self.title=self.title.decode('iso-8859-1','ignore')[0:] # correct encoding error
            return self.title.encode('utf-8','ignore')

    def getLabel(self):
        """Return the label as a UTF-8 encoded byte string (Python 2)."""
        try:
            return self.label.encode('utf-8','ignore')
        except:
            # legacy data: re-decode from latin-1 first, then encode as UTF-8
            self.label=self.label.decode('iso-8859-1','ignore')[0:] # correct encoding error
            return self.label.encode('utf-8','ignore')


    def changeECHOEntriesForm(self):
        """Display the form for changing ECHO navigation entries."""
        pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','changeECHOEntriesForm')).__of__(self)
        return pt()

    def changeECHOEntries(self,label,weight,description,queryString,RESPONSE=None):
        """Change the entries for the ECHO navigation environment.
        @param label: label for the navigation"""
        self.label=label
        self.weight=weight
        self.description=description
        self.queryString=queryString

        if RESPONSE:
            RESPONSE.redirect("manage_main")

    # add the navigation-entries form as a ZMI management tab
    manage_options=({'label':'change ECHO Navigation Entries','action':'changeECHOEntriesForm'},)
124:
# end of the ECHO extensions
126:
127:
def toList(field):
    """Wrap a single string in a list; pass any other value through unchanged.

    Used to normalise form values that may arrive either as a single string
    or as a list of strings.

    Fix: the original compared ``type(field)==StringType`` (via the
    deprecated star-import from ``types``), which misses str subclasses and
    breaks under Python 3; ``isinstance`` is the idiomatic, equivalent check.
    """
    if isinstance(field, str):
        return [field]
    return field
134:
def getText(nodelist):
    """Concatenate the character data of all TEXT_NODE entries in *nodelist*."""
    return "".join(
        node.data for node in nodelist if node.nodeType == node.TEXT_NODE
    )
142:
def getTextFromNode(nodename):
    """Concatenate the character data of all direct TEXT_NODE children of *nodename*."""
    parts = []
    for child in nodename.childNodes:
        if child.nodeType == child.TEXT_NODE:
            parts.append(child.data)
    return "".join(parts)
150:
151:
def readFieldFromXML(meta_url,parent,field):
    """Read one specific metadata field from an XML document.

    *meta_url* may be a local path/file object (tried first) or a URL
    (fallback via urllib). Returns the text content of the first *field*
    element under the first *parent* element, or None if the document cannot
    be loaded or either element is missing.
    """
    try:
        dom = xml.dom.minidom.parse(meta_url)
    except:
        # not parseable directly -- retry treating meta_url as a URL
        try:
            dom = xml.dom.minidom.parse(urllib.urlopen(meta_url))
        except:
            return None
    if not dom:
        return None

    parentNodes = dom.getElementsByTagName(parent)
    if not parentNodes:
        return None

    fieldNodes = parentNodes[0].getElementsByTagName(field)
    if not fieldNodes:
        return None

    return getText(fieldNodes[0].childNodes)
174:
175:
176:
def urlopen(url):
    """urllib.urlopen with a short connect timeout.

    Temporarily lowers the process-wide default socket timeout to 2 seconds
    for the connection attempt, then restores it to 5 seconds.
    NOTE(review): if urllib.urlopen raises, the default timeout is left at
    2 seconds (no try/finally) -- presumably acceptable here; confirm.
    """
    socket.setdefaulttimeout(2)
    ret=urllib.urlopen(url)
    socket.setdefaulttimeout(5)
    return ret
    # previous timeoutsocket-based implementation, kept for reference:
# urlopener = urllib.URLopener()
#
# try:
#	  con = urlopener.open(url)
#	  return con
# except timeoutsocket.Timeout:
#	  return None
190:
191:
192:
193:
194:
def checkOnlyOneInGroup(object):
    """If *object* is a group containing exactly one displayable element,
    return that element; otherwise return *object* itself."""
    contained = object.ZopeFind(object, obj_metatypes=displayTypes)
    if len(contained) == 1:
        # exactly one child object -- present it directly instead of the group
        return contained[0][1]
    return object
203:
def getSubCols(self,sortfield="weight",subColTypes= displayTypes):
    """Return the children of *self* whose meta_type is in *subColTypes*,
    sorted by the attribute named *sortfield* (default 'weight').
    Objects lacking the sort attribute sort last."""

    ids=[]
    displayedObjects=self.ZopeFind(self,obj_metatypes=subColTypes)


    for entry in displayedObjects:
        # ZopeFind yields (id, object) tuples; collect only the objects
        object=entry[1]
        ids.append(object)

    try:
        # an explicit 'sortfield' attribute on self overrides the parameter
        sortfield=self.sortfield
    except:
        """nothing"""

    tmplist=[]
    for x in ids:
        if hasattr(x,sortfield):
            try:
                # NOTE(review): int(x) converts the *object*, not the sort key;
                # it almost always raises and is silently swallowed --
                # presumably int(getattr(x,sortfield)) was intended. TODO confirm.
                x=int(x)
            except:
                """nothing"""
            tmp=getattr(x,sortfield)
        else:
            # sentinel: objects without the sort attribute go to the end
            tmp=10000000
        tmplist.append((tmp,x))
    tmplist.sort()

    return [x for (key,x) in tmplist]
236:
def ECHO_rerenderLinksMD(self,obj=None,types=['title','label']):
    """Re-fetch metadata for every ECHO_resource below *obj* (default: self)
    and regenerate the fields listed in *types*. Returns an HTML report.
    NOTE(review): *types* is a mutable default argument -- harmless while it
    is never modified, but confirm before relying on that."""
    ret=""

    if not obj:
        obj = self

    entries=obj.ZopeFind(obj,obj_metatypes=['ECHO_resource'],search_sub=1)

    for entry in entries:
        if entry[1].meta_type == 'ECHO_resource':
            try:
                entry[1].ECHO_getResourceMD(template="no")
                if "title" in types:
                    entry[1].generate_title()
                if "label" in types:
                    entry[1].generate_label()
                # NOTE(review): getTitle() is emitted twice below; the second
                # call was probably meant to be getLabel(). TODO confirm.
                ret+="OK:"+entry[0]+"-- "+entry[1].getTitle().decode('utf-8')+"-- "+entry[1].getTitle().decode('utf-8')+"<br>"
            except:
                # best effort: record the failure and continue with the rest
                ret+="Error:"+entry[0]+"<br>"




    return "<html><body>"+ret+"Rerenderd all links to resources in: "+self.title+"</html></body>"
262:
def reloadMetaDataFromStorage(self,RESPONSE=None):
    """Copy metadata from the storage into ECHO for every ECHO_resource
    found below *self*. Returns the concatenated per-resource results,
    wrapped in an HTML page when *RESPONSE* is given."""
    messages = ""
    found = self.ZopeFind(self, obj_metatypes=['ECHO_resource'], search_sub=1)

    for name, resourceObj in found:
        messages += str(resourceObj.copyIndex_meta2echo_resource()) + "<br>"

    if RESPONSE is not None:
        return "<html><body>" + messages + "</html></body>"

    return messages
279:
def getRDFDescription(self,linkURL,urn=None,nameDef=None,typeName=None):
    """Build an <RDF:Description> element for the ECHO navigation RDF.

    The element contains name, type and linkClickable children for the
    object identified by *urn* (default: self.absolute_url()). The display
    name is *nameDef* if given, otherwise self.label / self.title /
    self.getId() in that order. The type is *typeName* or self.meta_type.

    Fixes: the original called ``re.sub('&','&')`` -- a no-op, almost
    certainly a mangled ``'&amp;'`` escape -- so unescaped ampersands
    produced ill-formed XML; fixed below. The dead computation of an
    ECHONAVIGATION:link element (never included in the result, see the
    commented-out return in the original) was removed, so *linkURL* is
    currently unused but kept for interface compatibility.
    """
    about = """<RDF:Description RDF:about="%s">"""
    name = """<ECHONAVIGATION:name>%s</ECHONAVIGATION:name>"""
    clickable = """<ECHONAVIGATION:linkClickable>%s</ECHONAVIGATION:linkClickable>"""
    # renamed from 'type' to avoid shadowing the builtin
    typeTemplate = """<ECHONAVIGATION:type>%s</ECHONAVIGATION:type>"""

    if not urn:
        urn = self.absolute_url()
    about2 = about % urn

    if not nameDef:
        if hasattr(self, 'label') and not (self.label == ""):
            name2 = name % self.label
        elif not self.title == "":
            name2 = name % self.title
        else:
            name2 = name % self.getId()
        # escape ampersands so the fragment stays well-formed XML
        # (naive: an already-escaped '&amp;' would be double-escaped)
        name2 = re.sub('&', '&amp;', name2)
    else:
        name2 = name % nameDef

    clickable2 = clickable % "true"

    if not typeName:
        type2 = typeTemplate % self.meta_type
    else:
        type2 = typeTemplate % typeName

    return about2 + "\n" + name2 + "\n" + type2 + "\n" + clickable2 + "\n</RDF:Description>"
320:
def getCopyrightsFromForm(self,argv):
    """Collect (media, partner, copyright) triples from the form values in
    *argv*. Keys are 'media<N>', 'partn<N>', 'copyr<N>'; entries with an
    institutional copyright are resolved via getPartnerCopyright, entries
    with an empty media value are dropped."""
    medias = {}
    partners = {}
    copyrights = {}
    buckets = {'media': medias, 'partn': partners, 'copyr': copyrights}

    # sort each form value into its bucket, keyed by the numeric suffix
    for key, value in argv.items():
        bucket = buckets.get(key[0:5])
        if bucket is not None:
            bucket[int(key[5:])] = value

    triples = [(medias[nm], partners[nm], copyrights[nm]) for nm in medias.keys()]

    copyrightsFinal = []
    for media, partner, rights in triples:
        if rights == 'institution0000':
            # institutional default: look the copyright up at the partner
            copyrightsFinal.append((media, partner, self.getPartnerCopyright(partner, '')))
        elif not media == '':
            copyrightsFinal.append((media, partner, rights))

    return copyrightsFinal
352:
# Classification labels available for the graphical linking viewer
# ('view point' markers vs. clickable 'area' regions).
viewClassificationListMaster=['view point','area']
355:
356:
def checkDiffs(self,metadict):
    """Compare this object's current field values against *metadict*.

    Returns a dict mapping field tag -> 1 when the stored value equals the
    value in *metadict*, 0 otherwise.
    NOTE(review): 1 means 'no difference' here -- confirm callers expect that.
    Side effect: caches mapping parts on self.referencetypes / self.fields.
    """




    def NoneToEmpty(obj):
        # normalise None (and other falsy values) to '' for comparison
        if obj:
            return obj
        else:
            return ""



    diffs={}

    tags=self.findTagsFromMapping(self.contentType)
    self.referencetypes=tags[2]
    self.fields=tags[3]


    for field in tags[1]:
        try:
            if (NoneToEmpty(self.getFieldValue(self.getFieldTag(tags,field)))==metadict[self.getFieldTag(tags,field)]):
                diffs[self.getFieldTag(tags,field)]=1
            else:

                diffs[self.getFieldTag(tags,field)]=0
        except:
            # a missing key or lookup error counts as a difference
            diffs[self.getFieldTag(tags,field)]=0

    return diffs
389:
390:
391:
392:
def sendFile(self, filename, type):
    """Send either an acquirable Zope object or a local product file as the response.

    *filename* is a '/'-separated path. If every segment resolves via
    getattr starting from self, the resulting object's index_html is
    returned; otherwise the file is read from the product directory and
    written to the response with Content-Type *type*.

    Fixes: use open() instead of the Python-2-only file() builtin, open in
    binary mode so arbitrary content is not newline-mangled, and close the
    file handle (the original leaked it).
    """
    paths = filename.split('/')
    object = self
    # look for an object called filename
    for path in paths:
        if hasattr(object, path):
            object = getattr(object, path)
        else:
            object = None
            break
    if object:
        # if the object exists then send it
        # NOTE(review): self.REQUEST.REQUEST looks odd (vs. self.REQUEST) --
        # kept as-is; confirm against index_html's signature.
        return object.index_html(self.REQUEST.REQUEST, self.REQUEST.RESPONSE)
    else:
        # send a local file with the given content-type
        fn = os.path.join(package_home(globals()), filename)
        self.REQUEST.RESPONSE.setHeader("Content-Type", type)
        fp = open(fn, 'rb')
        try:
            self.REQUEST.RESPONSE.write(fp.read())
        finally:
            fp.close()
        return
413:
class BrowserCheck:
    """Inspect the request's User-Agent header to determine browser and platform.

    Sets boolean flags isN4 (Netscape 4.x), isIE, isMac, isWin, isIEWin,
    isIEMac, and -- for IE user agents -- the version string versIE.
    NOTE: versIE remains unset for non-IE agents, matching the original.

    Fixes: the original used the ``string`` module without importing it, and
    raised IndexError for user agents without a '; '-separated comment part;
    both replaced with guarded str methods.
    """

    def __init__(self, zope):
        """Parse HTTP_USER_AGENT from *zope*.REQUEST and set the flags."""
        self.ua = zope.REQUEST.get_header("HTTP_USER_AGENT")
        # Netscape 4.x sends 'Mozilla/4.' but (unlike IE) no 'MSIE' token
        self.isN4 = ('Mozilla/4.' in self.ua) and ('MSIE' not in self.ua)
        self.isIE = 'MSIE' in self.ua
        # platform details live in the parenthesised comment part of the UA
        # (find() returns -1 when absent, so nav degrades to the last char,
        # as in the original)
        self.nav = self.ua[self.ua.find('('):]
        parts = self.nav.split("; ")
        # guard: UAs without '; ' made the original raise IndexError here
        if len(parts) > 1 and "MSIE" in parts[1]:
            # e.g. 'MSIE 6.0' -> version '6.0'
            self.versIE = parts[1].split(" ")[1]
        self.isMac = 'Macintosh' in self.ua
        self.isWin = 'Windows' in self.ua
        self.isIEWin = self.isIE and self.isWin
        self.isIEMac = self.isIE and self.isMac
430:
431:
432:
def writeMetadata(url,metadict,project=None,startpage=None,xslt=None,thumbtemplate=None,topbar=None,digiLibTemplate=None,xmlfrag=None,digiliburlprefix=None):
    """Read the metadata document at *url*, merge in *metadict* and the
    optional texttool settings, and return the modified XML as a UTF-8
    string, or (None, errormessage) when the document cannot be
    fetched/parsed. If *xmlfrag* is true, a minimal empty resource
    skeleton is used instead of fetching *url*."""

    def updateTextToolNode(tag,value):
        # replace (or create) the child *tag* of the <texttool> element
        # with a text node containing *value*; uses the enclosing 'dom'
        metanode=dom.getElementsByTagName('texttool')[0]
        try:
            nodeOld=metanode.getElementsByTagName(tag)
        except:
            nodeOld=None

        if nodeOld:
            metanode.removeChild(nodeOld[0]).unlink()

        node=dom.createElement(tag)
        nodetext=dom.createTextNode(value)
        node.appendChild(nodetext)
        metanode.appendChild(node)

    if xmlfrag:
        # start from an empty MPIWG resource skeleton
        geturl="""<?xml version="1.0" ?>
<resource type="MPIWG">
<meta>
<bib type="Book">
</bib>
</meta>
</resource>"""
        dom=xml.dom.minidom.parseString(geturl)
    else:
        try:
            geturl=""
            # NOTE(review): ECHO_helpers is not imported in this file --
            # presumably this module *is* ECHO_helpers (see urlopen() above);
            # confirm how this name resolves at runtime.
            for line in ECHO_helpers.urlopen(url).readlines():
                geturl=geturl+line


        except:
            return (None,"Cannot open: "+url)

        try:
            dom=xml.dom.minidom.parseString(geturl)
        except:
            return (None,"Cannot parse: "+url+"<br>"+geturl)



    # metadata lives either in <bib> or (Archimedes documents) <archimedes>
    metanodes=dom.getElementsByTagName('bib')

    if not metanodes:
        metanodes=dom.getElementsByTagName('archimedes')

    metanode=metanodes[0]

    for metaData in metadict.keys():

        try:
            nodeOld=metanode.getElementsByTagName(metaData)
        except:
            nodeOld=None

        if nodeOld:
            metanode.removeChild(nodeOld[0]).unlink()
        else:
            # try also old writing rule - instead of _:
            try:
                nodeOld=metanode.getElementsByTagName(re.sub('_','-',metaData))
            except:
                nodeOld=None

            if nodeOld:
                metanode.removeChild(nodeOld[0]).unlink()

        metanodeneu=dom.createElement(metaData)
        metanodetext=dom.createTextNode(metadict[metaData])
        #try:
        #metanodetext=dom.createTextNode(unicode(metadict[metaData],"utf-8"))
        #except:
        #metanodetext=dom.createTextNode(metadict[metaData].encode('utf-8'))
        metanodeneu.appendChild(metanodetext)
        metanode.appendChild(metanodeneu)




    # merge the optional texttool settings into the document

    if project:
        updateTextToolNode('project',project)

    if startpage:
        updateTextToolNode('startpage',startpage)

    if topbar:
        updateTextToolNode('toptemplate',topbar)

    if thumbtemplate:
        updateTextToolNode('thumbtemplate',thumbtemplate)

    if xslt:
        updateTextToolNode('xslt',xslt)


    if digiliburlprefix:
        updateTextToolNode('digiliburlprefix',digiliburlprefix)

    try:
        return dom.toxml().encode('utf-8')
    except:
        return dom.toxml('utf-8')
540:
541:
542:
def readMetadata(url):
    """Read the metadata information for a resource (currently type 'bib').

    Returns (metadict, "") on success, where metadict maps lower-cased
    tag names (with '-' normalised to '_') to their unquoted text values
    plus 'bib_type'; returns (None, errormessage) on fetch/parse failure.
    """

    metadict={}
    try:
        geturl=""
        # NOTE(review): ECHO_helpers is not imported in this file --
        # presumably this module *is* ECHO_helpers; confirm at runtime.
        for line in ECHO_helpers.urlopen(url).readlines():
            geturl=geturl+line


    except:
        return (None,"Cannot open: "+url)

    try:
        dom=xml.dom.minidom.parseString(geturl)
    except:
        return (None,"Cannot parse: "+url+"<br>"+geturl)

    # metadata lives either in <bib> (default type 'Book') or <archimedes>
    metanode=dom.getElementsByTagName('bib')
    metadict['bib_type']='Book'
    if len(metanode)==0:
        metanode=dom.getElementsByTagName('archimedes')
        metadict['bib_type']='Archimedes'


    if not len(metanode)==0:
        metacontent=metanode[0].childNodes

        try:
            # override the default with the explicit type attribute, if any
            metadict['bib_type']=getText(dom.getElementsByTagName('bib')[0].attributes['type'].childNodes)
        except:
            """nothing"""

        for node in metacontent:
            try:
                # normalise tag names: lower-case, '-' -> '_'
                metadict[re.sub('-','_',node.tagName.lower())]=urllib.unquote(getText(node.childNodes))
            except:
                """nothing"""


    return metadict,""
586:
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> -- CVSweb export footer, not code