--- cdli/cdli_files.py 2007/08/31 14:22:52 1.79 +++ cdli/cdli_files.py 2008/10/07 06:53:57 1.93 @@ -21,13 +21,23 @@ from ZPublisher.HTTPRequest import HTTPR from ZPublisher.HTTPResponse import HTTPResponse from ZPublisher.BaseRequest import RequestContainer import threading -from BTrees.OOBTree import OOBTree +from BTrees.OOBTree import OOBTree, OOTreeSet import logging import transaction import copy import codecs import sys - +from BTrees.IOBTree import IOBTree +import cdliSplitter +from sets import Set +import md5 +from DownloadBasket import DownloadBasketFinallyThread + +def makelist(mySet): + x = list(mySet) + x.sort() + return x + def unicodify(s): """decode str (utf-8 or latin-1 representation) into unicode object""" if not s: @@ -50,7 +60,72 @@ def utf8ify(s): else: return s.encode('utf-8') +def formatAtfHtml(l): + """escape special ATF characters for HTML""" + if not l: + return "" + + # replace & + l = l.replace('&','&') + # replace angular brackets + l = l.replace('<','<') + l = l.replace('>','>') + return l + +def formatAtfLineHtml(l, nolemma=True): + """format ATF line for HTML""" + if not l: + return "" + + if nolemma: + # ignore lemma lines + if l.lstrip().startswith('#lem:'): + return "" + + return formatAtfHtml(l) + + +def formatAtfFullLineNum(txt, nolemma=True): + """format full line numbers in ATF text""" + # surface codes + surfaces = {'@obverse':'obv', + '@reverse':'rev', + '@surface':'surface', + '@edge':'edge', + '@left':'left', + '@right':'right', + '@top':'top', + '@bottom':'bottom', + '@face':'face', + '@seal':'seal'} + + if not txt: + return "" + + ret = [] + surf = "" + col = "" + for line in txt.splitlines(): + line = unicodify(line) + if line and line[0] == '@': + # surface or column + words = line.split(' ') + if words[0] in surfaces: + surf = line.replace(words[0],surfaces[words[0]]).strip() + + elif words[0] == '@column': + col = ' '.join(words[1:]) + + elif line and line[0] in '123456789': + # ordinary line -> add line number + line = "%s:%s:%s"%(surf,col,line) + + ret.append(line) + + return '\n'.join(ret) + + def generateXMLReturn(hash): """erzeugt das xml file als returnwert fuer uploadATFRPC""" @@ -76,14 +151,6 @@ def generateXMLReturn(hash): return ret - - - - - - - - def unique(s): """Return a list of the elements in s, but without duplicates. @@ -163,7 +230,7 @@ class BasketContent(SimpleItem): def getContent(self): """get content""" - + return self.contentList def setContent(self,content): @@ -253,7 +320,7 @@ class uploadATFfinallyThread(Thread): self.result+="
Start processing"   #shall I only upload the changed files?
-        logging.info("uploadATFfinally procedure: %s"%procedure)
+        logging.debug("uploadATFfinally procedure: %s"%procedure)
         if procedure=="uploadchanged":
             changed=[x[0] for x in SESSION.get('changed',[])]
             uploadFns=changed+SESSION.get('newPs',[])
@@ -267,48 +334,48 @@
         #or maybe nothing
         elif procedure=="noupload":
-            return True
+            return True
         else:
             uploadFns=[]
         #do first the changed files
         i=0
         for fn in uploadFns:
+            logging.debug("uploadATFfinally uploadFn=%s"%fn)
             i+=1
             founds=ctx2.CDLICatalog.search({'title':fn})
             if len(founds)>0:
                 SESSION['author']=str(username)
                 self.result="Changing : %s"%fn+self.result
+                logging.debug("uploadatffinallythread changing:%s"%fn+self.result)
                 founds[0].getObject().manage_addCDLIFileObject('',comment,SESSION['author'],file=os.path.join(SESSION['tmpdir'],fn),from_tmp=True)
-            if i==200:
-                i=0
-                transaction.get().commit()
-                logging.info("changing: do commit")
+            if i%200==0:
+                transaction.get().commit()
+                logging.debug("uploadatffinallythread changing: do commit")
         transaction.get().commit()
-        logging.info("changing: last commit")
+        logging.debug("uploadatffinallythread changing: last commit")
         #now add the new files
         newPs=SESSION['newPs']
         if len(newPs)>0:
             tmpDir=SESSION['tmpdir']
-            logging.info("adding start")
+            logging.debug("uploadatffinallythread adding start")
             self.result="Adding files"+self.result
             #TODO: make this configurable, at the moment base folder for the files has to be cdli_main
-
             ctx2.importFiles(comment=comment,author=str(username) ,folderName=tmpDir, files=newPs,ext=self)
-            logging.info("adding finished")
-
+            logging.debug("uploadatffinallythread adding finished")
         #unlock locked files?
         if unlock:
-            logging.info("unlocking start")
+            logging.debug("uploadatffinallythread unlocking start")
             self.result="Unlock files"+self.result
             unlockFns=[]
             for x in os.listdir(SESSION['tmpdir']):
                 if not x in SESSION['errors']:
                     unlockFns.append(x)
-            logging.info("unlocking have now what to unlock")
+
+            logging.debug("unlocking have now what to unlock")
             for fn in unlockFns:
                 #logging.info("will unlock: %s"%fn)
@@ -317,30 +384,31 @@
                 if len(founds)>0:
                     #logging.info("unlock: %s"%founds[0].getObject().getId())
                     SESSION['author']=str(username)
-
                     founds[0].getObject().lockedBy=""
-            logging.info("unlocking done")
+
+            logging.debug("uploadatffinallythread unlocking done")
         #if a basketname is given, add files to the basket
         if not (basketname ==''):
-            logging.info("add to basket %s"%basketname)
+            logging.debug("uploadatffinallythread add to basket %s"%basketname)
            self.result="Add to basket"+self.result
            basketId=ctx2.basketContainer.getBasketIdfromName(basketname)
            if not basketId: # create new basket
-                logging.info("create basket %s"%basketname)
+                logging.debug("uploadatffinallythread create basket %s"%basketname)
                self.result="Create a new basket
"+self.result ob=ctx2.basketContainer.addBasket(basketname) basketId=ob.getId() basket=getattr(ctx2.basketContainer,str(basketId)) ids=os.listdir(SESSION['tmpdir']) + #logging.debug("should add:"+repr(ids)) basket.addObjects(ids,deleteOld=True,username=str(username)) + logging.debug("uploadatffinallythread uploadfinally done") + if RESPONSE is not None: RESPONSE.redirect(self.aq_parent.absolute_url()) - - logging.info("uploadfinally done") return True class tmpStore(SimpleItem): @@ -572,7 +640,7 @@ class CDLIBasketContainer(OrderedFolder) ret+=str(object[0].getData())+"\n" elif current=="yes": #search current object - logging.info("crrent: %s"%object[1].getId().split(".")[0]) + #logging.debug("current: %s"%object[1].getId().split(".")[0]) founds=self.CDLICatalog.search({'title':object[1].getId().split(".")[0]}) if len(founds)>0: ret+=str(founds[0].getObject().getLastVersion().getData())+"\n" @@ -864,10 +932,13 @@ class CDLIBasketContainer(OrderedFolder) """store it""" if not ids: ids=self.REQUEST.SESSION['fileIds'] - - if type(ids) is not ListType: + + if (type(ids) is not ListType) and (not isinstance(ids,Set)): ids=[ids] + if isinstance(ids,Set): + ids=list(ids) + if (submit.lower()=="store in new basket") or (submit.lower()=="new basket"): basketRet=self.addBasket(newBasketName) self.setActiveBasket(basketRet.getId()) @@ -881,7 +952,7 @@ class CDLIBasketContainer(OrderedFolder) if fromFileList: - return self.cdli_main.findObjectsFromList(list=self.REQUEST.SESSION['fileIds'],basketName=basket.title,numberOfObjects=added) + return self.cdli_main.findObjectsFromList(list=ids,basketName=basket.title,numberOfObjects=added) if RESPONSE: @@ -912,7 +983,7 @@ class CDLIBasket(Folder,CatalogAware): def searchInBasket(self,indexName,searchStr,regExp=False): """searchInBasket""" - lst=self.searchInLineIndexDocs(indexName,searchStr,uniq=True,regExp=regExp) + lst=self.searchInLineIndexDocs(indexName,searchStr,uniq=True,regExp=regExp) #TODO: fix this ret={} lv=self.getLastVersion() @@ -960,21 +1031,26 @@ class CDLIBasket(Folder,CatalogAware): def isActual(self,obj): """teste ob im basket die aktuelle version ist""" - actualNo=obj[1].getLastVersion().getVersionNumber() - storedNo=obj[0].getVersionNumber() - - founds=self.CDLICatalog.search({'title':obj[0].getId()}) - if len(founds)>0: - actualNo=founds[0].getObject().getLastVersion().getVersionNumber() + try: + #logging.debug("isActual:"+repr(obj)) + actualNo=obj[1].getLastVersion().getVersionNumber() + storedNo=obj[0].getVersionNumber() + + + #actualNo=self.getFileObjectLastVersion(obj.getId()).getVersionNumber() + + #if len(founds)>0 and founds[0].getObject().aq_parent.getId()==".trash": + # return False, -1 - if len(founds)>0 and founds[0].getObject().aq_parent.getId()==".trash": + if actualNo==storedNo: + return True , 0 + else: + return False, actualNo + except: + logging.error( """is actual: %s (%s %s)"""%(repr(obj),sys.exc_info()[0],sys.exc_info()[1])) + return False, -1 - - if actualNo==storedNo: - return True , 0 - else: - return False, actualNo - + def history(self): """history""" @@ -1117,6 +1193,10 @@ class CDLIBasket(Folder,CatalogAware): def addObjects(self,ids,deleteOld=None,username=None): """generate a new version of the basket with objects added""" + + def swap(x): + return (x[1],x[0]) + logging.info("add to basket (%s)"%(self.getId())) lastVersion=self.getLastVersion() @@ -1128,27 +1208,42 @@ class CDLIBasket(Folder,CatalogAware): if deleteOld: oldContent=[] - newContent=[] added=0 - for id in ids: - try: - 
founds=self.CDLICatalog.search({'title':id}) - except: - founds=[] - - for found in founds: - if found.getObject() not in oldContent: - #TODO: was passiert wenn, man eine Object dazufŸgt, das schon da ist aber eine neuere version - newContent.append((found.getObject().getLastVersion(),found.getObject())) - added+=1 +# for id in ids: +# logging.debug("adding:"+id) +# try: +# founds=self.CDLICatalog.search({'title':id}) +# except: +# founds=[] +# +# for found in founds: +# if found.getObject() not in oldContent: +# #TODO: was passiert wenn, man eine Object dazufŸgt, das schon da ist aber eine neuere version +# newContent.append((found.getObject().getLastVersion(),found.getObject())) +# added+=1 - content=oldContent+newContent + hash = md5.new(repr(makelist(ids))).hexdigest() # erzeuge hash als identification + #logging.debug("JJJJJJJ:"+repr(self.makelist(ids))) + + + if hasattr(self.cdliRoot,'v_tmpStore') and self.cdliRoot.v_tmpStore.has_key("hash"): + logging.debug("from store!") + newContent=Set(map(swap,self.cdliRoot.v_tmpStore[hash])) + + else: + logging.debug("not from store!") + newContent=Set([(self.getFileObjectLastVersion(x),self.getFileObject(x)) for x in ids]) + + + content=Set(oldContent).union(newContent) + added = len(content)-len(oldContent) if not username: user=self.getActualUserName() else: user = username - - ob=manage_addCDLIBasketVersion(self,user,comment="",basketContent=content) + + #logging.debug("content:"+repr(list(content))) + ob=manage_addCDLIBasketVersion(self,user,comment="",basketContent=list(content)) logging.info("add to basket (%s) done"%(self.getId())) return added @@ -1160,7 +1255,7 @@ class CDLIBasket(Folder,CatalogAware): lv=self.getLastVersion() for obj in lv.content.getContent(): - logging.info("XXXXXXXXXX %s"%repr(obj)) + #logging.info("XXXXXXXXXX %s"%repr(obj)) ret.append((obj[1].getId(),obj[0].versionNumber)) return ret @@ -1283,25 +1378,19 @@ class CDLIBasketVersion(Implicit,Persist return self.downloadObjectsAsOneFileFinally(lock=lock,procedure=procedure,REQUEST=REQUEST,current="no") - def downloadObjectsAsOneFileFinally(self,lock=None,procedure=None,REQUEST=None,current="no"): + def downloadObjectsAsOneFileFinally(self,lock=None,procedure=None,REQUEST=None,current="no",repeat=None): """print do the download""" - + + ret="" lockedObjects={} - self.temp_folder.downloadCounterBaskets+=1 - self._p_changed=1 - transaction.get().commit() + if lock: - + logging.debug("------lock:"+repr(lock)) if str(self.REQUEST['AUTHENTICATED_USER'])=='Anonymous User': - self.temp_folder.downloadCounterBaskets-=1 - self._p_changed=1 - transaction.get().commit() - self.temp_folder.downloadCounterBaskets-=1 - self._p_changed=1 - transaction.get().commit() + return "please login first" #check if a locked object exist in the basket. 
@@ -1319,47 +1408,123 @@ class CDLIBasketVersion(Implicit,Persist self.REQUEST.SESSION['lockedObjects']=lockedObjects pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','lockedObjects.zpt')).__of__(self) - self.temp_folder.downloadCounterBaskets-=1 - self._p_changed=1 - transaction.get().commit() - + return pt() elif not procedure: #keine fails gesperrt dann alle donwloaden procedure="downloadAll" + + + threadName=repeat + if not threadName or threadName=="": + thread=DownloadBasketFinallyThread() + threadName=thread.getName()[0:] + if (not hasattr(self,'_v_downloadBasket')): + self._v_downloadBasket={} - for object in self.content.getContent(): - - if (procedure=="downloadAll") or (object[1].lockedBy=='') or (object[1].lockedBy==self.REQUEST['AUTHENTICATED_USER']): - if current=="no": #version as they are in the basket - ret+=str(object[0].getData())+"\n" - elif current=="yes": - #search current object - founds=self.CDLICatalog.search({'title':object[1].getId().split(".")[0]}) - if len(founds)>0: - ret+=str(founds[0].getObject().getLastVersion().getData())+"\n" - - if lock and object[1].lockedBy=='': - object[1].lockedBy=self.REQUEST['AUTHENTICATED_USER'] - basket_name=self.aq_parent.title+"_V"+self.getId() + + self._v_downloadBasket[threadName]=thread + logging.debug("dwonloadfinally:"+repr(self)) + basketID=self.aq_parent.aq_parent.getId() + versionNumber=self.aq_parent.getId() + + if lock: + logging.debug("-----start locking") + for object in self.content.getContent(): + if object[1].lockedBy =='': + object[1].lockedBy=self.REQUEST['AUTHENTICATED_USER'] + logging.debug("-----finished locking") + + #obj.lockedBy=user + self._v_downloadBasket[threadName].set(lock,procedure,self.REQUEST['AUTHENTICATED_USER'],current,basketID,versionNumber) + + self._v_downloadBasket[threadName].start() + + + + wait_template=self.aq_parent.ZopeFind(self.aq_parent,obj_ids=['wait_template']) + + if wait_template: + return wait_template[0][1]() + pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','downloadBasketWait.zpt')).__of__(self) + + return pt(txt=self.absolute_url()+'/downloadObjectsAsOneFileFinally',threadName=threadName, + counter=self._v_downloadBasket[threadName].getCounter(), + number=self._v_downloadBasket[threadName].getNumberOfFiles()) + #_v_xmltrans.run() - #write basketname to header of atf file - ret="#basket: %s\n"%basket_name+ret + else: + #recover thread, if lost + if not hasattr(self,'_v_downloadBasket'): + self._v_downloadBasket={} + if not self._v_downloadBasket.get(threadName,None): + for thread in threading.enumerate(): + if threadName == thread.getName(): + self._v_downloadBasket[threadName]=thread + + if self._v_downloadBasket.get(threadName,None) and (self._v_downloadBasket[threadName] is not None) and (not self._v_downloadBasket[threadName].end) : - self.temp_folder.downloadCounterBaskets-=1 - self._p_changed=1 - transaction.get().commit() + wait_template=self.aq_parent.ZopeFind(self.aq_parent,obj_ids=['wait_template']) + if wait_template: + return wait_template[0][1]() + + pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','downloadBasketWait.zpt')).__of__(self) + return pt(txt=self.absolute_url()+'/downloadObjectsAsOneFileFinally',threadName=threadName, + counter=self._v_downloadBasket[threadName].getCounter(), + number=self._v_downloadBasket[threadName].getNumberOfFiles()) + else: + + + logging.debug("FINISHED") + if not self._v_downloadBasket.get(threadName,None): + for thread in threading.enumerate(): + if threadName == thread.getName(): + 
self._v_downloadBasket[threadName]=thread + + #files = self._v_downloadBasket[threadName].result + files=self.basketContainer.resultHash[threadName] + lockedFiles=self.basketContainer.resultLockedHash[threadName] + + # fh=file("/var/tmp/test") + #ret =fh.read() + + if (not isinstance(self.aq_parent,CDLIBasket)): + basket_name=self.aq_parent.aq_parent.title+"_V"+self.getId() + else: + basket_name=self.aq_parent.title+"_V"+self.getId() - self.REQUEST.RESPONSE.setHeader("Content-Disposition","""attachement; filename="%s.atf" """%basket_name) - self.REQUEST.RESPONSE.setHeader("Content-Type","application/octet-stream") - length=len(ret) - self.REQUEST.RESPONSE.setHeader("Content-Length",length) - self.REQUEST.RESPONSE.write(ret) - return True + + #write basketname to header of atf file + + + self.REQUEST.RESPONSE.setHeader("Content-Disposition","""attachement; filename="%s.atf" """%basket_name) + self.REQUEST.RESPONSE.setHeader("Content-Type","application/octet-stream") + #length=len(ret) + #self.REQUEST.RESPONSE.setHeader("Content-Length",length) + + ret="#basket: %s\n"%basket_name + self.REQUEST.RESPONSE.write(ret) + + for fileName in files: + try: + self.REQUEST.RESPONSE.write(file(fileName).read()) + except: + logging.error("downloadasonefile: cannot read %s"%fileName) + + + self.REQUEST.RESPONSE.write("\n# locked files\n") + for fileName in lockedFiles: + self.REQUEST.RESPONSE.write("# %s by %s\n"%fileName) + + self.REQUEST.RESPONSE.write("# locked files end\n") + + del self.basketContainer.resultHash[threadName] + del self.basketContainer.resultLockedHash[threadName] + def numberOfItems(self): """return anzahl der elemente im basket""" return self.content.numberOfItems() @@ -1450,45 +1615,47 @@ class CDLIFileObject(CatalogAware,extVer security=ClassSecurityInfo() - - security.declarePublic('makeThisVersionCurrent') - security.declareProtected('manage','index_html') + + security.declarePublic('view') + view = PageTemplateFile('zpt/viewCDLIFile.zpt', globals()) + + security.declarePublic('editATF') + editATF = PageTemplateFile('zpt/editATFFile.zpt', globals()) + def PrincipiaSearchSource(self): """Return cataloguable key for ourselves.""" return str(self) + def setAuthor(self, author): + """change the author""" + self.author = author + def makeThisVersionCurrent_html(self): - """form for making this version current""" + """form for mthis version current""" pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','makeThisVersionCurrent.zpt')).__of__(self) return pt() + + security.declarePublic('makeThisVersionCurrent') def makeThisVersionCurrent(self,comment,author,RESPONSE=None): """copy this version to current""" parent=self.aq_parent - - - newversion=parent.manage_addCDLIFileObject('',comment,author) - newversion.manage_upload(self.getData()) + parent.manage_addVersionedFileObject(id=None,vC=comment,author=author,file=self.getData(),RESPONSE=RESPONSE) + #newversion=parent.manage_addCDLIFileObject('',comment,author) + #newversion.manage_upload(self.getData()) - if RESPONSE is not None: - RESPONSE.redirect(self.aq_parent.absolute_url()+'/history') - + #if RESPONSE is not None: + # RESPONSE.redirect(self.aq_parent.absolute_url()+'/history') return True - security.declarePublic('view') - def getFormattedData(self): """fromat text""" data=self.getData() # return re.sub("\s\#lem"," #lem",data) #remove return vor #lem return re.sub("#lem"," #lem",data) #remove return vor #lem - def view(self): - """view file""" - 
pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','viewCDLIFile.zpt')).__of__(self) - return pt() security.declarePublic('getPNumber') def getPNumber(self): @@ -1517,13 +1684,14 @@ class CDLIFileObject(CatalogAware,extVer return txt.group(2) except: return "ERROR" + manage_addCDLIFileObjectForm=DTMLFile('dtml/fileAdd', globals(),Kind='CDLIFileObject',kind='CDLIFileObject', version='1') -def manage_addCDLIFileObject(self,id,vC='',author='', file='',title='',precondition='', content_type='', +def manage_addCDLIFileObject(self,id,vC='',author='', file='',title='',versionNumber=0, + precondition='', content_type='', from_tmp=False,REQUEST=None): """Add a new File object. - Creates a new File object 'id' with the contents of 'file'""" id=str(id) @@ -1536,48 +1704,57 @@ def manage_addCDLIFileObject(self,id,vC= self=self.this() # First, we create the file without data: - self._setObject(id, CDLIFileObject(id,title,'',content_type, precondition)) - self._getOb(id).versionComment=str(vC) - self._getOb(id).time=time.localtime() - - setattr(self._getOb(id),'author',author) - + self._setObject(id, CDLIFileObject(id,title,versionNumber=versionNumber,versionComment=vC,time=time.localtime(),author=author)) + fob = self._getOb(id) # Now we "upload" the data. By doing this in two steps, we # can use a database trick to make the upload more efficient. if file and not from_tmp: - self._getOb(id).manage_upload(file) + fob.manage_upload(file) elif file and from_tmp: - self._getOb(id).manage_upload_from_tmp(file) + fob.manage_file_upload(file) # manage_upload_from_tmp doesn't exist in ExtFile2 + # fob.manage_upload_from_tmp(file) # manage_upload_from_tmp doesn't exist in ExtFile2 if content_type: - self._getOb(id).content_type=content_type + fob.content_type=content_type + #logging.debug("manage_add: lastversion=%s"%self.getData()) + logging.debug("reindex1: %s in %s"%(repr(self),repr(self.default_catalog))) self.reindex_object() - self._getOb(id).reindex_object() + #logging.debug("manage_add: fob_data=%s"%fob.getData()) + logging.debug("reindex2: %s in %s"%(repr(fob), repr(fob.default_catalog))) + fob.index_object() + self.CDLIRoot.updateOrAddToFileBTree(ob) if REQUEST is not None: REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main') + class CDLIFile(extVersionedFile,CatalogAware): """CDLI file""" security=ClassSecurityInfo() meta_type="CDLI file" + content_meta_type = ["CDLI File Object"] + default_catalog='CDLICatalog' + security.declareProtected('manage','index_html') - #security.declarePublic('history') + def getLastVersionData(self): """get last version data""" - return self.getLastVersion().getData() + return self.getData() def getLastVersionFormattedData(self): """get last version data""" - return self.getLastVersion().getFormattedData() + return self.getContentObject().getFormattedData() + + def getTextId(self): + """returns P-number of text""" + # assuming that its the beginning of the title + return self.title[:7] #security.declarePublic('history') - - def history(self): """history""" @@ -1622,6 +1799,12 @@ class CDLIFile(extVersionedFile,CatalogA #return [x.getObject() for x in context.CDLIBasketCatalog.search({'getFileNamesInLastVersion':self.getId()})] + def _newContentObject(self, id, title='', versionNumber=0, versionComment=None, time=None, author=None): + """factory for content objects. 
to be overridden in derived classes.""" + logging.debug("_newContentObject(CDLI)") + return CDLIFileObject(id,title,versionNumber=versionNumber,versionComment=versionComment,time=time,author=author) + + def addCDLIFileObjectForm(self): """add a new version""" @@ -1647,58 +1830,30 @@ class CDLIFile(extVersionedFile,CatalogA except: pass - - if changeName=="yes": - filename=file.filename - self.title=filename[max(filename.rfind('/'), - filename.rfind('\\'), - filename.rfind(':'), - )+1:] - - - if not newName=='': - self.title=newName[0:] - - + ob = self.addContentObject(id, vC, author, file, title, changeName=changeName, newName=newName, from_tmp=from_tmp, + precondition=precondition, content_type=content_type) - - positionVersionNum=getattr(self,'positionVersionNum','front') - - if positionVersionNum=='front': - id="V%i"%self.getVersion()+"_"+self.title - else: - tmp=os.path.splitext(self.title) - if len(tmp)>1: - id=tmp[0]+"_V%i"%self.getVersion()+tmp[1] - else: - id=tmp[0]+"_V%i"%self.getVersion() - - - manage_addCDLIFileObject(self,id,vC,author,file,id,precondition, content_type,from_tmp=from_tmp) - #objs=self.ZopeFind(self,obj_ids=[id])[0][1].setVersionNumber(int(self.getVersion())) - objs=getattr(self,id).setVersionNumber(int(self.getVersion())) try: - #FIXME: wozu ist das gut? - self.REQUEST.SESSION['objID_parent']=self.getId() + #FIXME: wozu ist das gut? + self.REQUEST.SESSION['objID_parent']=self.getId() except: - pass + pass + #self.cdliRoot.updateOrAddToFileBTree(self)# now update the object in the cache + + if RESPONSE: - - obj=self.ZopeFind(self,obj_ids=[id])[0][1] - if obj.getSize()==0: - self.REQUEST.SESSION['objID']=obj.getId() + if ob.getSize()==0: + self.REQUEST.SESSION['objID']=ob.getId() pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','errorUploadFile')).__of__(self) return pt() - else: if come_from and (come_from!=""): - RESPONSE.redirect(come_from+"?change="+self.getId()) + RESPONSE.redirect(come_from+"?change="+self.getId()) else: RESPONSE.redirect(self.REQUEST['URL2']+'?uploaded=%s'%self.title) - else: - return self.ZopeFind(self,obj_ids=[id])[0][1] + return ob def manage_addCDLIFileForm(self): @@ -1712,8 +1867,6 @@ def manage_addCDLIFile(self,id,title,loc tryToggle=True tryCount=0 - - self._setObject(id,newObj) getattr(self,id).reindex_object() @@ -1721,6 +1874,7 @@ def manage_addCDLIFile(self,id,title,loc if RESPONSE is not None: RESPONSE.redirect('manage_main') + def checkUTF8(data): """check utf 8""" try: @@ -1751,6 +1905,7 @@ def splitatf(fh,dir=None,ext=None): nf=None i=0 + #ROC: why split \n first and then \r??? if (type(fh) is StringType) or (type(fh) is UnicodeType): iter=fh.split("\n") else: @@ -1804,34 +1959,37 @@ class CDLIFileFolder(extVersionedFileFol security=ClassSecurityInfo() meta_type="CDLI Folder" - filesMetaType=['CDLI file'] - folderMetaType=['CDLI Folder'] - default_catalog='CDLICatalog' - defaultFileCatalog=default_catalog #wenn dieses definiert ist, wird beim hinzufŸgen einer neuen version eines files dieser catalog neuiniziert + file_meta_type=['CDLI file'] + folder_meta_type=['CDLI Folder'] + + file_catalog='CDLICatalog' + #downloadCounter=0 # counts how many download for all files currently run, be mehr als 5 wird verweigert. tmpStore2={} + + def _newVersionedFile(self, id, title='', lockedBy=None, author=None): + """factory for versioned files. 
to be overridden in derived classes.""" + logging.debug("_newVersionedFile(CDLI)") + return CDLIFile(id, title, lockedBy=lockedBy, author=author) + def setTemp(self,name,value): """set tmp""" setattr(self,name,value) + deleteFileForm = PageTemplateFile("zpt/doDeleteFile", globals()) - def delete(self,ids): - """delete this file, i.e. move into a trash folder""" - - found=self.ZopeFind(self,obj_ids=['.trash']) - - if len(found)<1: - manage_addCDLIFileFolder(self, '.trash',title="Trash") - trash=self._getOb('.trash') - else: - trash=found[0][1] - + def delete(self,ids,REQUEST=None): + """delete these files""" if type(ids) is not ListType: ids=[ids] - cut=self.manage_cutObjects(ids) - trash.manage_pasteObjects(cut) + + self.manage_delObjects(ids) + if REQUEST is not None: + return self.index_html() + + def getVersionNumbersFromIds(self,ids): """get the numbers of the current versions of documents described by their ids""" @@ -1841,38 +1999,39 @@ class CDLIFileFolder(extVersionedFileFol founds=self.CDLICatalog.search({'title':searchStr}) for found in founds: - lastVersion=found.getObject().getLastVersion() + lastVersion=found.getObject().getContentObject() ret.append((found.getId,lastVersion)) return ret def getFile(self,fn): """get the content of the file fn""" - founds=self.CDLICatalog.search({'title':fn}) - if not founds: - return "" + logging.debug("getFile: %s"%repr(fn)) + if not self.hasObject(fn): + # search deeper + founds=getattr(self, self.file_catalog).search({'textid':fn}) + if founds: + obj=founds[0].getObject().getContentObject() + else: + return "" else: - obj=founds[0].getObject().getLastVersion() + obj = self[fn].getContentObject() - return obj.getData()[0:] + return obj.getData()[0:] + def checkCatalog(self,fn): """check if fn is in the catalog""" #TODO add checkCatalog - def findObjectsFromListWithVersion(self,list,author=None): """find objects from a list with versions @param list: list of tuples (cdliFile,version) """ - - - #self.REQUEST.SESSION['fileIds']=list#store fieldIds in session for further usage #self.REQUEST.SESSION['searchList']=self.REQUEST.SESSION['fileIds'] - pt=getattr(self,'filelistVersioned.html') return pt(search=list,author=author) @@ -1885,10 +2044,20 @@ class CDLIFileFolder(extVersionedFileFol return ret - def findObjectsFromList(self,enterList=None,display=False,start=None,upload=None,list=None,basketName=None,numberOfObjects=None,RESPONSE=None): + def expandFile(self,fileId,fileTree): + """wildcard in fileID suche alle Treffer""" + founds=self.CDLICatalog({'title':fileId}) + for found in founds: + fileTree.add(found.getId) + logging.debug("ADDD:"+found.getId) + + def findObjectsFromList(self,enterList=None,display=False,start=None,upload=None,list=None,basketName=None,numberOfObjects=None,RESPONSE=None,REQUEST=None,returnHash=False,hash=None): """findObjectsFromList (, TAB oder LINE separated)""" + logging.debug("start: findObjectsFromList") + #logging.debug("start: findObjectsFromList"+repr(list)) + if upload: # list from file upload txt=upload.read() @@ -1919,32 +2088,85 @@ class CDLIFileFolder(extVersionedFileFol pt=getattr(self,'filelist.html') return pt(basketName=basketName,numberOfObjects=numberOfObjects) + if hash is not None and hasattr(self.cdliRoot,'v_tmpStore') and self.cdliRoot.v_tmpStore.has_key(hash): + + logging.debug("asking for storage2") + result =self.cdliRoot.v_tmpStore[hash] + if result: + logging.debug("give result from storage2") + return hash,self.cdliRoot.v_tmpStore[hash] + if list is not None: # got already a list + + 
logging.debug(" ----List version") ret=[] + fileTree=Set() + for fileId in list: - if fileId.find("*"): #check for wildcards - fileId=fileId + + if fileId.find("*")>-1: #check for wildcards + self.expandFile(fileId,fileTree) + elif len(fileId.split("."))==1: fileId=fileId+".atf" + fileTree.add(fileId) + #logging.debug(" -----:"+fileId) + #ret+=self.CDLICatalog({'title':fileId}) + #x =self.getFileObject(fileId) + #if x is not None: + # ret.append(x) - ret+=self.CDLICatalog({'title':fileId}) + + + ids = fileTree & self.v_file_ids + #self.REQUEST.SESSION['fileIds']=ids#store fieldIds in session for further usage + l=makelist(fileTree)[0:] + #logging.debug("l-list:"+repr(l)) + self.REQUEST.SESSION['fileIds']=l#store fieldIds in session for further usage + self.REQUEST.SESSION['searchList']=l + #self.REQUEST.SESSION['searchList']=['P000001.atf'] + + + hash = md5.new(repr(makelist(fileTree))).hexdigest() # erzeuge hash als identification + self.REQUEST.SESSION['hash']=hash + #TODO: do I need garbage collection for v_tmpStore ? + + #logging.debug("Hash:"+repr(hash)) +# +# if hasattr(self.cdliRoot,'v_tmpStore') and self.cdliRoot.v_tmpStore.has_key(hash): +# logging.debug("asking for storage") +# res=self.cdliRoot.v_tmpStore[hash] +# if res: +# if returnHash == True: +# return hash,res +# return res + #TODO: get rid of one of these.. - ids=[x.getObject().getId() for x in ret] - self.REQUEST.SESSION['fileIds']=ids#store fieldIds in session for further usage - self.REQUEST.SESSION['searchList']=self.REQUEST.SESSION['fileIds'] + #ids=[x.getObject().getId() for x in ret] + ret=[(self.getFileObject(x),self.getFileObjectLastVersion(x)) for x in ids] + #self.REQUEST.SESSION['fileIds']=ids#store fieldIds in session for further usage + #self.REQUEST.SESSION['searchList']=self.REQUEST.SESSION['fileIds'] + if display: pt=getattr(self,'filelist.html') return pt(search=ids) - else: + else: + #self.REQUEST.SESSION['hash'] = ret # store in session + if not hasattr(self,'v_tmpStore'): + self.cdliRoot.v_tmpStore={} + #logging.debug("HHHHHHNEU:"+repr(self.makelist(ids))) + #logging.debug("HHHHHHNEU:"+repr(hash)) + self.cdliRoot.v_tmpStore[hash] = ret # store in session + if returnHash == True: + return hash,ret return ret if start: RESPONSE.redirect("filelist.html?start:int="+str(start)) - security.declareProtected('Manage','createAllFilesAsSingleFile') def createAllFilesAsSingleFile(self,RESPONSE=None): @@ -1953,7 +2175,7 @@ class CDLIFileFolder(extVersionedFileFol def sortF(x,y): return cmp(x[0],y[0]) - catalog=getattr(self,self.default_catalog) + catalog=getattr(self,self.file_catalog) #tf,tfilename=mkstemp() if not hasattr(self.temp_folder,'downloadCounter'): self.temp_folder.downloadCounter=0 @@ -1980,7 +2202,7 @@ class CDLIFileFolder(extVersionedFileFol #os.write(tf,obj.getLastVersion().data) if RESPONSE: - RESPONSE.write(obj.getLastVersion().getData()[0:]) + RESPONSE.write(obj.getData()[0:]) RESPONSE.write("\n") self.temp_folder.downloadCounter-=1 self._p_changed=1 @@ -2000,7 +2222,7 @@ class CDLIFileFolder(extVersionedFileFol def hasParent(self): """returns true falls subfolder""" - if self.aq_parent.meta_type in self.folderMetaType: + if self.aq_parent.meta_type in self.folder_meta_type: return True else: return False @@ -2008,11 +2230,11 @@ class CDLIFileFolder(extVersionedFileFol def getFolders(self): """get all subfolders""" ret=[] - folders=self.ZopeFind(self,obj_metatypes=self.folderMetaType) + folders=self.ZopeFind(self,obj_metatypes=self.folder_meta_type) for folder in folders: ret.append((folder[1], 
- len(self.ZopeFind(folder[1],obj_metatypes=self.folderMetaType)), - len(self.ZopeFind(folder[1],obj_metatypes=self.filesMetaType)) + len(self.ZopeFind(folder[1],obj_metatypes=self.folder_meta_type)), + len(self.ZopeFind(folder[1],obj_metatypes=self.file_meta_type)) )) return ret @@ -2064,243 +2286,298 @@ class CDLIRoot(Folder): """main folder for cdli""" meta_type="CDLIRoot" - downloadCounterBaskets=0# counts the current basket downloads if counter > 10 no downloads are possible + downloadCounterBaskets=0 # counts the current basket downloads if counter > 10 no downloads are possible - def deleteFiles(self,ids): - """delete files (resp. move into .trash folder)""" - # find or generete trash folder - - found=self.ZopeFind(self,obj_ids=['.trash']) - - if len(found)<1: - manage_addCDLIFileFolder(self, '.trash',title="Trash") - trash=self._getOb('.trash') - else: - logging.info(found) - trash=found[0][1] + file_catalog = 'CDLICatalog' + + # word splitter for search + splitter = {'words':cdliSplitter.wordSplitter(), + 'graphemes':cdliSplitter.graphemeSplitter()} + + + def viewATF(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + logging.debug(ob[0].getObject().getLastVersion().absolute_url()+"/view") + if len(ob)>0: + RESPONSE.redirect(ob[0].getObject().getLastVersion().absolute_url()+"/view") + return "not found" + + def history(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].absolute_url+"/history") + return "not found" + + + def downloadLocked(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].absolute_url+"/downloadLocked") + return "not found" + + def download(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].getLastVersion().absolute_url()) + return "not found" + def addCDLIFileObjectForm(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].absolute_url+"/addCDLIFileObjectForm") + return "not found" + + def addVersionedFileObjectForm(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].absolute_url+"/addVersionedFileObjectForm") + return "not found" + + def unlock(self,id,RESPONSE): + """view an Object""" + ob = self.CDLICatalog({'title':id}) + if len(ob)>0: + RESPONSE.redirect(ob[0].absolute_url+"/unlock") + return "not found" + + def getFileObject(self,fileId): + """get an object""" + x=self.v_files.get(fileId) + #logging.debug(x) + return x + + def getFileObjectLastVersion(self,fileId): + """get an object""" + x=self.v_files_lastVersion.get(fileId) + l#ogging.debug("lastVersion: "+repr(x)) + return x + + def showFileIds(self): + """showIds""" + return self.v_file_ids + + def generateFileBTree(self): + """erzeuge einen Btree aus allen Files""" + self.v_files = OOBTree() + self.v_files_lastVersion = OOBTree() + self.v_file_ids = Set() + + for x in self.CDLICatalog.searchResults(): + + self.v_files.update({x.getId:x.getObject()}) + self.v_files_lastVersion.update({x.getId:x.getObject().getLastVersion()}) + self.v_file_ids.add(x.getId) + logging.debug("add:"+x.getId+"XXX"+repr(x.getObject())) + return True + + + def updateOrAddToFileBTree(self,obj): + """update a BTree""" + self.v_files.update({obj.getId():obj}) + self.v_files_lastVersion.update({obj.getId():obj.getLastVersion()}) + + 
self.v_file_ids.add(obj.getId()) + logging.debug("update:"+obj.getId()+"XXX"+repr(obj)) + + def deleteFromBTree(self,objId): + """delete an obj""" + self.v_files.pop(objId) + self.v_files_lastVersion.pop(objId) + self.v_file_ids.remove(objId) + + + def deleteFiles(self,ids): + """delete files""" for id in ids: founds=self.CDLICatalog.search({'title':id.split(".")[0]}) if founds: - logging.info(founds) + logging.debug("deleting %s"%founds) folder=founds[0].getObject().aq_parent #get the parent folder of the object - logging.info(folder) - cut=folder.manage_cutObjects([founds[0].getId]) #cut it out - trash.manage_pasteObjects(cut) #paste it in the trash - - - def findWordRegExp(self,indexName,searchTerm): - """find all words in index which match regexp in SearchTerm - @param indexName: name of the index to be searched in - @param searchTerm: word to be searched""" - - ret=[] - for x in self.lineIndexes[indexName].iterkeys(): - if re.match(searchTerm,x): - ret.append(x) - return ret - - def searchRegExpInLineIndexDocs(self,indexName,searchTerm): - """search in inLineIndex with regexp - @param indexName: name of the index to be searched in - @param searchTerm: term to be searched - """ - if not searchTerm: - return [] - ret=[] - words=self.findWordRegExp(indexName,searchTerm) # suche nach allen Treffern - logging.info("wd:%s"%words) - for word in words: + logging.debug("deleting from %s"%folder) + cut=folder.delete([founds[0].getId]) #cut it out - ret+=self.searchInLineIndexDocs(indexName,word) - - x= unique(ret) - logging.info("words_done") - return x - def showInLineIndex(self): - """get the index for debug purposes""" - print "show" - for key in self.lineIndexes.keys(): - logging.info("index:%s"%key) - for x in self.lineIndexes[key].iterkeys(): - logging.info("word:%s"%repr(x)) - #for y in self.lineIndex[x].iterkeys(): - # print "doc",repr(y),repr(self.lineIndex[x][y]) - - return self.lineIndexes - - def searchInLineIndexDocs(self,indexName,word,uniq=True,regExp=False): - """search occurences in an index - @param indexName: name of the index to be searched in - @param word: word to be searched - @param unique: (optional) unify the list of results - @param regExp: (optional) use regular expressions - """ - - if regExp: - return self.searchRegExpInLineIndexDocs(indexName,word) - - try: - - lst=list(self.lineIndexes[indexName].get(word).keys()) - except: - logging.error("error: searchInLineIndexDocs (%s %s)"%(sys.exc_info()[0:2])) - lst=[] - if uniq: - return unique(lst) - else: - return lst - - def getLinesFromIndex(self,indexName,word,doc,regExp=False): - """return all lines from a document where word is found - @param indexName: Name of the index - @param word: word to be searched - @param doc: name of the document (usuallay the p-number) - @param regExp: (optional) use regExp - """ - - if not regExp: - return self.lineIndexes[indexName].get(word)[doc] - else: # wenn regexp, suche welches word - for w in self.findWordRegExp(indexName,word): - if self.lineIndexes[indexName].get(w): # ein word in im dex gefunden - try: - dc=self.lineIndex[indexName].get(word)[doc] - return dc # und ein document dann gib es zurueck - except: - pass #andernfalls weiter - - def cleanInLineIndex(self,indexName): - """empty an InlineIndex - @param indexName: name of the index - """ - for x in list(self.lineIndexes[indexName].keys()): - del(self.lineIndexes[indexName][x]) - print [x for x in self.lineIndexes[indexName].keys()] - - return "ok" - - def storeInLineIndex(self,indexName,key,value): - """store in index, 
key is normally a word or grapheme - and value is a tuple (documentname, line) where the word can be found - @param indexName: name of the index - @param key: key in index - @param value: value in index, value is a tuple (document name, line) - """ - logging.error("indexing: %s %s"%(indexName,key)) - if (not hasattr(self,'lineIndexes')): - - self.lineIndexes={} - - if self.lineIndexes.get(indexName,None) is None: - #index exisitiert noch nicht dann anlegen - - self.lineIndexes[indexName]=OOBTree() - lis=self.lineIndexes - li=lis[indexName] - - if li.has_key(key): - -# if li[key].has_key(value[0]) and (not (value[1] in li[key][value[0]])): - if li[key].has_key(value[0]): - tmp=li[key][value[0]] - tmp.append(value[1]) # add it if now in the array - li[key][value[0]]=tmp[0:] - else: - li[key][value[0]]=[value[1]] # new array for lines - - else: - - li[key]=OOBTree()# new btree for lines - li[key][value[0]]=[value[1]] - - - self.lineIndexes=lis - - transaction.get().commit() + def searchText(self, query, index='graphemes'): + """searches query in the fulltext index and returns a list of file ids/P-numbers""" + # see also: http://www.plope.com/Books/2_7Edition/SearchingZCatalog.stx#2-13 + logging.debug("searchtext for '%s' in index %s"%(query,index)) + #import Products.ZCTextIndex.QueryParser + #qp = QueryParser.QueryParser() + #logging.debug() + idxQuery = {index:{'query':query}} + idx = getattr(self, self.file_catalog) + # do search + resultset = idx.search(query_request=idxQuery,sort_index='textid') + # put only the P-Number in the result + results = [res.getId[:7] for res in resultset] + logging.debug("searchtext: found %d texts"%len(results)) + return results + + + def getFile(self, pnum): + """get the translit file with the given pnum""" + f = getattr(self, self.file_catalog).search({'textid':pnum}) + if not f: + return "" + return f[0].getObject().getData() + def showFile(self,fileId,wholePage=False): """show a file @param fileId: P-Number of the document to be displayed """ - f=self.CDLICatalog({'title':fileId}) + f=getattr(self, self.file_catalog).search({'textid':fileId}) if not f: return "" if wholePage: - logging.info("whole") - return f[0].getObject().getLastVersion().view() + logging.debug("show whole page") + return f[0].getObject().getContentObject().view() else: return f[0].getObject().getLastVersionFormattedData() - def showWordInFile(self,fileId,word,lineList=None,regExp=True,indexName=""): - """get lines with word fromFileId""" + def showWordInFile(self,fileId,word,indexName='graphemes',regExp=False,): + """get lines with word from FileId""" + logging.debug("showwordinfile word='%s' index=%s file=%s"%(word,indexName,fileId)) - file=self.showFile(fileId) - logging.info("regEXP %s"%regExp) + file = formatAtfFullLineNum(self.getFile(fileId)) ret=[] - if regExp: # wenn regexp dann generiere alle worte aus der list die der regexp entsprechen - wordlist=self.findWordRegExp(indexName,word) - else: - wordlist=[word] - for line in file.split("\n"): - found=False + # add whitespace before and whitespace and line-end to splitter bounds expressions + bounds = self.splitter[indexName].bounds + splitexp = "(%s|\s)(%%s)(%s|\s|\Z)"%(bounds,bounds) + # clean word expression + # TODO: this should use QueryParser itself + # take out double quotes + word = word.replace('"','') + # take out ignorable signs + ignorable = self.splitter[indexName].ignorex + word = ignorable.sub('', word) + # compile into regexp objects and escape parens + wordlist = [re.compile(splitexp%re.escape(w)) for w in 
word.split(' ')] + + for line in file.splitlines(): for word in wordlist: - try: # just a hack because of possible unicode errors in line - if line.find(word)>-1: - if lineList: #liste of moeglichen Zeilennummern - num=line.split(".")[0] #Zeilenummer ist alles vor dem . in der Zeile - - if num in lineList: - - ret.append(line) - else: # nimm alles ohne line check - ret.append(line) - - break; - except: - pass + #logging.debug("showwordinfile: searching for %s in %s"%(word.pattern,ignoreable.sub('',line))) + if word.search(ignorable.sub('',line)): + line = formatAtfLineHtml(line) + ret.append(line) + break + return ret - def tagWordInFile(self,fileId,word,lineList=None,regExp=True,indexName=""): - """get lines with word fromFileId""" + + def showWordInFiles(self,fileIds,word,indexName='graphemes',regExp=False): + """ + get lines with word from all ids in list FileIds. + returns dict with id:lines pairs. + """ + logging.debug("showwordinfiles word='%s' index=%s file=%s"%(word,indexName,fileIds)) + + return dict([(id,self.showWordInFile(id, word, indexName, regExp)) for id in fileIds]) + + + def tagWordInFile(self,fileId,word,indexName='graphemes',regExp=False): + """get text with word highlighted from FileId""" + logging.debug("tagwordinfile word='%s' index=%s file=%s"%(word,indexName,fileId)) - file=self.showFile(fileId) - tagStr=u'%s' + file=self.getFile(fileId) + tagStart=u'' + tagEnd=u'' + tagStr=tagStart + u'%%s' + tagEnd ret=[] - if regExp: # wenn regexp dann generiere alle worte aus der list die der regexp entsprechen - wordlist=self.findWordRegExp(indexName,word) - else: - wordlist=[word] + # add whitespace to splitter bounds expressions and compile into regexp object + bounds = self.splitter[indexName].bounds + wordsplit = re.compile("(%s|\s)"%bounds) + # clean word expression + # TODO: this should use QueryParser itself + word = word.replace('"','') # take out double quotes + # take out ignoreable signs + ignorable = self.splitter[indexName].ignorex + word = ignorable.sub('', word) + # split search terms by blanks + words = word.split(' ') + # split search terms again (for grapheme search with words) + splitwords = dict(((w,self.splitter[indexName].process([w])) for w in words)) - for line in file.split("\n"): + for line in file.splitlines(): line = unicodify(line) - found=False - for word in wordlist: - if line.find(word)>-1: #word ist gefunden dann makiere und breche die Schleife ab - if lineList: #liste of moeglichen Zeilennummern - num=line.split(".")[0] #Zeilenummer ist alles vor dem . 
in der Zeile - - if num in lineList: - - ret.append(line.replace(word,tagStr%word)) - - else: # nimm alles ohne line check - ret.append(line.replace(word,tagStr%word)) - found=True - break - if not found: #word wurde nicht gefunden keine makierung - ret.append(line) + # ignore lemma and other lines + if line.lstrip().startswith('#lem:'): + continue + # ignore p-num line + if line.startswith('&P'): + continue + # ignore version lines + if line.startswith('#version'): + continue + # ignore atf type lines + if line.startswith('#atf:'): + continue + + # first scan + hitwords = [] + for w in words: + if ignorable.sub('',line).find(w) > -1: + # word is in line + # append split word for grapheme search with words + hitwords.extend(splitwords[w]) + #hitwords.extend(wordsplit.split(w)) + + # examine hits closer + if hitwords: + # split line into words + parts = wordsplit.split(line) + line = "" + for p in parts: + #logging.debug("tagwordinfile: searching for %s in %s"%(p,hitwords)) + # reassemble line + if ignorable.sub('', p) in hitwords: + #logging.debug("tagwordinfile: found %s in %s"%(p,hitwords)) + # this part was found + line += tagStart + formatAtfHtml(p) + tagEnd + else: + line += formatAtfHtml(p) + + else: + # no hits + line = formatAtfHtml(line) + + ret.append(line) return u'
\n'.join(ret) + + + def tagWordInFiles(self,fileIds,word,indexName='graphemes',regExp=False): + """ + get texts with highlighted word from all ids in list FileIds. + returns dict with id:text pairs. + """ + logging.debug("tagwordinfiles word='%s' index=%s file=%s"%(word,indexName,fileIds)) + return dict([(id,self.tagWordInFile(id, word, indexName, regExp)) for id in fileIds]) + + + def getFileVersionList(self, pnum): + """get the version history as a list for the translit file with the given pnum""" + f = getattr(self, self.file_catalog).search({'textid':pnum}) + if not f: + return [] + + return f[0].getObject().getVersionList() + + def URLquote(self,str): """quote url""" return urllib.quote(str) @@ -2373,6 +2650,11 @@ class CDLIRoot(Folder): return """ 2;url=%s?repeat=%s """%(self.absolute_url()+txt,threadName) + def refreshTxtBasket(self,txt="",threadName=None): + """txt fuer refresh""" + + return """ 2;url=%s?repeat=%s """%(txt,threadName) + def getResult(self,threadName=None): """result of thread""" @@ -2549,12 +2831,24 @@ class CDLIRoot(Folder): pt=PageTemplateFile(os.path.join(package_home(globals()),'zpt','uploadATFWait.zpt')).__of__(self) return pt(txt='/uploadATFfinally',threadName=threadName) else: + + + idTmp=self.REQUEST.SESSION['idTmp'] + stObj=getattr(self.temp_folder,idTmp) self.REQUEST.SESSION['idTmp']=None + + #update changed + logging.debug("dir:"+repr(stObj.returnValue['changed'])) + for x in stObj.returnValue['changed']: + ob=self.CDLICatalog.search({'title':x[0]}) + + self.cdliRoot.updateOrAddToFileBTree(ob[0].getObject()) if RESPONSE is not None: RESPONSE.redirect(self.absolute_url()) def importFiles(self,comment="",author="" ,folderName="/Users/dwinter/atf", files=None,ext=None): """import files""" + logging.debug("importFiles folderName=%s files=%s ext=%s"%(folderName,files,ext)) root=self.cdli_main count=0 if not files: @@ -2563,44 +2857,49 @@ class CDLIRoot(Folder): for f in files: folder=f[0:3] f2=f[0:5] + + #check if main folder PXX already exists obj=self.ZopeFind(root,obj_ids=[folder]) + logging.debug("importFiles: folder=%s f2=%s obj=%s"%(folder,f2,obj)) if ext: - ext.result="

adding: %s

"%f+ext.result - if not obj: + + + if not obj: # if not create it manage_addCDLIFileFolder(root,folder,folder) fobj=getattr(root,folder) #transaction.get().commit() + else: fobj=obj[0][1] + #check IF PYYYYY already exist obj2=fobj.ZopeFind(fobj,obj_ids=[f2]) + logging.debug("importFiles: fobj=%s obj2=%s"%(fobj,obj2)) - if not obj2: + if not obj2:# if not create it manage_addCDLIFileFolder(fobj,f2,f2) fobj2=getattr(fobj,f2) else: fobj2=obj2[0][1] + # not add the file file2=os.path.join(folderName,f) id=f - manage_addCDLIFile(fobj2,f,'','') - id=f - ob=fobj2._getOb(f) - ob.title=id - - manage_addCDLIFileObject(ob,id,comment,author,file2,content_type='',from_tmp=True) - self.CDLICatalog.catalog_object(ob) - #self.CDLICatalog.manage_catalogFoundItems(obj_ids=[id],search_sub=1) - #self.CDLICatalog.manage_catalogObject(self.REQUEST, self.REQUEST.RESPONSE, 'CDLICatalog', urlparse.urlparse(ob.absolute_url())[1]) + logging.debug("importFiles: addCDLIFile fobj2=%s, f=%s file2=%s"%(fobj2,repr(f),repr(file2))) + fobj2.addFile(vC='',file=file(file2),author=author,newName=f) count+=1 - - if count > 1000: - print "committing" + + #now add the file to the storage + ob = getattr(fobj2,f) + self.cdliRoot.updateOrAddToFileBTree(ob) + + if count%100==0: + logging.debug("importfiles: committing") transaction.get().commit() - count=0 - transaction.get().commit() + + transaction.get().commit() return "ok"