Each record below is a diff triple: "rem" (the line removed), "add" (the line added in its place), and "context" (the surrounding code).
fileListing.append(entry)
finalList.append(entry)
def __readCache__(self,cacheListing=list()):
    """
    Simple method to read in a cache or list of cache files and
    return a list of files, or an empty list if nothing is found.
    It uses the pathing information from the files passed via
    cacheListing to aid in our filesystem search.
    """
    #Open the cache entry and search for those entries
    finalList=list()
    for entry in cacheListing:
        fileListing=list()
        #Cache files listed themselves; comment out following line
        fileListing.append(entry)
        fileListing.extend([x.rstrip("\n") for x in file(entry)])
        #PATCH START to add in the z distribution files
        for fname in fileListing:
            if ".html" in fname:
                zFile=fname.replace(".html",".txt")
                fileListing.append(zFile)
        #PATCH END
        #Pathing info
        pathingInfo=os.path.dirname(entry)
        for thisFile in fileListing:
            #Search filesystem for file full path
            finalList.extend(fnmatch.filter(self.fsys,"*%s*%s"%(pathingInfo,thisFile)))
            #Look for potential matching thumbnails
            if thisFile.endswith(".png"):
                finalList.extend(fnmatch.filter(self.fsys,"*%s"%thisFile.replace(".png","?thumb?png")))
    return finalList
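A minimal sketch of the fnmatch lookup the method relies on, using hypothetical paths (the real self.fsys is a flat listing of the follow-up results tree):

import fnmatch

fsys = [
    "/results/followup/H1-qscan_931176926.png",        # hypothetical paths
    "/results/followup/H1-qscan_931176926.thumb.png",
]
pathingInfo = "/results/followup"
thisFile = "H1-qscan_931176926.png"
#Full-path search, as in the method body above
print(fnmatch.filter(fsys, "*%s*%s" % (pathingInfo, thisFile)))
# ['/results/followup/H1-qscan_931176926.png']
#Thumbnail variant: the "?" wildcards match the "." or "_" around "thumb"
print(fnmatch.filter(fsys, "*%s" % thisFile.replace(".png", "?thumb?png")))
# ['/results/followup/H1-qscan_931176926.thumb.png']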
myMask="*%s*%s-findVetos_%s_%s.wiki"%\
myMask="*%s/*%s-findVetos_%s_%s.wiki"%\
def get_findVetos(self):
    tmpList=list()
    #H1,H2,L1-findVetos_H1,H2,L1_831695156.714.wiki
    #instrument,ifos
    ifoString=""
    for i in range(0,len(self.coinc.ifos)/2):
        ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2]
    ifoString=ifoString.rstrip(",")
    insString=""
    for i in range(0,len(self.coinc.instruments)/2):
        insString=insString+"%s,"%self.coinc.instruments[2*i:2*i+2]
    insString=insString.rstrip(",")
    myMask="*%s*%s-findVetos_%s_%s.wiki"%\
        (self.coinc.type,insString,ifoString,self.coinc.time)
    tmpList.extend(fnmatch.filter(self.fsys,myMask))
    return tmpList
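The two-character chunking above turns a packed IFO string into a comma-separated one. A worked example (the integer division is intentional in this Python 2 code; "//" is written here so the snippet behaves the same under Python 3):

ifos = "H1H2L1"
ifoString = ""
for i in range(0, len(ifos) // 2):
    ifoString = ifoString + "%s," % ifos[2*i:2*i+2]
ifoString = ifoString.rstrip(",")
print(ifoString)  # H1,H2,L1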
myMask="*%s*%s-findFlags_%s_%s.wiki"%\
myMask="*%s/*%s-findFlags_%s_%s.wiki"%\
def get_findFlags(self):
    """
    """
    tmpList=list()
    #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki
    #instrument,ifos
    ifoString=""
    for i in range(0,len(self.coinc.ifos)/2):
        ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2]
    ifoString=ifoString.rstrip(",")
    insString=""
    for i in range(0,len(self.coinc.instruments)/2):
        insString=insString+"%s,"%self.coinc.instruments[2*i:2*i+2]
    insString=insString.rstrip(",")
    myMask="*%s*%s-findFlags_%s_%s.wiki"%\
        (self.coinc.type,insString,ifoString,self.coinc.time)
    tmpList.extend(fnmatch.filter(self.fsys,myMask))
    return tmpList
(self.coint.type,sngl.ifo,sngl.ifo,timeString)
(self.coinc.type,sngl.ifo,sngl.ifo,timeString)
def get_analyzeQscan_RDS(self):
    """
    """
    #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache
    cacheList=list()
    cacheFiles=list()
    for sngl in self.coinc.sngls:
        timeString=str(float(sngl.time)).replace(".","_")
        myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\
            (self.coint.type,sngl.ifo,sngl.ifo,timeString)
        #Ignore the files with seis_rds in them
        for x in fnmatch.filter(self.fsys,myCacheMask):
            if not x.__contains__('seis_rds'):
                cacheList.append(x)
    #Read the cache file or files
    cacheFiles=self.__readCache__(cacheList)
    return cacheFiles
while os.path.exists(wikiFilename) and maxCount < 10:
while os.path.exists(wikiFilename) and maxCount < 15:
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None):
    """
    Method to prepare a checklist where data products are isolated in directory.
    """
    endOfS5=int(875232014)
    wikiFileFinder=findFileType(wikiTree,wikiCoinc)
    #
    # Check to see if wiki file with name already exists
    #
    maxCount=0
    while os.path.exists(wikiFilename) and maxCount < 10:
        sys.stdout.write("File %s already exists.\n"%\
            os.path.split(wikiFilename)[1])
        wikiFilename=wikiFilename+".wiki"
        maxCount=maxCount+1
    #
    #Create the wikipage object etc
    #
    wikiPage=wiki(wikiFilename)
    #
    # Create top two trigger params tables
    #
    cTable=wikiPage.wikiTable(2,9)
    cTable.data=[
        ["Trigger Type",
         "Rank",
         "FAR",
         "SNR",
         "IFOS(Coinc)",
         "Instruments(Active)",
         "Coincidence Time (s)",
         "Total Mass (mSol)",
         "Chirp Mass (mSol)"],
        ["%s"%(wikiCoinc.type),
         "%s"%(wikiCoinc.rank),
         "%s"%(wikiCoinc.far),
         "%s"%(wikiCoinc.snr),
         "%s"%(wikiCoinc.ifos),
         "%s"%(wikiCoinc.instruments),
         "%s"%(wikiCoinc.time),
         "%s"%(wikiCoinc.mass),
         "%s"%(wikiCoinc.mchirp)]
        ]
    pTable=wikiPage.wikiTable(len(wikiCoinc.sngls_in_coinc())+1,7)
    pTable.data[0]=[
        "IFO",
        "GPS Time(s)",
        "SNR",
        "CHISQR",
        "Mass 1",
        "Mass 2",
        "Chirp Mass"]
    for row,cSngl in enumerate(wikiCoinc.sngls_in_coinc()):
        pTable.data[row+1]=[
            "%s"%(cSngl.ifo),
            "%s"%(cSngl.time),
            "%s"%(cSngl.snr),
            "%s"%(cSngl.chisqr),
            "%s"%(cSngl.mass1),
            "%s"%(cSngl.mass2),
            "%s"%(cSngl.mchirp)]
    #Write the tables into the Wiki object
    wikiPage.putText("Coincident Trigger Event Information: %s\n"%\
        (stfu_pipe.gpsTimeToReadableDate(wikiCoinc.time)))
    wikiPage.insertTable(cTable)
    wikiPage.putText("Corresponding Coincident Single IFO Trigger Information\n")
    wikiPage.insertTable(pTable)
    #Generate a table of contents to appear after candidate params table
    wikiPage.tableOfContents(3)
    #Begin including each checklist item as section with subsections
    wikiPage.section("Follow-up Checklist")
    #Put each checklist item
    wikiPage.subsection("Checklist Summary")
    wikiPage.subsubsection("Does this candidate pass this checklist?")
    wikiPage.subsubsection("Answer")
    wikiPage.subsubsection("Relevant Information and Comments")
    wikiPage.insertHR()
    #
    #First real checklist item
    wikiPage.subsection("#0 False Alarm Probability")
    wikiPage.subsubsection("Question")
    wikiPage.putText("What is the false alarm rate associated with this candidate?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    farTable=wikiPage.wikiTable(2,1)
    farTable.setTableStyle("background-color: yellow; text-align center;")
    farTable.data[0][0]="False Alarm Rate"
    farTable.data[1][0]="%s"%(wikiCoinc.far)
    wikiPage.insertTable(farTable)
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#1 Data Quality Flags")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Can the data quality flags coincident with this candidate be safely disregarded?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPath=os.path.split(wikiFilename)[0]
    dqFileList=wikiFileFinder.get_findFlags()
    if len(dqFileList) != 1:
        sys.stdout.write("Warning: DQ flags data product import problem.\n")
        print "Found %i files."%len(dqFileList)
        for mf in dqFileList:
            print mf
    for myFile in dqFileList:
        wikiPage.putText("%s\n"%(file(myFile).read()))
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#2 Veto Investigations")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Does the candidate survive the veto investigations performed at its time?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    vetoFileList=wikiFileFinder.get_findVetos()
    if len(vetoFileList) != 1:
        sys.stdout.write("Warning: Veto flags data product import problem.\n")
        for myFile in vetoFileList:
            print myFile
    for myFile in vetoFileList:
        wikiPage.putText("%s\n"%(file(myFile).read()))
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#3 IFO Status")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Are the interferometers operating normally with a reasonable level of sensitivity around the time of the candidate?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    #Add link to Daily Stats
    if wikiCoinc.time <= endOfS5:
        statsLink=wikiPage.makeExternalLink("http://blue.ligo-wa.caltech.edu/scirun/S5/DailyStatistics/",\
            "S5 Daily Stats Page")
    else:
        statsLink="This should be a link to S6 Daily Stats!\n"
    wikiPage.putText(statsLink)
    #Link figures of merit
    #Get link for all members of wikiCoinc
    wikiPage.putText("Figures of Merit\n")
    if wikiCoinc.time > endOfS5:
        fomLinks=dict()
        elems=0
        for wikiSngl in wikiCoinc.sngls:
            if not(wikiSngl.ifo.upper().rstrip().lstrip() == 'V1'):
                fomLinks[wikiSngl.ifo]=stfu_pipe.getFOMLinks(wikiCoinc.time,wikiSngl.ifo)
                elems=elems+len(fomLinks[wikiSngl.ifo])
            else:
                for myLabel,myLink,myThumb in stfu_pipe.getFOMLinks(wikiCoinc.time,wikiSngl.ifo):
                    wikiPage.putText("%s\n"%(wikiPage.makeExternalLink(myLink,myLabel)))
        cols=4
        rows=(elems/3)+1
        fTable=wikiPage.wikiTable(rows,cols)
        fTable.data[0]=["IFO,Shift","FOM1","FOM2","FOM3"]
        currentIndex=0
        for myIFOKey in fomLinks.keys():
            for label,link,thumb in fomLinks[myIFOKey]:
                myRow=currentIndex/int(3)+1
                myCol=currentIndex%int(3)+1
                fTable.data[myRow][0]=label
                thumbURL=thumb
                fTable.data[myRow][myCol]="%s"%(wikiPage.linkedRemoteImage(thumb,link))
                currentIndex=currentIndex+1
        wikiPage.insertTable(fTable)
    else:
        wikiPage.putText("Can not automatically fetch S5 FOM links.")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#4 Candidate Appearance")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Do the Qscan figures show what we would expect for a gravitational-wave event?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    imageDict=dict()
    indexDict=dict()
    thumbDict=dict()
    for sngl in wikiCoinc.sngls:
        frametype,channelName=stfu_pipe.figure_out_type(sngl.time,sngl.ifo,'hoft')
        indexDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_hoft_frame(),\
            "*/%s/*/%s/*index.html"%(frametype,sngl.time))
        imageDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_hoft_frame(),\
            "*%s*_%s_16.00_spectrogram_whitened.png"%(sngl.time,channelName))
        thumbDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_hoft_frame(),\
            "*%s*_%s_16.00_spectrogram_whitened?thumb.png"%(sngl.time,channelName))
        #
        #Convert disk locals to URLs
        imageDict[sngl.ifo]=[file2URL.convert(x) for x in imageDict[sngl.ifo]]
        indexDict[sngl.ifo]=[file2URL.convert(x) for x in indexDict[sngl.ifo]]
        thumbDict[sngl.ifo]=[file2URL.convert(x) for x in thumbDict[sngl.ifo]]
        if len(indexDict[sngl.ifo]) < 1:
            wikiPage.putText("GW data channel scans for %s not available.\n"%sngl.ifo)
    enoughImage=[len(imageDict[key])>0 for key in imageDict.keys()].count(True) >= 1
    enoughIndex=[len(indexDict[key])>0 for key in indexDict.keys()].count(True) >= 1
    if enoughImage and enoughIndex:
        wikiPage.insertQscanTable(imageDict,\
            thumbDict,\
            indexDict)
    else:
        sys.stdout.write("Warning: Candidate appearance plot import problem.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#5 Seismic Plots")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Is the seismic activity insignificant around the time of the candidate?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    imageDict=dict()
    indexDict=dict()
    thumbDict=dict()
    zValueDict=dict()
    imageDictAQ=dict()
    indexDictAQ=dict()
    thumbDictAQ=dict()
    zValueDictAQ=dict()
    for sngl in wikiCoinc.sngls_in_coinc():
        indexDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_RDS_R_L1_SEIS(),\
            "*/%s_RDS_*/%s/*index.html"%(sngl.ifo,sngl.time))
        imageDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_RDS_R_L1_SEIS(),\
            "*/%s_RDS_*/%s/*SEI*_512.00_spectrogram_whitened.png"%(sngl.ifo,sngl.time))
        thumbDict[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_RDS_R_L1_SEIS(),\
            "*/%s_RDS_*/%s/*SEI*_512.00_spectrogram_whitened?thumb.png"%(sngl.ifo,sngl.time))
        #Search for corresponding Omega summary.txt file
        zValueFiles=fnmatch.filter(wikiFileFinder.get_RDS_R_L1_SEIS(),\
            "*/%s_RDS_*/%s/*summary.txt"%(sngl.ifo,sngl.time))
        zValueDict[sngl.ifo]=list()
        if (len(zValueFiles) > 0):
            for zFile in zValueFiles:
                zValueDict[sngl.ifo].extend(wikiFileFinder.__readSummary__(zFile))
            #Reparse only keeping SEI channels
            tmpList=list()
            for chan in zValueDict[sngl.ifo]:
                if "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDict[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Omega scan summary file not found for %s. ...skipping...\n"%sngl.ifo)
        #Search for analyzeQscan files
        #/L1-analyseQscan_L1_932797512_687_seis_rds_L1_SEI-ETMX_X_z_scat-unspecified-gpstime.png
        timeString=str(float(sngl.time)).replace(".","_")
        zValueFiles=fnmatch.filter(wikiFileFinder.get_analyzeQscan_SEIS(),\
            "*_%s_%s_*.txt"%(sngl.ifo,timeString))
        indexDictAQ[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_analyzeQscan_SEIS(),\
            "*_%s_%s_*.html"%(sngl.ifo,timeString))
        thumbDictAQ[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_analyzeQscan_SEIS(),\
            "*%s-*_%s_*_SEI*_z_scat-unspecified-gpstime_thumb.png"%(sngl.ifo,timeString))
        imageDictAQ[sngl.ifo]=fnmatch.filter(wikiFileFinder.get_analyzeQscan_SEIS(),\
            "*%s-*_%s_*_SEI*_z_scat-unspecified-gpstime.png"%(sngl.ifo,timeString))
        #Process zValue ranking file if found for IFO
        zValueDictAQ[sngl.ifo]=list()
        if len(zValueFiles)>0:
            for zFile in zValueFiles:
                zValueDictAQ[sngl.ifo].extend(wikiFileFinder.__readZranks__(zFile))
            #Reparse keeping SEI channels
            tmpList=list()
            for chan in zValueDictAQ[sngl.ifo]:
                if "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDictAQ[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Analyze Qscan Z ranking file not found for %s. ...skipping...\n"%sngl.ifo)
        #Convert disk locals to URLs
        imageDict[sngl.ifo]=[file2URL.convert(x) for x in imageDict[sngl.ifo]]
        indexDict[sngl.ifo]=[file2URL.convert(x) for x in indexDict[sngl.ifo]]
        thumbDict[sngl.ifo]=[file2URL.convert(x) for x in thumbDict[sngl.ifo]]
        imageDictAQ[sngl.ifo]=[file2URL.convert(x) for x in imageDictAQ[sngl.ifo]]
        indexDictAQ[sngl.ifo]=[file2URL.convert(x) for x in indexDictAQ[sngl.ifo]]
        thumbDictAQ[sngl.ifo]=[file2URL.convert(x) for x in thumbDictAQ[sngl.ifo]]
        if len(indexDict[sngl.ifo]) < 1:
            wikiPage.putText("Seismic scans for %s not available.\n"%sngl.ifo)
    enoughImage=[len(imageDict[key])>0 for key in imageDict.keys()].count(True) >=1
    enoughIndex=[len(indexDict[key])>0 for key in indexDict.keys()].count(True) >=1
    if enoughImage and enoughIndex:
        wikiPage.insertAnalyzeQscanTable(imageDict,
            thumbDict,
            indexDict,
            zValueDict,
            imageDictAQ,
            thumbDictAQ,
            indexDictAQ,
            zValueDictAQ)
    else:
        sys.stdout.write("Warning: Seismic plots product import problem.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#6 Other environmental causes")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Were the environmental disturbances (other than seismic) insignificant at the time of the candidate?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    imageDict=dict()
    indexDict=dict()
    thumbDict=dict()
    zValueDict=dict()
    imageDictAQ=dict()
    indexDictAQ=dict()
    thumbDictAQ=dict()
    zValueDictAQ=dict()
    #Select only PEM channels
    for sngl in wikiCoinc.sngls_in_coinc():
        imageDict[sngl.ifo]=list()
        indexDict[sngl.ifo]=list()
        thumbDict[sngl.ifo]=list()
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*html"%(sngl.ifo,sngl.time)):
            indexDict[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*_16.00_spectrogram_whitened.png"%(sngl.ifo,sngl.time)):
            if "PEM" in myFile.upper() and not "SEI" in myFile.upper():
                imageDict[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*_16.00_spectrogram_whitened?thumb.png"%(sngl.ifo,sngl.time)):
            if "PEM" in myFile.upper() and not "SEI" in myFile.upper():
                thumbDict[sngl.ifo].append(myFile)
        #Search for corresponding Omega summary.txt file
        zValueFiles=fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*summary.txt"%(sngl.ifo,sngl.time))
        zValueDict[sngl.ifo]=list()
        if len(zValueFiles)>0:
            for zFile in zValueFiles:
                zValueDict[sngl.ifo].extend(wikiFileFinder.__readSummary__(zFile))
            #Reparse only keeping PEM and not SEI channels
            tmpList=list()
            for chan in zValueDict[sngl.ifo]:
                if "PEM" in chan[0] and not "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDict[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Omega scan summary file not found for %s. ...skipping...\n"%sngl.ifo)
        #Select associated analyzeQscans
        imageDictAQ[sngl.ifo]=list()
        indexDictAQ[sngl.ifo]=list()
        thumbDictAQ[sngl.ifo]=list()
        timeString=str(float(sngl.time)).replace(".","_")
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*html"%(sngl.ifo,timeString)):
            indexDictAQ[sngl.ifo].append(myFile)
        zValueFiles=fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*txt"%(sngl.ifo,timeString))
        zValueDictAQ[sngl.ifo]=list()
        if len(zValueFiles)>0:
            for zFile in zValueFiles:
                zValueDictAQ[sngl.ifo].extend(wikiFileFinder.__readZranks__(zFile))
            #Reparse keeping PEM and not SEI channels
            tmpList=list()
            for chan in zValueDictAQ[sngl.ifo]:
                if "PEM" in chan[0] and not "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDictAQ[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Analyze Qscan Z ranking file not found for %s. ...skipping...\n"%sngl.ifo)
        #H1-analyseQscan_H1_931176926_116_rds_H0_PEM-MY_SEISX_z_scat-unspecified-gpstime_thumb.png
        #H1-analyseQscan_H1_931176926_116_rds_H0_PEM-MY_SEISX_z_scat-unspecified-gpstime.png
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*_z_scat-unspecified-gpstime.png"%(sngl.ifo,timeString)):
            if "PEM" in myFile.upper() and not "SEI" in myFile.upper():
                imageDictAQ[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*_z_scat-unspecified-gpstime?thumb.png"%(sngl.ifo,timeString)):
            if "PEM" in myFile.upper() and not "SEI" in myFile.upper():
                thumbDictAQ[sngl.ifo].append(myFile)
        #Convert disk locals to URLs
        imageDict[sngl.ifo]=[file2URL.convert(x) for x in imageDict[sngl.ifo]]
        indexDict[sngl.ifo]=[file2URL.convert(x) for x in indexDict[sngl.ifo]]
        thumbDict[sngl.ifo]=[file2URL.convert(x) for x in thumbDict[sngl.ifo]]
        imageDictAQ[sngl.ifo]=[file2URL.convert(x) for x in imageDictAQ[sngl.ifo]]
        indexDictAQ[sngl.ifo]=[file2URL.convert(x) for x in indexDictAQ[sngl.ifo]]
        thumbDictAQ[sngl.ifo]=[file2URL.convert(x) for x in thumbDictAQ[sngl.ifo]]
        if len(imageDict[sngl.ifo]) < 1:
            wikiPage.putText("PEM scans for %s not available.\n"%sngl.ifo)
    enoughImage=[len(imageDict[key])>0 for key in imageDict.keys()].count(True) >=1
    enoughIndex=[len(indexDict[key])>0 for key in indexDict.keys()].count(True) >=1
    if enoughImage and enoughIndex:
        wikiPage.insertAnalyzeQscanTable(imageDict,
            thumbDict,
            indexDict,
            zValueDict,
            imageDictAQ,
            thumbDictAQ,
            indexDictAQ,
            zValueDictAQ)
    else:
        sys.stdout.write("Warning: PEM plots import trouble.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#7 Auxiliary degree of freedom")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Were the auxiliary channel transients coincident with the candidate insignificant?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    imageDict=dict()
    indexDict=dict()
    thumbDict=dict()
    zValueDict=dict()
    imageDictAQ=dict()
    indexDictAQ=dict()
    thumbDictAQ=dict()
    zValueDictAQ=dict()
    #Select only AUX channels
    for sngl in wikiCoinc.sngls:
        imageDict[sngl.ifo]=list()
        indexDict[sngl.ifo]=list()
        thumbDict[sngl.ifo]=list()
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*html"%(sngl.ifo,sngl.time)):
            indexDict[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*_16.00_spectrogram_whitened.png"%(sngl.ifo,sngl.time)):
            if not "PEM" in myFile.upper() or not "SEI" in myFile.upper():
                imageDict[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*_16.00_spectrogram_whitened?thumb.png"%(sngl.ifo,sngl.time)):
            if not "PEM" in myFile.upper() or not "SEI" in myFile.upper():
                thumbDict[sngl.ifo].append(myFile)
        zValueFiles=fnmatch.filter(wikiFileFinder.get_RDS_R_L1(),\
            "*/%s_RDS_*/%s/*summary.txt"%(sngl.ifo,sngl.time))
        zValueDict[sngl.ifo]=list()
        if len(zValueFiles)>0:
            for zFile in zValueFiles:
                zValueDict[sngl.ifo].extend(wikiFileFinder.__readSummary__(zFile))
            #Reparse NOT keeping PEM or SEI channels
            tmpList=list()
            for chan in zValueDict[sngl.ifo]:
                if not "PEM" in chan[0] or not "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDict[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Omega scan summary file not found for %s. ...skipping...\n"%sngl.ifo)
        #Select associated analyzeQscans
        imageDictAQ[sngl.ifo]=list()
        indexDictAQ[sngl.ifo]=list()
        thumbDictAQ[sngl.ifo]=list()
        timeString=str(float(sngl.time)).replace(".","_")
        #H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.html
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*html"%(sngl.ifo,timeString)):
            indexDictAQ[sngl.ifo].append(myFile)
        zValueFiles=fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*txt"%(sngl.ifo,timeString))
        #Process zValue ranking file if found for IFO
        zValueDictAQ[sngl.ifo]=list()
        if len(zValueFiles)>0:
            for zFile in zValueFiles:
                zValueDictAQ[sngl.ifo].extend(wikiFileFinder.__readZranks__(zFile))
            #Reparse NOT keeping PEM or SEI channels
            tmpList=list()
            for chan in zValueDictAQ[sngl.ifo]:
                if not "PEM" in chan[0] or not "SEI" in chan[0]:
                    tmpList.append(chan)
            zValueDictAQ[sngl.ifo]=tmpList
        else:
            sys.stdout.write("Z ranking file not found for %s. ...skipping...\n"%sngl.ifo)
        #H1-analyseQscan_H1_931176926_116_rds_H0_PEM-MY_SEISX_z_scat-unspecified-gpstime_thumb.png
        #H1-analyseQscan_H1_931176926_116_rds_H0_PEM-MY_SEISX_z_scat-unspecified-gpstime.png
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*_z_scat-unspecified-gpstime.png"%(sngl.ifo,timeString)):
            if not "PEM" in myFile.upper() or not "SEI" in myFile.upper():
                imageDictAQ[sngl.ifo].append(myFile)
        for myFile in fnmatch.filter(wikiFileFinder.get_analyzeQscan_RDS(),\
            "*%s-*_%s_*_z_scat-unspecified-gpstime?thumb.png"%(sngl.ifo,timeString)):
            if not "PEM" in myFile.upper() or not "SEI" in myFile.upper():
                thumbDictAQ[sngl.ifo].append(myFile)
        #Convert disk locals to URLs
        imageDict[sngl.ifo]=[file2URL.convert(x) for x in imageDict[sngl.ifo]]
        indexDict[sngl.ifo]=[file2URL.convert(x) for x in indexDict[sngl.ifo]]
        thumbDict[sngl.ifo]=[file2URL.convert(x) for x in thumbDict[sngl.ifo]]
        imageDictAQ[sngl.ifo]=[file2URL.convert(x) for x in imageDictAQ[sngl.ifo]]
        indexDictAQ[sngl.ifo]=[file2URL.convert(x) for x in indexDictAQ[sngl.ifo]]
        thumbDictAQ[sngl.ifo]=[file2URL.convert(x) for x in thumbDictAQ[sngl.ifo]]
        if len(indexDict[sngl.ifo]) < 1:
            wikiPage.putText("Other scans for %s not available.\n"%sngl.ifo)
    enoughImage=[len(imageDict[key])>0 for key in imageDict.keys()].count(True) >=1
    enoughIndex=[len(indexDict[key])>0 for key in indexDict.keys()].count(True) >=1
    if enoughImage and enoughIndex:
        wikiPage.insertAnalyzeQscanTable(imageDict,
            thumbDict,
            indexDict,
            zValueDict,
            imageDictAQ,
            thumbDictAQ,
            indexDictAQ,
            zValueDictAQ)
    else:
        sys.stdout.write("Warning: AUX plots import trouble.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#8 Electronic Log Book")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Were the instruments behaving normally according to the comments posted by the sci-mons or the operators in the e-log?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    wikiLinkLHOlog=wikiPage.makeExternalLink(stfu_pipe.getiLogURL(myCoinc.time,"H1"),
        "Hanford eLog")
    wikiLinkLLOlog=wikiPage.makeExternalLink(stfu_pipe.getiLogURL(myCoinc.time,"L1"),
        "Livingston eLog")
    wikiPage.putText("%s\n\n%s\n\n"%(wikiLinkLHOlog,wikiLinkLLOlog))
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#9 Glitch Report")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Were the instruments behaving normally according to the weekly glitch report?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    if int(wikiCoinc.time) >= endOfS5:
        wikiLinkGlitch=wikiPage.makeExternalLink(
            "https://www.lsc-group.phys.uwm.edu/twiki/bin/view/DetChar/GlitchStudies",
            "Glitch Reports for S6")
    else:
        wikiLinkGlitch=wikiPage.makeExternalLink(
            "http://www.lsc-group.phys.uwm.edu/glitch/investigations/s5index.html#shift",
            "Glitch Reports for S5")
    wikiPage.putText("%s\n"%(wikiLinkGlitch))
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#10 Snr versus time")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Is this trigger significant in a SNR versus time plot of all triggers in its analysis chunk?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#11 Parameters of the candidate")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Does the candidate have a high likelihood of being a gravitational-wave according to its parameters?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Effective Distance Ratio Test\n")
    effDList=wikiFileFinder.get_effDRatio()
    if len(effDList) != 1:
        sys.stdout.write("Warning: Effective Distance Test import problem.\n")
    for myFile in effDList:
        wikiPage.putText("%s\n"%(file(myFile).read()))
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#12 Snr and Chisq")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Are the SNR and CHISQ time series consistent with our expectations for a gravitational wave?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    #
    #Put plots SNR and Chi sqr
    #
    indexList=fnmatch.filter(wikiFileFinder.get_plotsnrchisq(),"*.html")
    thumbList=fnmatch.filter(wikiFileFinder.get_plotsnrchisq(),"*_snr-*thumb.png")
    thumbList.extend(fnmatch.filter(wikiFileFinder.get_plotsnrchisq(),"*_chisq-*thumb.png"))
    thumbList.sort()
    indexList=[file2URL.convert(x) for x in indexList]
    thumbList=[file2URL.convert(x) for x in thumbList]
    #Two thumb types possible "_thumb.png" or ".thumb.png"
    imageList=[x.replace("_thumb.png",".png").replace(".thumb.png",".png") for x in thumbList]
    ifoCount=len(wikiCoinc.sngls)
    rowLabel={"SNR":1,"CHISQ":2}
    rowCount=len(rowLabel)
    colCount=ifoCount
    if len(indexList) >= 1:
        snrTable=wikiPage.wikiTable(rowCount+1,colCount+1)
        for i,sngl in enumerate(wikiCoinc.sngls):
            myIndex=""
            for indexFile in indexList:
                if indexFile.__contains__("_pipe_%s_FOLLOWUP_"%sngl.ifo):
                    myIndex=indexFile
            if myIndex=="":
                snrTable.data[0][i+1]=" %s "%sngl.ifo
            else:
                snrTable.data[0][i+1]=wikiPage.makeExternalLink(myIndex,sngl.ifo)
        for col,sngl in enumerate(wikiCoinc.sngls):
            for row,label in enumerate(rowLabel.keys()):
                snrTable.data[row+1][0]=label
                for k,image in enumerate(imageList):
                    if (image.__contains__("_%s-"%label.lower()) \
                        and image.__contains__("pipe_%s_FOLLOWUP"%sngl.ifo)):
                        snrTable.data[row+1][col+1]=" %s "%(wikiPage.linkedRemoteImage(thumbList[k],thumbList[k]))
        wikiPage.insertTable(snrTable)
    else:
        sys.stdout.write("Warning: SNR and CHISQ plots not found.\n")
        wikiPage.putText("SNR and CHISQ plots not found.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#13 Template bank veto")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Is the bank veto value consistent with our expectations for a gravitational wave?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#14 Coherent studies")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Are the triggers found in multiple interferometers coherent with each other?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    indexList=fnmatch.filter(wikiFileFinder.get_plotchiatimeseries(),"*.html")
    if len(indexList) >= 1:
        myIndex=file2URL.convert(indexList[0])
        wikiPage.putText(wikiPage.makeExternalLink(myIndex,
            "%s Coherence Study Results"%(wikiCoinc.ifos)))
        thumbList=fnmatch.filter(wikiFileFinder.get_plotchiatimeseries(),
            "PLOT_CHIA_%s_snr-squared*thumb.png"%(wikiCoinc.time))
        imageList=[x.replace("_thumb.png",".png").replace(".thumb.png",".png") for x in thumbList]
        rowCount=len(imageList)
        colCount=1
        cohSnrTimeTable=wikiPage.wikiTable(rowCount+1,colCount)
        cohSnrTimeTable.data[0][0]="%s Coherent SNR Squared Times Series"%(wikiCoinc.ifos)
        for i,image in enumerate(imageList):
            cohSnrTimeTable.data[i+1][0]=wikiPage.linkedRemoteImage(image,thumbList[i])
        wikiPage.insertTable(cohSnrTimeTable)
    else:
        sys.stdout.write("Warning: Coherent plotting jobs not found.\n")
        wikiPage.putText("Coherent Studies plots not found.\n")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#15 Segmentation Stability")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Is the candidate stable against changes in segmentation?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #Additional Checklist Item
    wikiPage.subsection("#16 Calibration Stability")
    wikiPage.subsubsection("Question")
    wikiPage.putText("Is the candidate stable against changes in calibration that are consistent with systematic uncertainties?")
    wikiPage.subsubsection("Answer")
    wikiPage.putText("Edit Here")
    wikiPage.subsubsection("Relevant Information")
    wikiPage.putText("Plots and pipeline data go here!")
    wikiPage.subsubsection("Investigator Comments")
    wikiPage.putText("Edit Here")
    wikiPage.insertHR()
    #
    #
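For checklist item #3 the figure-of-merit links are laid out three per table row, with row 0 reserved for the header and column 0 for the shift label. A worked example of the index arithmetic (floor division written as "//" so the snippet runs under either Python):

for currentIndex in range(6):
    myRow = currentIndex // 3 + 1
    myCol = currentIndex % 3 + 1
    print("link %d -> row %d, col %d" % (currentIndex, myRow, myCol))
# link 0 -> row 1, col 1
# link 1 -> row 1, col 2
# link 2 -> row 1, col 3
# link 3 -> row 2, col 1 ...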
sys.stdout.write("Found: %s\n",publication_directory)
sys.stdout.write("Found: %s\n"%publication_directory)
def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None):
    """
    """
    self.type=str(type)
    self.ifo=str(ifo)
    self.time=float(time)
    self.snr=float(snr)
    self.chisqr=float(chisqr)
    self.mass1=float(mass1)
    self.mass2=float(mass2)
    self.mchirp=float(mchirp)
sys.stdout.write("Found: %s\n",publication_url)
sys.stdout.write("Found: %s\n"%publication_url)
(context: identical to the __init__ listing above)
self.offset_vectors = offset_vectors
self.offset_vectors = list(offset_vectors)
self.offset_vectors.sort(key = lambda offset_vector: sorted(offset_vector.items()))
def set_offset_vectors(self, offset_vectors):
    """
    Set the list of offset vectors to be considered when deciding the
    bins in which each file belongs.  Must be called before packing
    any files.  The input is a list of dictionaries, each mapping
    instruments to offsets.
    """
    self.offset_vectors = offset_vectors
    min_offset = min(min(offset_vector.values()) for offset_vector in offset_vectors)
    max_offset = max(max(offset_vector.values()) for offset_vector in offset_vectors)
    # largest gap that can conceivably be closed by the time slides
    self.max_gap = max_offset - min_offset
    assert self.max_gap >= 0
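A short worked example of the max_gap computation above: with a zero-lag vector and one 95 s time slide, the largest and smallest offsets anywhere in the list are 95.0 and 0.0, so max_gap is 95.0, the widest separation the slides could close between two files.

offset_vectors = [{"H1": 0.0, "L1": 0.0}, {"H1": 0.0, "L1": 95.0}]
min_offset = min(min(v.values()) for v in offset_vectors)
max_offset = max(max(v.values()) for v in offset_vectors)
assert max_offset - min_offset == 95.0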
def split_bins(cafepacker, extentlimit):
    """
    Split bins of stored in CafePacker until each bin has an extent no
    longer than extentlimit.
    """
def split_bins(cafepacker, extentlimit, verbose = False):
    """
    Split bins in CafePacker so that each bin has an extent no longer
    than extentlimit.
    """
def split_bins(cafepacker, extentlimit):
    """
    Split bins of stored in CafePacker until each bin has an extent no
    longer than extentlimit.
    """
    #
    # loop over all the bins in cafepacker.bins.  we pop items out of
    # cafepacker.bins and append new ones to the end so we need a while
    # loop checking the extent of each bin in cafepacker.bins until all
    # bins are done being split
    #
    idx = 0
    while idx < len(cafepacker.bins):
        if abs(cafepacker.bins[idx].extent) <= extentlimit:
            #
            # bin doesn't need splitting so move to next
            #
            idx += 1
            continue
        #
        # split this bin so pop it out of the list
        #
        bigbin = cafepacker.bins.pop(idx)
        #
        # calculate the central time of the union of all the input
        # files in the bin
        #
        splittime = lsctables.LIGOTimeGPS(bigbin.extent[0] + (bigbin.extent[1] - bigbin.extent[0])/2)
        #
        # split the segmentlistdict at this time
        #
        splitseglistdict = segments.segmentlistdict()
        for key in bigbin.size.keys():
            splitseglistdict[key] = segments.segmentlist([segments.segment(-segments.infinity(),splittime)])
        #
        # create bins for the first and second halves
        #
        bin1 = LALCacheBin()
        bin1.size = bigbin.size & splitseglistdict
        bin1.extent = bigbin.extent & splitseglistdict.values()[0][0]
        bin2 = LALCacheBin()
        bin2.size = bigbin.size & ~splitseglistdict
        bin2.extent = bigbin.extent & (~splitseglistdict.values()[0])[0]
        #
        # remove unused keys from the smaller bins' segmentlistdicts
        #
        newsize = segments.segmentlistdict()
        for key in bin1.size.keys():
            if len(bin1.size[key]):
                newsize[key] = bin1.size[key]
        bin1.size = newsize
        newsize = segments.segmentlistdict()
        for key in bin2.size.keys():
            if len(bin2.size[key]):
                newsize[key] = bin2.size[key]
        bin2.size = newsize
        #
        # find which of the objects in bigbin.objects intersect the two
        # smaller bins
        #
        for cache in bigbin.objects:
            thisseglistdict = cache.to_segmentlistdict()
            coinc1 = 0
            coinc2 = 0
            for offset_vector in cafepacker.offset_vectors:
                #
                # loop over offset vectors updating the smaller
                # bins and the object we are checking
                #
                bin1.size.offsets.update(offset_vector)
                bin2.size.offsets.update(offset_vector)
                thisseglistdict.offsets.update(offset_vector)
                if not coinc1 and bin1.size.is_coincident(thisseglistdict, keys = offset_vector.keys()):
                    #
                    # object is coincident with bin1
                    #
                    coinc1 = 1
                    bin1.objects.append(cache)
                if not coinc2 and bin2.size.is_coincident(thisseglistdict, keys = offset_vector.keys()):
                    #
                    # object is coincident with bin2
                    #
                    coinc2 = 1
                    bin2.objects.append(cache)
                #
                # end loop if known to be coincident with both bins
                #
                if coinc1 and coinc2:
                    break
            #
            # clear offsets applied to object
            #
            thisseglistdict.offsets.clear()
        #
        # clear offsets applied to bins
        #
        bin1.size.offsets.clear()
        bin2.size.offsets.clear()
        #
        # append smaller bins to list of bins
        #
        cafepacker.bins.append(bin1)
        cafepacker.bins.append(bin2)
        #
        # do not increment idx as we popped the large bin out of
        # cafepacker.bins
        #
    #
    # sort the bins in cafepacker
    #
    cafepacker.bins.sort()
    return cafepacker
if abs(cafepacker.bins[idx].extent) <= extentlimit:
origbin = cafepacker.bins[idx]
n = int(math.ceil(float(abs(origbin.extent)) / extentlimit))
if n <= 1:
(context: identical to the split_bins listing above)
bigbin = cafepacker.bins.pop(idx)
splittime = lsctables.LIGOTimeGPS(bigbin.extent[0] + (bigbin.extent[1] - bigbin.extent[0])/2)
splitseglistdict = segments.segmentlistdict()
for key in bigbin.size.keys():
    splitseglistdict[key] = segments.segmentlist([segments.segment(-segments.infinity(),splittime)])
bin1 = LALCacheBin()
bin1.size = bigbin.size & splitseglistdict
bin1.extent = bigbin.extent & splitseglistdict.values()[0][0]
bin2 = LALCacheBin()
bin2.size = bigbin.size & ~splitseglistdict
bin2.extent = bigbin.extent & (~splitseglistdict.values()[0])[0]
newsize = segments.segmentlistdict()
for key in bin1.size.keys():
    if len(bin1.size[key]):
        newsize[key] = bin1.size[key]
bin1.size = newsize
newsize = segments.segmentlistdict()
for key in bin2.size.keys():
    if len(bin2.size[key]):
        newsize[key] = bin2.size[key]
bin2.size = newsize
for cache in bigbin.objects:
    thisseglistdict = cache.to_segmentlistdict()
    coinc1 = 0
    coinc2 = 0
    for offset_vector in cafepacker.offset_vectors:
splits = [-segments.infinity()] + [lsctables.LIGOTimeGPS(origbin.extent[0] + i * float(origbin.extent[1] - origbin.extent[0]) / n) for i in range(1, n)] + [+segments.infinity()]
if verbose:
    print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(split) for split in splits[1:-1]))
splits = [segments.segmentlist([segments.segment(*bounds)]) for bounds in zip(splits[:-1], splits[1:])]
splits = [segments.segmentlistdict.fromkeys(origbin.size, seglist) for seglist in splits]
newbins = []
for split in splits:
    newbins.append(LALCacheBin())
    newbins[-1].size = origbin.size & split
    for key in tuple(newbins[-1].size):
        if not newbins[-1].size[key]:
            del newbins[-1].size[key]
    newbins[-1].extent = newbins[-1].size.extent_all()
for bin in newbins:
    for cache_entry in origbin.objects:
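The added code divides an oversized bin into n equal pieces rather than halving it repeatedly. A worked example of the boundary arithmetic with made-up numbers: a bin spanning [1000, 1950) with extentlimit = 400 gives n = ceil(950/400) = 3, with interior split points at 1000 + i*950/3.

import math

extent = (1000.0, 1950.0)
extentlimit = 400.0
n = int(math.ceil((extent[1] - extent[0]) / extentlimit))
boundaries = [extent[0] + i * (extent[1] - extent[0]) / n for i in range(1, n)]
print("%d pieces, cut at %s" % (n, boundaries))
# 3 pieces, cut near 1316.67 and 1633.33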
(context: identical to the split_bins listing above)
bin1.size.offsets.update(offset_vector)
bin2.size.offsets.update(offset_vector)
thisseglistdict.offsets.update(offset_vector)
if not coinc1 and bin1.size.is_coincident(thisseglistdict, keys = offset_vector.keys()):
        if cache_entry.segment.protract(cafepacker.max_gap).disjoint(bin.extent):
            continue
        cache_entry_segs = cache_entry.to_segmentlistdict()
        for offset_vector in cafepacker.offset_vectors:
            cache_entry_segs.offsets.update(offset_vector)
(context: identical to the split_bins listing above)
                coinc1 = 1
                bin1.objects.append(cache)
            if not coinc2 and bin2.size.is_coincident(thisseglistdict, keys = offset_vector.keys()):
                coinc2 = 1
                bin2.objects.append(cache)
            if coinc1 and coinc2:
                break
        thisseglistdict.offsets.clear()
    bin1.size.offsets.clear()
    bin2.size.offsets.clear()
    cafepacker.bins.append(bin1)
    cafepacker.bins.append(bin2)
cafepacker.bins.sort()
return cafepacker
            if cache_entry_segs.intersects_segment(bin.extent):
                bin.objects.append(cache_entry)
                break
cafepacker.bins[idx:idx+1] = newbins
idx += len(newbins)
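Unlike the old pop-and-append version, the added code splices the new bins into the list in place of the oversized one and jumps the index past them. The splice idiom, illustrated on plain strings:

bins = ["a", "BIG", "c"]
idx = 1
newbins = ["b1", "b2", "b3"]
bins[idx:idx + 1] = newbins   # replace the one oversized entry
idx += len(newbins)           # skip the pieces; they are already small enough
print("%s %d" % (bins, idx))  # ['a', 'b1', 'b2', 'b3', 'c'] 4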
(context: identical to the split_bins listing above)
pattern = "%%s%%0%dd.cache" % int(log10(len(bins)) + 1)
pattern = "%%s%%0%dd.cache" % int(math.log10(len(bins)) + 1)
def write_caches(base, bins, instruments, verbose = False):
    filenames = []
    if len(bins):
        pattern = "%%s%%0%dd.cache" % int(log10(len(bins)) + 1)
    for n, bin in enumerate(bins):
        filename = pattern % (base, n)
        filenames.append(filename)
        if verbose:
            print >>sys.stderr, "writing %s ..." % filename
        f = file(filename, "w")
        for cacheentry in bin.objects:
            if instruments & set(cacheentry.to_segmentlistdict().keys()):
                print >>f, str(cacheentry)
    return filenames
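The pattern string above is built in two stages: the outer "%" bakes a field width into the format, so every cache file name gets enough zero padding to sort lexicographically. For example:

import math

for nbins in (5, 42, 1000):
    pattern = "%%s%%0%dd.cache" % int(math.log10(nbins) + 1)
    print(pattern % ("bin", 7))
# bin7.cache
# bin07.cache
# bin0007.cache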
raise ValueError r'param-name cannot have "\n","\t", "DROP", or "DELETE" in it'
raise ValueError, r'param-name cannot have "\n","\t", "DROP", or "DELETE" in it'
def __init__( self, table_name, table_param, param_ranges_opt, verbose = False ):
    """
    Parse --param-ranges option.  Creates self.param, which is the
    table_name and the table_param appended together (with a '.'),
    and self.param_ranges, which is a list of tuples that give the
    lower parameter value, whether it is an open or closed boundary,
    and the same for the upper parameter.  For example, if table_name
    is coinc_inspiral, table_param is mchirp and param_ranges_opt is
    '[2,8);[8,17]' you will get:
        self.param = 'coinc_inspiral.mchirp'
        self.param_ranges = [
            ( ('>=',2.0), ('<',8.0) ),
            ( ('>=',8.0), ('<=', 17.0) )
            ]
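A minimal sketch of the interval parsing the docstring describes (the real class also validates the option text); '[' and ']' map to closed boundaries, '(' and ')' to open ones:

def parse_param_ranges(opt):
    ranges = []
    for piece in opt.split(';'):
        piece = piece.strip()
        lo_op = '>=' if piece[0] == '[' else '>'
        hi_op = '<=' if piece[-1] == ']' else '<'
        lo, hi = piece[1:-1].split(',')
        ranges.append(((lo_op, float(lo)), (hi_op, float(hi))))
    return ranges

assert parse_param_ranges('[2,8);[8,17]') == \
    [(('>=', 2.0), ('<', 8.0)), (('>=', 8.0), ('<=', 17.0))]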
os.path.join("bin", "OddsPostProc.py"),
def run(self):
    # remove the automatically generated user env scripts
    for script in ["pylal-user-env.sh", "pylal-user-env.csh"]:
        log.info("removing " + script)
        try:
            os.unlink(os.path.join("etc", script))
        except:
            pass
def detector_thresholds(min_threshold, ifos, RA, dec, gps_time, sensitivities=None):
def detector_thresholds(ifos, RA, dec, gps_time, sensitivities=None, min_threshold=4.5, max_threshold=7.5):
def detector_thresholds(min_threshold, ifos, RA, dec, gps_time, sensitivities=None):
    """
    Return a dictionary of sensitivity thresholds for each detector,
    based on a minimum threshold of min_threshold in the least
    sensitive one, for a source at position (RA,dec) specified in
    radians at time gps_time.  Specifying a dictionary of
    sensitivities allows one to weight also by the relative SNR of a
    reference system in each detector to handle different noise
    curves.
    """
    # Recurse if multiple RA, dec and GPS times are specified
    if type(gps_time)!=float or type(RA)!=float or type(dec)!=float:
        assert len(gps_time)==len(RA),len(gps_time)==len(dec)
        return map(lambda (a,b,c): detector_thresholds(min_threshold,ifos,a,b,c,sensitivities), zip(RA,dec,gps_time))
    from pylal import antenna
    # Sensitivities specifies relative SNRs of a reference signal (BNS)
    if sensitivities is None:
        sensitivities={}
        for det in ifos:
            sensitivies[det]=1.0
    else:
        assert len(ifos)==len(sensitivites)
    # Normalise sensitivities
    minsens=min(sensitivities.values())
    for det in ifos:
        sensitivities[det]/=minsens
    resps={}
    threshs={}
    # Make a dictionary of average responses
    for det in ifos:
        resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2]
    worst_resp=min(resps.values())
    # Assuming that lowest threshold is in worst detector, return thresholds
    for det in ifos:
        threshs[det]=min_threshold*(resps[det]/worst_resp)*sensitivities[det]
    return threshs
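A worked example of the threshold scaling with made-up response values: the least-sensitive detector sets the floor at min_threshold and the others are scaled up in proportion.

min_threshold = 4.5
resps = {"H1": 0.8, "L1": 0.4}    # hypothetical average antenna responses
worst_resp = min(resps.values())  # 0.4, so L1 takes the minimum threshold
threshs = dict((det, min_threshold * resps[det] / worst_resp) for det in resps)
for det in sorted(threshs):
    print("%s %.1f" % (det, threshs[det]))
# H1 9.0
# L1 4.5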
return map(lambda (a,b,c): detector_thresholds(min_threshold,ifos,a,b,c,sensitivities), zip(RA,dec,gps_time))
return map(lambda (a,b,c): detector_thresholds(ifos,a,b,c,sensitivities,min_threshold=min_threshold,max_threshold=max_threshold), zip(RA,dec,gps_time))
(context: identical to the detector_thresholds listing above)
sensitivies[det]=1.0
sensitivities[det]=1.0
(context: identical to the detector_thresholds listing above)
assert len(ifos)==len(sensitivites)
assert len(ifos)==len(sensitivities)
(context: identical to the detector_thresholds listing above)
resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2]
resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2]*sensitivities[det]
def detector_thresholds(min_threshold, ifos, RA, dec, gps_time, sensitivities=None): """ Return a dictionary of sensitivity thresholds for each detector, based on a minimum threshold of min_threshold in the least sensitive one, for a source at position (RA,dec) specified in radians at time gps_time. Specifying a dictionary of sensitivities allows one to weight also by the relative SNR of a reference system in each detector to handle different noise curves. """ # Recurse if multiple RA, dec and GPS times are specified if type(gps_time)!=float or type(RA)!=float or type(dec)!=float: assert len(gps_time)==len(RA),len(gps_time)==len(dec) return map(lambda (a,b,c): detector_thresholds(min_threshold,ifos,a,b,c,sensitivities), zip(RA,dec,gps_time)) from pylal import antenna # Sensitivies specifies relative SNRs of a reference signal (BNS) if sensitivities is None: sensitivities={} for det in ifos: sensitivies[det]=1.0 else: assert len(ifos)==len(sensitivites) # Normalise sensitivities minsens=min(sensitivities.values()) for det in ifos: sensitivities[det]/=minsens resps={} threshs={} # Make a dictionary of average responses for det in ifos: resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2] worst_resp=min(resps.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in ifos: threshs[det]=min_threshold*(resps[det]/worst_resp)*sensitivities[det] return threshs
threshs[det]=min_threshold*(resps[det]/worst_resp)*sensitivities[det]
threshs[det]=min_threshold*(resps[det]/worst_resp) if threshs[det]>max_threshold: threshs[det]=max_threshold
def detector_thresholds(min_threshold, ifos, RA, dec, gps_time, sensitivities=None): """ Return a dictionary of sensitivity thresholds for each detector, based on a minimum threshold of min_threshold in the least sensitive one, for a source at position (RA,dec) specified in radians at time gps_time. Specifying a dictionary of sensitivities allows one to weight also by the relative SNR of a reference system in each detector to handle different noise curves. """ # Recurse if multiple RA, dec and GPS times are specified if type(gps_time)!=float or type(RA)!=float or type(dec)!=float: assert len(gps_time)==len(RA),len(gps_time)==len(dec) return map(lambda (a,b,c): detector_thresholds(min_threshold,ifos,a,b,c,sensitivities), zip(RA,dec,gps_time)) from pylal import antenna # Sensitivies specifies relative SNRs of a reference signal (BNS) if sensitivities is None: sensitivities={} for det in ifos: sensitivies[det]=1.0 else: assert len(ifos)==len(sensitivites) # Normalise sensitivities minsens=min(sensitivities.values()) for det in ifos: sensitivities[det]/=minsens resps={} threshs={} # Make a dictionary of average responses for det in ifos: resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2] worst_resp=min(resps.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in ifos: threshs[det]=min_threshold*(resps[det]/worst_resp)*sensitivities[det] return threshs
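The four rows above all patch the same scaling step in detector_thresholds. As a reading aid, here is a minimal self-contained sketch of the post-fix rule; the response and sensitivity numbers are invented stand-ins for pylal.antenna.response output, not real values:

    # Assumed antenna responses and relative SNR weights (illustrative only).
    resps = {"H1": 0.9, "L1": 0.6, "V1": 0.3}
    sensitivities = {"H1": 1.0, "L1": 1.0, "V1": 0.5}

    min_threshold = 5.5
    weighted = dict((d, resps[d] * sensitivities[d]) for d in resps)
    worst = min(weighted.values())
    # The least sensitive detector keeps min_threshold; the others scale up.
    threshs = dict((d, min_threshold * (weighted[d] / worst)) for d in weighted)
    assert min(threshs.values()) == min_threshold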
if triple_coinc is True: for ifo_3 in ifos: if ifos.index(ifo_3)>ifos.index(ifo_2): triples.append(ifo_1+ifo_2+ifo_3)
for ifo_3 in ifos: if ifos.index(ifo_3)>ifos.index(ifo_2): triples.append(ifo_1+ifo_2+ifo_3)
def coinc_segments(start,end,ifos): #== first, construct doubles and triples lists doubles=[] triples=[] for ifo_1 in ifos: for ifo_2 in ifos: if ifos.index(ifo_2)>ifos.index(ifo_1): doubles.append(ifo_1+ifo_2) if triple_coinc is True: for ifo_3 in ifos: if ifos.index(ifo_3)>ifos.index(ifo_2): triples.append(ifo_1+ifo_2+ifo_3) segments={} double_segments={} triple_segments={} #== grab science data for each ifo science_flag = {'H1':'H1:DMT-SCIENCE',\ 'H2':'H2:DMT-SCIENCE',\ 'L1':'L1:DMT-SCIENCE',\ 'V1':'V1:ITF_SCIENCEMODE'} for ifo in ifos: segments[ifo] = grab_segments(gps_start,\ gps_end,\ science_flag[ifo]) #== grab double-coincidence segments for double in doubles: ifo_1 = double[0:2] ifo_2 = double[2:4] double_segments[double] = segments[ifo_1] & segments[ifo_2] #== grab triple-coincidence segments for triple in triples: ifo_1=triple[0:2] ifo_2=triple[2:4] ifo_3=triple[4:6] #== grab concident segments triple_segments[triple] = segments[ifo_1] & segments[ifo_2] \ & segments[ifo_3] return segments,double_segments,triple_segments
segments[ifo] = grab_segments(gps_start,\ gps_end,\
segments[ifo] = grab_segments(start,\ end,\
def coinc_segments(start,end,ifos): #== first, construct doubles and triples lists doubles=[] triples=[] for ifo_1 in ifos: for ifo_2 in ifos: if ifos.index(ifo_2)>ifos.index(ifo_1): doubles.append(ifo_1+ifo_2) if triple_coinc is True: for ifo_3 in ifos: if ifos.index(ifo_3)>ifos.index(ifo_2): triples.append(ifo_1+ifo_2+ifo_3) segments={} double_segments={} triple_segments={} #== grab science data for each ifo science_flag = {'H1':'H1:DMT-SCIENCE',\ 'H2':'H2:DMT-SCIENCE',\ 'L1':'L1:DMT-SCIENCE',\ 'V1':'V1:ITF_SCIENCEMODE'} for ifo in ifos: segments[ifo] = grab_segments(gps_start,\ gps_end,\ science_flag[ifo]) #== grab double-coincidence segments for double in doubles: ifo_1 = double[0:2] ifo_2 = double[2:4] double_segments[double] = segments[ifo_1] & segments[ifo_2] #== grab triple-coincidence segments for triple in triples: ifo_1=triple[0:2] ifo_2=triple[2:4] ifo_3=triple[4:6] #== grab concident segments triple_segments[triple] = segments[ifo_1] & segments[ifo_2] \ & segments[ifo_3] return segments,double_segments,triple_segments
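The index-comparison loops in coinc_segments enumerate unordered pairs and triples by hand; itertools expresses the same enumeration directly. A small cross-check, with an example ifo list:

    from itertools import combinations

    ifos = ["H1", "H2", "L1", "V1"]
    doubles = ["".join(pair) for pair in combinations(ifos, 2)]
    triples = ["".join(trip) for trip in combinations(ifos, 3)]
    # combinations() preserves input order, so this reproduces the
    # ifos.index(...) ordering tests used in the function above.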
maxdx=max(xrange(0,N),key=lambda i:dot(sampcart,skycarts[i]))
maxdot=0 for i in range(0,N): thisdot=dot(sampcart,skycarts[i]) if thisdot>maxdot: maxdot=thisdot maxdx=i
def skyhist_cart(skycarts,samples): """ Histogram the list of samples into bins defined by Cartesian vectors in skycarts """ dot=numpy.dot N=len(skycarts) print 'operating on %d sky points'%(N) bins=zeros(N) for sample in samples: sampcart=pol2cart(sample[RAdim],sample[decdim]) maxdx=max(xrange(0,N),key=lambda i:dot(sampcart,skycarts[i])) bins[maxdx]+=1 return (skycarts,bins)
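The replacement loop above trades the per-sample max(xrange(...), key=...) call for an explicit scan. If skycarts were held as an (N, 3) NumPy array, the same closest-pixel search would collapse to one matrix product plus argmax; a sketch with random stand-in data:

    import numpy

    # Assumed inputs: N unit vectors for the sky pixels, one Cartesian sample.
    skycarts = numpy.random.randn(100, 3)
    norms = numpy.sqrt((skycarts ** 2).sum(axis=1))
    skycarts = skycarts / norms[:, None]
    sampcart = numpy.array([0.0, 0.0, 1.0])

    # Index of the pixel whose direction best matches the sample.
    maxdx = int(numpy.argmax(numpy.dot(skycarts, sampcart)))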
segtest = Popen('FrCheck -i '+frame,shell=True,stdout=PIPE) if os.waitpid(segtest.pid,0)[1]==11: continue
segtest = subprocess.Popen([frcheck,"-i",frame],stdout=subprocess.PIPE) if os.waitpid(segtest.pid,0)[1]==11: print >>sys.stderr, "Warning. Segmentation fault detected with command:" print >>sys.stderr, "FrCheck -i "+frame continue
def grab_data(start,end,channel,type,\ nds=False,verbose=False,dmt=False): """ This function will return the frame data for the given channel of the given type in the given [start,end] time range and will construct a gps time vector to go with it. The nds option is not yet supported, and the dmt option will return data for dmt channels in frames not found by ligo_data_find. >>>grab_data(960000000,960000001,'H1:IFO-SV_STATE_VECTOR','H1_RDS_R_L3') ([960000000.0,960000001.0,960000002.0,960000003.0,960000004.0,960000005.0], [15.0, 14.125, 13.0, 13.0, 13.0, 13.0]) """ time = [] data = [] #== generate framecache if verbose: print >>sys.stdout, "Generating framecache..." sys.stdout.flush() if not dmt: cache = generate_cache(start,end,channel[0:1],type,return_files=True) else: cache = dmt_cache(start,end,channel[0:1],type) #== loop over frames in cache for frame in cache: #== check frame file exists if not os.path.isfile(frame): continue #== check for Segmentation fault segtest = Popen('FrCheck -i '+frame,shell=True,stdout=PIPE) if os.waitpid(segtest.pid,0)[1]==11: continue segtest.stdout.close() #== try to extract data from frame try: frame_data,data_start,_,dt,_,_ = Fr.frgetvect1d(frame,channel) if frame_data==[]: print >>sys.stderr, "No data for "+channel+" in "+frame continue #== construct time array frame_length = float(dt)*len(frame_data) frame_time = data_start+dt*numpy.arange(len(frame_data)) #== discard frame data outside of time span for i in range(len(frame_data)): if frame_time[i] < start: continue if frame_time[i] > end: continue time.append(frame_time[i]) data.append(frame_data[i]) except: print >>sys.stderr, "Failed to access frame:\n"+frame continue return time,data
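One subtlety behind the FrCheck row above is the exit-status encoding: os.waitpid() returns the raw status word, where a segmentation fault shows up as signal number 11 in the low bits, while Popen.returncode reports death by signal as a negative number (-11 for SIGSEGV). A hedged sketch of the returncode-based equivalent, assuming FrCheck is on the PATH:

    import signal
    import subprocess

    def frame_ok(frame, frcheck="FrCheck"):
        """Return False when FrCheck dies with SIGSEGV on this frame file."""
        proc = subprocess.Popen([frcheck, "-i", frame],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return proc.returncode != -signal.SIGSEGV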
ldf_exe,sourced = GetCommandOutput('which ligo_data_find') if ldf_exe.find('ligo_data_find')==-1: print "Cannot find ligo_data_find. Please ensure, lscsoft is sourced." sys.exit()
def generate_cache(start_time,end_time,ifos,types,return_files=False): """ This function will return a cache of files as found by ligo_data_find, given start and end time, and lists of ifos and types. If the return_files option is given as 'True' the function will return a list of frames with absolute paths, otherwise it will return a frame cache (as used in wpipline, for example). Example: >>>generate_cache(961977615,962582415,R,H) ['H R 961977600 962000000 32 /archive/frames/S6/L0/LHO/H-R-9619' 'H R 962000000 962064032 32 /archive/frames/S6/L0/LHO/H-R-9620'] >>>generate_cache(961977615,962582415,R,H,return_files=True) [/archive/frames/S6/L0/LHO/H-R-9619/H-R-961977600-32.gwf, /archive/frames/S6/L0/LHO/H-R-9619/H-R-961977632-32.gwf, ... /archive/frames/S6/L0/LHO/H-R-9620/H-R-962064000-32.gwf] """ cache = [] #== if given strings, make single-element lists if isinstance(ifos,str): ifos=[ifos] if isinstance(types,str): types=[types] #== loop over each ifo for ifo in ifos: #== loop over each frame type for type in types: ldf_exe,sourced = GetCommandOutput('which ligo_data_find') if ldf_exe.find('ligo_data_find')==-1: print "Cannot find ligo_data_find. Please ensure, lscsoft is sourced." sys.exit() try: data_find_cmd = ldf_exe.replace('\n','')+\ ''' --gps-start-time '''+str(start_time)+\ ''' --gps-end-time '''+str(end_time)+\ ''' --observatory '''+ifo[0:1]+\ ''' --type '''+type+\ ''' --url-type file '''+\ ''' --frame-cache | sort''' #== run ligo_data_find and append each frame to the cache cache_out = Popen(data_find_cmd,shell=True,stdout=PIPE) for line in cache_out.stdout.readlines(): #== if line is not recognised in standard frame cache format, skip if len(line.split(' '))!=6: continue cache.append(line.replace('\n','')) cache_out.stdout.close() except: continue #== if no files: if cache==[]: print >>sys.stderr, "Warning: no frames found." #== if asked for the files, expand the cache if return_files: cache = expand_cache(cache) return cache
data_find_cmd = ldf_exe.replace('\n','')+\
data_find_cmd = ldf+\
def generate_cache(start_time,end_time,ifos,types,return_files=False): """ This function will return a cache of files as found by ligo_data_find, given start and end time, and lists of ifos and types. If the return_files option is given as 'True' the function will return a list of frames with absolute paths, otherwise it will return a frame cache (as used in wpipline, for example). Example: >>>generate_cache(961977615,962582415,R,H) ['H R 961977600 962000000 32 /archive/frames/S6/L0/LHO/H-R-9619' 'H R 962000000 962064032 32 /archive/frames/S6/L0/LHO/H-R-9620'] >>>generate_cache(961977615,962582415,R,H,return_files=True) [/archive/frames/S6/L0/LHO/H-R-9619/H-R-961977600-32.gwf, /archive/frames/S6/L0/LHO/H-R-9619/H-R-961977632-32.gwf, ... /archive/frames/S6/L0/LHO/H-R-9620/H-R-962064000-32.gwf] """ cache = [] #== if given strings, make single-element lists if isinstance(ifos,str): ifos=[ifos] if isinstance(types,str): types=[types] #== loop over each ifo for ifo in ifos: #== loop over each frame type for type in types: ldf_exe,sourced = GetCommandOutput('which ligo_data_find') if ldf_exe.find('ligo_data_find')==-1: print "Cannot find ligo_data_find. Please ensure, lscsoft is sourced." sys.exit() try: data_find_cmd = ldf_exe.replace('\n','')+\ ''' --gps-start-time '''+str(start_time)+\ ''' --gps-end-time '''+str(end_time)+\ ''' --observatory '''+ifo[0:1]+\ ''' --type '''+type+\ ''' --url-type file '''+\ ''' --frame-cache | sort''' #== run ligo_data_find and append each frame to the cache cache_out = Popen(data_find_cmd,shell=True,stdout=PIPE) for line in cache_out.stdout.readlines(): #== if line is not recognised in standard frame cache format, skip if len(line.split(' '))!=6: continue cache.append(line.replace('\n','')) cache_out.stdout.close() except: continue #== if no files: if cache==[]: print >>sys.stderr, "Warning: no frames found." #== if asked for the files, expand the cache if return_files: cache = expand_cache(cache) return cache
cache_out = Popen(data_find_cmd,shell=True,stdout=PIPE)
cache_out = subprocess.Popen(data_find_cmd,shell=True,\ stdout=subprocess.PIPE)
def generate_cache(start_time,end_time,ifos,types,return_files=False): """ This function will return a cache of files as found by ligo_data_find, given start and end time, and lists of ifos and types. If the return_files option is given as 'True' the function will return a list of frames with absolute paths, otherwise it will return a frame cache (as used in wpipline, for example). Example: >>>generate_cache(961977615,962582415,R,H) ['H R 961977600 962000000 32 /archive/frames/S6/L0/LHO/H-R-9619' 'H R 962000000 962064032 32 /archive/frames/S6/L0/LHO/H-R-9620'] >>>generate_cache(961977615,962582415,R,H,return_files=True) [/archive/frames/S6/L0/LHO/H-R-9619/H-R-961977600-32.gwf, /archive/frames/S6/L0/LHO/H-R-9619/H-R-961977632-32.gwf, ... /archive/frames/S6/L0/LHO/H-R-9620/H-R-962064000-32.gwf] """ cache = [] #== if given strings, make single-element lists if isinstance(ifos,str): ifos=[ifos] if isinstance(types,str): types=[types] #== loop over each ifo for ifo in ifos: #== loop over each frame type for type in types: ldf_exe,sourced = GetCommandOutput('which ligo_data_find') if ldf_exe.find('ligo_data_find')==-1: print "Cannot find ligo_data_find. Please ensure, lscsoft is sourced." sys.exit() try: data_find_cmd = ldf_exe.replace('\n','')+\ ''' --gps-start-time '''+str(start_time)+\ ''' --gps-end-time '''+str(end_time)+\ ''' --observatory '''+ifo[0:1]+\ ''' --type '''+type+\ ''' --url-type file '''+\ ''' --frame-cache | sort''' #== run ligo_data_find and append each frame to the cache cache_out = Popen(data_find_cmd,shell=True,stdout=PIPE) for line in cache_out.stdout.readlines(): #== if line is not recognised in standard frame cache format, skip if len(line.split(' '))!=6: continue cache.append(line.replace('\n','')) cache_out.stdout.close() except: continue #== if no files: if cache==[]: print >>sys.stderr, "Warning: no frames found." #== if asked for the files, expand the cache if return_files: cache = expand_cache(cache) return cache
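generate_cache builds one long shell string for ligo_data_find, including a trailing '| sort'. The same call can be made without shell=True by passing an argument list and sorting in Python, which avoids quoting pitfalls; a sketch using only the flags already visible in this record:

    import subprocess

    def run_data_find(ldf, start_time, end_time, ifo, frame_type):
        """Run ligo_data_find once and return its cache lines, sorted."""
        cmd = [ldf,
               "--gps-start-time", str(start_time),
               "--gps-end-time", str(end_time),
               "--observatory", ifo[0:1],
               "--type", frame_type,
               "--url-type", "file",
               "--frame-cache"]
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
        return sorted(line for line in out.splitlines() if line.strip())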
ldf_exe='ligo_data_find' ldf,ldf_status = GetCommandOutput('which '+ldf_exe) if ldf_status != 0: print >>sys.stderr, \ "Error: ligo_data_find not found. Please ensure lscsoftrc is sourced" sys.exit()
p = subprocess.Popen(["which", "ligo_data_find"], stdout=subprocess.PIPE) ldf = p.communicate()[0].replace('\n','') if p.returncode != 0: raise ValueError, "ligo_data_find" p.stdout.close() ldf = os.path.realpath(ldf)
def find_types(types,search='standard'): """ This function will return a valid list of LIGO frame types given the list of type strings. The search option defines the breadth of the search, to speed up the search, the following search options are supported: 'standard','short','full'. The 'R', 'T', and 'M' (raw, raw second trends, and raw minute trends) are treated as special cases, so as not to return all types containing those letters. Example: >>>find_types('H1_RDS') ['H1_RDS_C01_LX', 'H1_RDS_C02_LX', 'H1_RDS_C03_L1', 'H1_RDS_C03_L2', 'H1_RDS_C03_L2_ET', 'H1_RDS_C03_L2_ET2', 'H1_RDS_C03_L2_ET30', 'H1_RDS_C04_LX', 'H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4'] >>>find_types(['H1_RDS','R'],search='short') ['H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4', 'R'] """ #== check for ldf ldf_exe='ligo_data_find' ldf,ldf_status = GetCommandOutput('which '+ldf_exe) if ldf_status != 0: print >>sys.stderr, \ "Error: ligo_data_find not found. Please ensure lscsoftrc is sourced" sys.exit() #== make sure types is a list if types is None: types = [] if isinstance(types,str): types = [types] #== set up search command find_cmd = ldf_exe+" -y | egrep " #== treat 'R','M' and 'T' as special cases, #== so not to grep for all types containing 'R' special_types = ['M','R','T'] special_cases=[] #== set list of ignored strings in `ligo_data_find -y` #== there are thousands of GRBXXXXXX frame types, so ignore them if search!='full': vgrep_list = ['GRB'] if search=='short': #== all of these strings are part of frame types that can be ignored for a #== short search short_ignore_list = ['CAL','BRST','Mon','SG','IMR','DuoTone','Concat',\ 'BH','WNB','Lock','_M','_S5','Multi','Noise','_C0'] vgrep_list.extend(short_ignore_list) #== add each of those ignored strings to a vgrep command find_cmd+="-v '" for vstring in vgrep_list: find_cmd+=vstring+'|' #== take off last '|' find_cmd = find_cmd[0:-1] + "'" #== if given types if types: find_cmd+=' | egrep ' for type in types: #== if type is one of the special cases, save. if type in special_types: special_cases.append(type) #== otherwise add it to the grep command else: if not find_cmd.endswith('|'): find_cmd+= "'" find_cmd+=type+"|" #== take of the extra character if find_cmd[-1]=="|": find_cmd = find_cmd[0:-1] + "'" found_types = [] #== if not searching only for special types, run the grep command if find_cmd != ldf_exe+" -y | egrep '": found_types_out = Popen(find_cmd,shell=True,stdout=PIPE) for line in found_types_out.stdout.readlines(): if line=='\n': continue found_type = line.replace('\n','') found_types.append(found_type) found_types_out.stdout.close() #== append all special cases to the list for type in special_cases: found_types.append(type) if found_types == ['']: print >>sys.stderr, "No data types found, exiting." return found_types
find_cmd = ldf_exe+" -y | egrep "
find_cmd = ldf+" -y | egrep "
def find_types(types,search='standard'): """ This function will return a valid list of LIGO frame types given the list of type strings. The search option defines the breadth of the search, to speed up the search, the following search options are supported: 'standard','short','full'. The 'R', 'T', and 'M' (raw, raw second trends, and raw minute trends) are treated as special cases, so as not to return all types containing those letters. Example: >>>find_types('H1_RDS') ['H1_RDS_C01_LX', 'H1_RDS_C02_LX', 'H1_RDS_C03_L1', 'H1_RDS_C03_L2', 'H1_RDS_C03_L2_ET', 'H1_RDS_C03_L2_ET2', 'H1_RDS_C03_L2_ET30', 'H1_RDS_C04_LX', 'H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4'] >>>find_types(['H1_RDS','R'],search='short') ['H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4', 'R'] """ #== check for ldf ldf_exe='ligo_data_find' ldf,ldf_status = GetCommandOutput('which '+ldf_exe) if ldf_status != 0: print >>sys.stderr, \ "Error: ligo_data_find not found. Please ensure lscsoftrc is sourced" sys.exit() #== make sure types is a list if types is None: types = [] if isinstance(types,str): types = [types] #== set up search command find_cmd = ldf_exe+" -y | egrep " #== treat 'R','M' and 'T' as special cases, #== so not to grep for all types containing 'R' special_types = ['M','R','T'] special_cases=[] #== set list of ignored strings in `ligo_data_find -y` #== there are thousands of GRBXXXXXX frame types, so ignore them if search!='full': vgrep_list = ['GRB'] if search=='short': #== all of these strings are part of frame types that can be ignored for a #== short search short_ignore_list = ['CAL','BRST','Mon','SG','IMR','DuoTone','Concat',\ 'BH','WNB','Lock','_M','_S5','Multi','Noise','_C0'] vgrep_list.extend(short_ignore_list) #== add each of those ignored strings to a vgrep command find_cmd+="-v '" for vstring in vgrep_list: find_cmd+=vstring+'|' #== take off last '|' find_cmd = find_cmd[0:-1] + "'" #== if given types if types: find_cmd+=' | egrep ' for type in types: #== if type is one of the special cases, save. if type in special_types: special_cases.append(type) #== otherwise add it to the grep command else: if not find_cmd.endswith('|'): find_cmd+= "'" find_cmd+=type+"|" #== take of the extra character if find_cmd[-1]=="|": find_cmd = find_cmd[0:-1] + "'" found_types = [] #== if not searching only for special types, run the grep command if find_cmd != ldf_exe+" -y | egrep '": found_types_out = Popen(find_cmd,shell=True,stdout=PIPE) for line in found_types_out.stdout.readlines(): if line=='\n': continue found_type = line.replace('\n','') found_types.append(found_type) found_types_out.stdout.close() #== append all special cases to the list for type in special_cases: found_types.append(type) if found_types == ['']: print >>sys.stderr, "No data types found, exiting." return found_types
if find_cmd != ldf_exe+" -y | egrep '": found_types_out = Popen(find_cmd,shell=True,stdout=PIPE)
if find_cmd != ldf+" -y | egrep '": found_types_out = subprocess.Popen(find_cmd,shell=True,\ stdout=subprocess.PIPE)
def find_types(types,search='standard'): """ This function will return a valid list of LIGO frame types given the list of type strings. The search option defines the breadth of the search, to speed up the search, the following search options are supported: 'standard','short','full'. The 'R', 'T', and 'M' (raw, raw second trends, and raw minute trends) are treated as special cases, so as not to return all types containing those letters. Example: >>>find_types('H1_RDS') ['H1_RDS_C01_LX', 'H1_RDS_C02_LX', 'H1_RDS_C03_L1', 'H1_RDS_C03_L2', 'H1_RDS_C03_L2_ET', 'H1_RDS_C03_L2_ET2', 'H1_RDS_C03_L2_ET30', 'H1_RDS_C04_LX', 'H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4'] >>>find_types(['H1_RDS','R'],search='short') ['H1_RDS_R_L1', 'H1_RDS_R_L3', 'H1_RDS_R_L4', 'R'] """ #== check for ldf ldf_exe='ligo_data_find' ldf,ldf_status = GetCommandOutput('which '+ldf_exe) if ldf_status != 0: print >>sys.stderr, \ "Error: ligo_data_find not found. Please ensure lscsoftrc is sourced" sys.exit() #== make sure types is a list if types is None: types = [] if isinstance(types,str): types = [types] #== set up search command find_cmd = ldf_exe+" -y | egrep " #== treat 'R','M' and 'T' as special cases, #== so not to grep for all types containing 'R' special_types = ['M','R','T'] special_cases=[] #== set list of ignored strings in `ligo_data_find -y` #== there are thousands of GRBXXXXXX frame types, so ignore them if search!='full': vgrep_list = ['GRB'] if search=='short': #== all of these strings are part of frame types that can be ignored for a #== short search short_ignore_list = ['CAL','BRST','Mon','SG','IMR','DuoTone','Concat',\ 'BH','WNB','Lock','_M','_S5','Multi','Noise','_C0'] vgrep_list.extend(short_ignore_list) #== add each of those ignored strings to a vgrep command find_cmd+="-v '" for vstring in vgrep_list: find_cmd+=vstring+'|' #== take off last '|' find_cmd = find_cmd[0:-1] + "'" #== if given types if types: find_cmd+=' | egrep ' for type in types: #== if type is one of the special cases, save. if type in special_types: special_cases.append(type) #== otherwise add it to the grep command else: if not find_cmd.endswith('|'): find_cmd+= "'" find_cmd+=type+"|" #== take of the extra character if find_cmd[-1]=="|": find_cmd = find_cmd[0:-1] + "'" found_types = [] #== if not searching only for special types, run the grep command if find_cmd != ldf_exe+" -y | egrep '": found_types_out = Popen(find_cmd,shell=True,stdout=PIPE) for line in found_types_out.stdout.readlines(): if line=='\n': continue found_type = line.replace('\n','') found_types.append(found_type) found_types_out.stdout.close() #== append all special cases to the list for type in special_cases: found_types.append(type) if found_types == ['']: print >>sys.stderr, "No data types found, exiting." return found_types
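find_types assembles an egrep pipeline by string concatenation; the same include/exclude filtering can be done in Python once the raw type list is in hand. A sketch, where all_types stands in for the output of the `ligo_data_find -y` call (filter_types is a hypothetical helper, not part of the module):

    def filter_types(all_types, wanted, ignored=("GRB",)):
        """Keep frame types matching any wanted substring, minus ignored ones."""
        keep = []
        for t in all_types:
            if any(s in t for s in ignored):
                continue
            if any(w in t for w in wanted):
                keep.append(t)
        return keep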
ldf_exe='ligo_data_find' ldf_status = GetCommandOutput('which '+ldf_exe)[1] if ldf_status != 0: print >>sys.stderr, "Error: ligo_data_find not found. "+\ "Please ensure lscsoftrc is sourced" sys.exit()
p = subprocess.Popen(["which", "ligo_data_find"], stdout=subprocess.PIPE) ldf = p.communicate()[0].replace('\n','') if p.returncode != 0: raise ValueError, "ligo_data_find" p.stdout.close() ldf = os.path.realpath(ldf)
def find_channels(channels=None,\ types=None,\ ifos=None,\ ex_channels=None,\ ignore=[],\ match=False,\ time=None,\ unique=False,\ verbose=False): """ This function will use FrChannels to return all LIGO data channels matching the given list of 'channels' strings, whilst exluding the 'ex_channels' strings. Using find_ifos() and find_types() in the same module (if required), the search is performed over the given ifos for each given type. Use match=True to restrict the search to find channels that exactly match the given 'channels' list (i.e. not a partial match). Use time=True to search for channels in frame types defined at the given epoch. Use unique=True to return a unique list of channels, parsed using the parse_unique_channels() function, otherwise can return multiple instance of the same name string in different types. Returns a list of dqFrameUtils.Channel instances. Examples: >>>channels = find_channels(channels='DARM',types='H1_RDS_R_L1') >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_CTRL H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_CTRL_EXC_DAQ H1_RDS_R_L1 16384.0 H1:LSC-DARM_GAIN H1_RDS_R_L1 16.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3']) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L3 16384.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3'],unique=True) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 """ #== check for ldf ldf_exe='ligo_data_find' ldf_status = GetCommandOutput('which '+ldf_exe)[1] if ldf_status != 0: print >>sys.stderr, "Error: ligo_data_find not found. "+\ "Please ensure lscsoftrc is sourced" sys.exit() #== cannot work with no ifos if ifos is None: ifos = find_ifos(channels,types,ifos) if types is None: types = find_types(types) #== check list status if isinstance(channels,str): channels = [channels] if isinstance(types,str): types = [types] if isinstance(ifos,str): ifos = [ifos] found_channels=[] #== loop over each ifo for ifo in ifos: #== set ligo_data_find frame search time if time is None: time = \ str(GetCommandOutput('tconvert now -2 days')[0]).replace('\n','') if verbose: print_statement = \ "Searching "+str(len(types))+" frame types for: " if channels is not None: for channel in channels: print_statement += channel+', ' print_statement += " in ifo "+ifo else: print_statement+= "all channels, in ifo "+ifo print print_statement for type in types: count=0 #== skip empty frame types or those set for ignorance if type in ignore: continue if type == '': continue if verbose: print >>sys.stdout, " Searching "+str(type)+"...", sys.stdout.flush() #== find first frame file for type frame_cmd = ldf_exe+''' --observatory '''+ifo[0:1]+\ ''' --type='''+type+\ ''' --gps-start-time '''+str(time)+\ ''' --gps-end-time '''+str(time)+\ ''' --url-type file''' frame_out = Popen(frame_cmd,shell=True,stdout=PIPE,stderr=PIPE) frame_status = 0 frame='' for line in frame_out.stdout.readlines(): if line[0:7]=='file://': frame = line break frame_out.stdout.close() frame = frame.replace('\n','') #== if frame is found: if frame_status == 0 and frame != "": info = frame.split(' ') frame = info[-1].replace('file://localhost','') #== get channels contained in frame, grepping for input channel string channel_find_cmd = "FrChannels "+frame+" | grep "+ifo #== add grep options for each included channel if channels is not None: channel_find_cmd += " | egrep '" for channel in channels: channel_find_cmd += channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== add grep options for each excluded channel if ex_channels is not None: channel_find_cmd += " | egrep -v '" for ex_channel in ex_channels: channel_find_cmd += ex_channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== grab channels try: channel_list_out = Popen(channel_find_cmd,shell=True,stdout=PIPE) for line in channel_list_out.stdout.readlines(): data = line.replace('\n','') name,sampling = data.split(' ') #== if asked for exact match, check: if match: if name not in channels: continue #== generate structure and append to list found_channel = Channel(name,type=type,sampling=sampling) found_channels.append(found_channel) count+=1 sys.stdout.flush() channel_list_out.stdout.close() except: print " Failed to find channels for type "+type+", using the"+\ " following frame\n"+frame continue #== print channel count for data type if verbose: print >>sys.stdout, count,"channels found" if verbose: print >>sys.stdout if unique: found_channels = parse_unique_channels(found_channels) return found_channels
frame_cmd = ldf_exe+''' --observatory '''+ifo[0:1]+\
frame_cmd = ldf+''' --observatory '''+ifo[0:1]+\
def find_channels(channels=None,\ types=None,\ ifos=None,\ ex_channels=None,\ ignore=[],\ match=False,\ time=None,\ unique=False,\ verbose=False): """ This function will use FrChannels to return all LIGO data channels matching the given list of 'channels' strings, whilst exluding the 'ex_channels' strings. Using find_ifos() and find_types() in the same module (if required), the search is performed over the given ifos for each given type. Use match=True to restrict the search to find channels that exactly match the given 'channels' list (i.e. not a partial match). Use time=True to search for channels in frame types defined at the given epoch. Use unique=True to return a unique list of channels, parsed using the parse_unique_channels() function, otherwise can return multiple instance of the same name string in different types. Returns a list of dqFrameUtils.Channel instances. Examples: >>>channels = find_channels(channels='DARM',types='H1_RDS_R_L1') >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_CTRL H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_CTRL_EXC_DAQ H1_RDS_R_L1 16384.0 H1:LSC-DARM_GAIN H1_RDS_R_L1 16.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3']) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L3 16384.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3'],unique=True) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 """ #== check for ldf ldf_exe='ligo_data_find' ldf_status = GetCommandOutput('which '+ldf_exe)[1] if ldf_status != 0: print >>sys.stderr, "Error: ligo_data_find not found. "+\ "Please ensure lscsoftrc is sourced" sys.exit() #== cannot work with no ifos if ifos is None: ifos = find_ifos(channels,types,ifos) if types is None: types = find_types(types) #== check list status if isinstance(channels,str): channels = [channels] if isinstance(types,str): types = [types] if isinstance(ifos,str): ifos = [ifos] found_channels=[] #== loop over each ifo for ifo in ifos: #== set ligo_data_find frame search time if time is None: time = \ str(GetCommandOutput('tconvert now -2 days')[0]).replace('\n','') if verbose: print_statement = \ "Searching "+str(len(types))+" frame types for: " if channels is not None: for channel in channels: print_statement += channel+', ' print_statement += " in ifo "+ifo else: print_statement+= "all channels, in ifo "+ifo print print_statement for type in types: count=0 #== skip empty frame types or those set for ignorance if type in ignore: continue if type == '': continue if verbose: print >>sys.stdout, " Searching "+str(type)+"...", sys.stdout.flush() #== find first frame file for type frame_cmd = ldf_exe+''' --observatory '''+ifo[0:1]+\ ''' --type='''+type+\ ''' --gps-start-time '''+str(time)+\ ''' --gps-end-time '''+str(time)+\ ''' --url-type file''' frame_out = Popen(frame_cmd,shell=True,stdout=PIPE,stderr=PIPE) frame_status = 0 frame='' for line in frame_out.stdout.readlines(): if line[0:7]=='file://': frame = line break frame_out.stdout.close() frame = frame.replace('\n','') #== if frame is found: if frame_status == 0 and frame != "": info = frame.split(' ') frame = info[-1].replace('file://localhost','') #== get channels contained in frame, grepping for input channel string channel_find_cmd = "FrChannels "+frame+" | grep "+ifo #== add grep options for each included channel if channels is not None: channel_find_cmd += " | egrep '" for channel in channels: channel_find_cmd += channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== add grep options for each excluded channel if ex_channels is not None: channel_find_cmd += " | egrep -v '" for ex_channel in ex_channels: channel_find_cmd += ex_channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== grab channels try: channel_list_out = Popen(channel_find_cmd,shell=True,stdout=PIPE) for line in channel_list_out.stdout.readlines(): data = line.replace('\n','') name,sampling = data.split(' ') #== if asked for exact match, check: if match: if name not in channels: continue #== generate structure and append to list found_channel = Channel(name,type=type,sampling=sampling) found_channels.append(found_channel) count+=1 sys.stdout.flush() channel_list_out.stdout.close() except: print " Failed to find channels for type "+type+", using the"+\ " following frame\n"+frame continue #== print channel count for data type if verbose: print >>sys.stdout, count,"channels found" if verbose: print >>sys.stdout if unique: found_channels = parse_unique_channels(found_channels) return found_channels
frame_out = Popen(frame_cmd,shell=True,stdout=PIPE,stderr=PIPE)
frame_out = subprocess.Popen(frame_cmd,shell=True,stdout=subprocess.PIPE,\ stderr=subprocess.PIPE)
def find_channels(channels=None,\ types=None,\ ifos=None,\ ex_channels=None,\ ignore=[],\ match=False,\ time=None,\ unique=False,\ verbose=False): """ This function will use FrChannels to return all LIGO data channels matching the given list of 'channels' strings, whilst exluding the 'ex_channels' strings. Using find_ifos() and find_types() in the same module (if required), the search is performed over the given ifos for each given type. Use match=True to restrict the search to find channels that exactly match the given 'channels' list (i.e. not a partial match). Use time=True to search for channels in frame types defined at the given epoch. Use unique=True to return a unique list of channels, parsed using the parse_unique_channels() function, otherwise can return multiple instance of the same name string in different types. Returns a list of dqFrameUtils.Channel instances. Examples: >>>channels = find_channels(channels='DARM',types='H1_RDS_R_L1') >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_CTRL H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_CTRL_EXC_DAQ H1_RDS_R_L1 16384.0 H1:LSC-DARM_GAIN H1_RDS_R_L1 16.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3']) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L3 16384.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3'],unique=True) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 """ #== check for ldf ldf_exe='ligo_data_find' ldf_status = GetCommandOutput('which '+ldf_exe)[1] if ldf_status != 0: print >>sys.stderr, "Error: ligo_data_find not found. "+\ "Please ensure lscsoftrc is sourced" sys.exit() #== cannot work with no ifos if ifos is None: ifos = find_ifos(channels,types,ifos) if types is None: types = find_types(types) #== check list status if isinstance(channels,str): channels = [channels] if isinstance(types,str): types = [types] if isinstance(ifos,str): ifos = [ifos] found_channels=[] #== loop over each ifo for ifo in ifos: #== set ligo_data_find frame search time if time is None: time = \ str(GetCommandOutput('tconvert now -2 days')[0]).replace('\n','') if verbose: print_statement = \ "Searching "+str(len(types))+" frame types for: " if channels is not None: for channel in channels: print_statement += channel+', ' print_statement += " in ifo "+ifo else: print_statement+= "all channels, in ifo "+ifo print print_statement for type in types: count=0 #== skip empty frame types or those set for ignorance if type in ignore: continue if type == '': continue if verbose: print >>sys.stdout, " Searching "+str(type)+"...", sys.stdout.flush() #== find first frame file for type frame_cmd = ldf_exe+''' --observatory '''+ifo[0:1]+\ ''' --type='''+type+\ ''' --gps-start-time '''+str(time)+\ ''' --gps-end-time '''+str(time)+\ ''' --url-type file''' frame_out = Popen(frame_cmd,shell=True,stdout=PIPE,stderr=PIPE) frame_status = 0 frame='' for line in frame_out.stdout.readlines(): if line[0:7]=='file://': frame = line break frame_out.stdout.close() frame = frame.replace('\n','') #== if frame is found: if frame_status == 0 and frame != "": info = frame.split(' ') frame = info[-1].replace('file://localhost','') #== get channels contained in frame, grepping for input channel string channel_find_cmd = "FrChannels "+frame+" | grep "+ifo #== add grep options for each included channel if channels is not None: channel_find_cmd += " | egrep '" for channel in channels: channel_find_cmd += channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== add grep options for each excluded channel if ex_channels is not None: channel_find_cmd += " | egrep -v '" for ex_channel in ex_channels: channel_find_cmd += ex_channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== grab channels try: channel_list_out = Popen(channel_find_cmd,shell=True,stdout=PIPE) for line in channel_list_out.stdout.readlines(): data = line.replace('\n','') name,sampling = data.split(' ') #== if asked for exact match, check: if match: if name not in channels: continue #== generate structure and append to list found_channel = Channel(name,type=type,sampling=sampling) found_channels.append(found_channel) count+=1 sys.stdout.flush() channel_list_out.stdout.close() except: print " Failed to find channels for type "+type+", using the"+\ " following frame\n"+frame continue #== print channel count for data type if verbose: print >>sys.stdout, count,"channels found" if verbose: print >>sys.stdout if unique: found_channels = parse_unique_channels(found_channels) return found_channels
channel_list_out = Popen(channel_find_cmd,shell=True,stdout=PIPE)
channel_list_out = subprocess.Popen(channel_find_cmd,shell=True,\ stdout=subprocess.PIPE)
def find_channels(channels=None,\ types=None,\ ifos=None,\ ex_channels=None,\ ignore=[],\ match=False,\ time=None,\ unique=False,\ verbose=False): """ This function will use FrChannels to return all LIGO data channels matching the given list of 'channels' strings, whilst exluding the 'ex_channels' strings. Using find_ifos() and find_types() in the same module (if required), the search is performed over the given ifos for each given type. Use match=True to restrict the search to find channels that exactly match the given 'channels' list (i.e. not a partial match). Use time=True to search for channels in frame types defined at the given epoch. Use unique=True to return a unique list of channels, parsed using the parse_unique_channels() function, otherwise can return multiple instance of the same name string in different types. Returns a list of dqFrameUtils.Channel instances. Examples: >>>channels = find_channels(channels='DARM',types='H1_RDS_R_L1') >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_CTRL H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_CTRL_EXC_DAQ H1_RDS_R_L1 16384.0 H1:LSC-DARM_GAIN H1_RDS_R_L1 16.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3']) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 H1:LSC-DARM_ERR H1_RDS_R_L3 16384.0 >>>channels = find_channels(channels='DARM_ERR',types=['H1_RDS_R_L1','H1_RDS_R_L3'],unique=True) >>>for channel in channels: print channel.name,channel.type,channel.sampling H1:LSC-DARM_ERR H1_RDS_R_L1 16384.0 """ #== check for ldf ldf_exe='ligo_data_find' ldf_status = GetCommandOutput('which '+ldf_exe)[1] if ldf_status != 0: print >>sys.stderr, "Error: ligo_data_find not found. "+\ "Please ensure lscsoftrc is sourced" sys.exit() #== cannot work with no ifos if ifos is None: ifos = find_ifos(channels,types,ifos) if types is None: types = find_types(types) #== check list status if isinstance(channels,str): channels = [channels] if isinstance(types,str): types = [types] if isinstance(ifos,str): ifos = [ifos] found_channels=[] #== loop over each ifo for ifo in ifos: #== set ligo_data_find frame search time if time is None: time = \ str(GetCommandOutput('tconvert now -2 days')[0]).replace('\n','') if verbose: print_statement = \ "Searching "+str(len(types))+" frame types for: " if channels is not None: for channel in channels: print_statement += channel+', ' print_statement += " in ifo "+ifo else: print_statement+= "all channels, in ifo "+ifo print print_statement for type in types: count=0 #== skip empty frame types or those set for ignorance if type in ignore: continue if type == '': continue if verbose: print >>sys.stdout, " Searching "+str(type)+"...", sys.stdout.flush() #== find first frame file for type frame_cmd = ldf_exe+''' --observatory '''+ifo[0:1]+\ ''' --type='''+type+\ ''' --gps-start-time '''+str(time)+\ ''' --gps-end-time '''+str(time)+\ ''' --url-type file''' frame_out = Popen(frame_cmd,shell=True,stdout=PIPE,stderr=PIPE) frame_status = 0 frame='' for line in frame_out.stdout.readlines(): if line[0:7]=='file://': frame = line break frame_out.stdout.close() frame = frame.replace('\n','') #== if frame is found: if frame_status == 0 and frame != "": info = frame.split(' ') frame = info[-1].replace('file://localhost','') #== get channels contained in frame, grepping for input channel string channel_find_cmd = "FrChannels "+frame+" | grep "+ifo #== add grep options for each included channel if channels is not None: channel_find_cmd += " | egrep '" for channel in channels: channel_find_cmd += channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== add grep options for each excluded channel if ex_channels is not None: channel_find_cmd += " | egrep -v '" for ex_channel in ex_channels: channel_find_cmd += ex_channel+"|" channel_find_cmd = channel_find_cmd[0:-1]+"'" #== grab channels try: channel_list_out = Popen(channel_find_cmd,shell=True,stdout=PIPE) for line in channel_list_out.stdout.readlines(): data = line.replace('\n','') name,sampling = data.split(' ') #== if asked for exact match, check: if match: if name not in channels: continue #== generate structure and append to list found_channel = Channel(name,type=type,sampling=sampling) found_channels.append(found_channel) count+=1 sys.stdout.flush() channel_list_out.stdout.close() except: print " Failed to find channels for type "+type+", using the"+\ " following frame\n"+frame continue #== print channel count for data type if verbose: print >>sys.stdout, count,"channels found" if verbose: print >>sys.stdout if unique: found_channels = parse_unique_channels(found_channels) return found_channels
for (frac,size) in skyreses: htmlfile.write('<tr><td>%f</td>%f</td></tr>'%(frac,size))
for (frac,skysize) in skyreses: htmlfile.write('<tr><td>%f</td>%f</td></tr>'%(frac,skysize))
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
connection.commit()
database.connection.commit()
def get_likelihood_ratio(coinc_event_id, time_slide_id, row_from_cols = database.sngl_burst_table.row_from_cols, cursor = database.connection.cursor(), offset_vectors = offset_vectors, params_func = params_func, params_func_extra_args = params_func_extra_args): events = map(row_from_cols, cursor.execute("""
sngl_burst.* time_slide.offset,
sngl_burst.*, time_slide.offset
def add_noninjections(self, param_func, database, *args): # iterate over burst<-->burst coincs cursor = database.connection.cursor() for coinc_event_id, time_slide_id in database.connection.cursor().execute("""
self.veto_definer = os.path.basename(definer_file)
def update_veto_lists(self, timeoffset, veto_definer = None):
injectionconfidence=0
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty line in input file: %s'%(sline) proceed=False for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data after the header: %s'%(sline) proceed=False if proceed: llines.append(array(map(float,sline))) flines=array(llines) for i in range(0,len(header)): if header[i].lower().find('log')!=-1 and header[i].lower()!='logl': print 'exponentiating %s'%(header[i]) flines[:,i]=exp(flines[:,i]) header[i]=header[i].replace('log','') if header[i].lower().find('sin')!=-1: print 'asining %s'%(header[i]) flines[:,i]=arcsin(flines[:,i]) header[i]=header[i].replace('sin','') if header[i].lower().find('cos')!=-1: print 'acosing %s'%(header[i]) flines[:,i]=arccos(flines[:,i]) header[i]=header[i].replace('cos','') header[i]=header[i].replace('(','') header[i]=header[i].replace(')','') print 'Read columns %s'%(str(header)) return header,flines
if injectionconfidence:
if injectionconfidence!=0:
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
self.generations[n] = tuple(TimeSlideGraphNode(offset_vector) for offset_vector in ligolw_tisi.time_slide_component_vectors((node.offset_vector for node in self.head), n))
self.generations[n] = tuple(TimeSlideGraphNode(offset_vector) for offset_vector in ligolw_tisi.time_slide_component_vectors((node.offset_vector for node in self.head if len(node.offset_vector) == n), n))
def __init__(self, offset_vector_dict, verbose = False): if verbose: print >>sys.stderr, "constructing coincidence assembly graph for %d target offset vectors ..." % len(offset_vector_dict)
for component in node.components: component_name = vectorstring(component.offset_vector) print >>fileobj, "\t\"%s\" -> \"%s\";" % (component_name, node_name)
if node.components is not None: for component in node.components: print >>fileobj, "\t\"%s\" -> \"%s\";" % (vectorstring(component.offset_vector), node_name)
def write(self, fileobj): """ Write a DOT graph representation of the time slide graph to fileobj. """ vectorstring = lambda offset_vector: ",".join("%s=%g" % (instrument, offset) for instrument, offset in sorted(offset_vector.items()))
component_name = vectorstring(component.offset_vector) print >>fileobj, "\t\"%s\" -> \"%s\";" % (component_name, node_name)
print >>fileobj, "\t\"%s\" -> \"%s\";" % (vectorstring(component.offset_vector), node_name)
def write(self, fileobj): """ Write a DOT graph representation of the time slide graph to fileobj. """ vectorstring = lambda offset_vector: ",".join("%s=%g" % (instrument, offset) for instrument, offset in sorted(offset_vector.items()))
ds = pi*sqrt(2.0)*resolution/180.0
ds = pi*resolution/180.0
def gridsky(resolution): """ grid the sky up into roughly square regions resolution is the length of a side the points get placed at the center of the squares and to first order each square has an area of resolution^2 """ latitude = 0.0 longitude = pi ds = pi*sqrt(2.0)*resolution/180.0 points = [(latitude-0.5*pi, longitude)] while latitude <= pi: latitude += ds longitude = 0.0 points.append((latitude-0.5*pi, longitude)) while longitude <= 2.0*pi: longitude += ds / abs(sin(latitude)) points.append((latitude-0.5*pi, longitude)) #there's some slop so get rid of it and only focus on points on the sphere sphpts = [] for pt in points: if pt[0] > pi/2 or pt[0] < -pi/2 \ or pt[1] > 2*pi or pt[1] < 0: pass else: sphpts.append(pt) return sphpts
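The gridsky row above removes a stray sqrt(2) from the degrees-to-radians conversion, which had inflated the latitude step by about 41%. A two-line check of the before and after values for the nominal 4-degree resolution:

    from math import pi, sqrt

    resolution = 4.0                                 # degrees
    ds_old = pi * sqrt(2.0) * resolution / 180.0     # ~0.0987 rad (~5.66 deg)
    ds_new = pi * resolution / 180.0                 # ~0.0698 rad (exactly 4 deg)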
fgtemp = finegrid
fgtemp = finegrid[:]
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
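The fgtemp row is the classic Python aliasing pitfall: 'fgtemp = finegrid' binds a second name to the same list object, so the later fgtemp.remove(...) calls silently empty the caller's finegrid as well; the slice copy decouples them. A small demonstration:

    finegrid = [(0.1, 0.2), (0.3, 0.4)]
    alias = finegrid            # same list object
    alias.remove((0.1, 0.2))
    assert len(finegrid) == 1   # the caller's list was mutated too

    finegrid = [(0.1, 0.2), (0.3, 0.4)]
    copy = finegrid[:]          # independent shallow copy
    copy.remove((0.1, 0.2))
    assert len(finegrid) == 2   # original untouched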
ds = coarseres*pi/180
ds = coarseres*pi/180.0
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4:
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) \ <= ds*ds/4.0:
def map_grids(coarsegrid,finegrid,coarseres=4.0): """ takes the two grids (lists of lat/lon tuples) and returns a dictionary where the points in the coarse grid are the keys and lists of tuples of points in the fine grid are the values """ fgtemp = finegrid coarsedict = {} ds = coarseres*pi/180 for cpt in coarsegrid: flist = [] for fpt in fgtemp: if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4 and \ (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*abs(sin(fpt[1]))*abs(sin(fpt[1])) \ <= ds*ds/4: flist.append(fpt) coarsedict[cpt] = flist for rpt in flist: fgtemp.remove(rpt) return coarsedict, fgtemp
html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>'
html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+figname+'"/></td>'
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) figname=par1_name+'-'+par2_name+'_2Dkernel.png' twoDKdePath=os.path.join(margdir,figname) if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2Dkde/",'All 2D marginal PDFs (kde)') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
a minimum threshold of min_threshold in the most sensitive one, for a source
a minimum threshold of min_threshold in the least sensitive one, for a source
def detector_thresholds(min_threshold, ifos, RA, dec, gps_time, sensitivities=None): """ Return a dictionary of sensitivity thresholds for each detector, based on a minimum threshold of min_threshold in the most sensitive one, for a source at position (RA,dec) specified in radians at time gps_time. Specifying a dictionary of sensitivities allows one to weight also by the relative SNR of a reference system in each detector to handle different noise curves. """ # Recurse if multiple RA, dec and GPS times are specified if type(gps_time)!=float or type(RA)!=float or type(dec)!=float: assert len(gps_time)==len(RA),len(gps_time)==len(dec) return map(lambda (a,b,c): detector_thresholds(min_threshold,ifos,a,b,c,sensitivities), zip(RA,dec,gps_time)) from pylal import antenna # Sensitivities specifies relative SNRs of a reference signal (BNS) if sensitivities is None: sensitivities={} for det in ifos: sensitivities[det]=1.0 else: assert len(ifos)==len(sensitivities) # Normalise sensitivities minsens=min(sensitivities.values()) for det in ifos: sensitivities[det]/=minsens resps={} threshs={} # Make a dictionary of average responses for det in ifos: resps[det]=antenna.response(gps_time,RA,dec,0,0,'radians',det)[2] worst_resp=min(resps.values()) # Assuming that lowest threshold is in worst detector, return thresholds for det in ifos: threshs[det]=min_threshold*(resps[det]/worst_resp)*sensitivities[det] return threshs
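A minimal usage sketch for detector_thresholds as defined above; the IFO names, sky position, and GPS time are made up, and pylal must be importable for the antenna response lookup.

# Hypothetical call: scale a floor of 5.5 from the least sensitive
# detector up through the more sensitive ones, for one sky position.
thresholds = detector_thresholds(5.5, ['H1', 'L1', 'V1'],
                                 1.2, -0.3, 931046415.0)
# Every returned threshold is >= 5.5, since resps[det]/worst_resp >= 1.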
for name, value in params_func(events, offsetdict, *params_func_extra_args).items():
for name, value in sorted(params_func(events, offsetdict, *params_func_extra_args).items()):
def P(self, params_func, events, offsetdict, *params_func_extra_args): P_bak = 1.0 P_inj = 1.0 for name, value in params_func(events, offsetdict, *params_func_extra_args).items(): P_bak *= self.background_rates[name](*value)[0] P_inj *= self.injection_rates[name](*value)[0] return P_bak, P_inj
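The change above iterates the parameter dictionary in sorted order so that the running products P_bak and P_inj accumulate deterministically; floating-point multiplication is not associative, so without a fixed order the product can differ in the last bits between runs. A small self-contained sketch of the idea, with made-up parameter names:

rates = {'snr': 0.25, 'chisq': 0.5, 'dt': 0.125}   # hypothetical rates
prod = 1.0
for name, value in sorted(rates.items()):
    prod *= value                  # always multiplies chisq, dt, snr in turn
print(prod)                        # reproducible regardless of dict order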
scatter(plx,ply,s=5,c=numpyasarray(toppoints)[:,2],faceted=False,cmap=matplotlib.cm.jet)
scatter(plx,ply,s=5,c=numpy.asarray(toppoints)[:,2],faceted=False,cmap=matplotlib.cm.jet)
def getinjpar(inj,parnum): if parnum==0: return inj.mchirp if parnum==1: return inj.eta if parnum==2: return inj.get_end() if parnum==3: return inj.phi0 if parnum==4: return inj.distance if parnum==5: return inj.longitude if parnum==6: return inj.latitude if parnum==7: return inj.polarization if parnum==8: return inj.inclination return None
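The index-to-attribute ladder above can also be written as a lookup table; a sketch, not the repository's code (attribute names copied from the ladder, get_end wrapped because it is a method):

_INJPAR_GETTERS = [
    lambda inj: inj.mchirp,        # 0
    lambda inj: inj.eta,           # 1
    lambda inj: inj.get_end(),     # 2
    lambda inj: inj.phi0,          # 3
    lambda inj: inj.distance,      # 4
    lambda inj: inj.longitude,     # 5
    lambda inj: inj.latitude,      # 6
    lambda inj: inj.polarization,  # 7
    lambda inj: inj.inclination,   # 8
]

def getinjpar_table(inj, parnum):
    if 0 <= parnum < len(_INJPAR_GETTERS):
        return _INJPAR_GETTERS[parnum](inj)
    return None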
self.scan_type = type
self.scan_type = type.replace("seismic","seis").upper()
def __init__(self, dag, job, cp, opts, ifo, p_nodes=[], type=""):
seps=map(lambda s: ang_dist(sample[6],sample[7],s[1],s[0]),skypoints)
seps=map(lambda s: ang_dist(sample[5],sample[6],s[1],s[0]),skypoints)
def sky_hist(skypoints,samples): N=len(skypoints) print 'operating on %d sky points' % (N) bins=zeros(N) j=0 for sample in samples: seps=map(lambda s: ang_dist(sample[6],sample[7],s[1],s[0]),skypoints) minsep=math.pi for i in range(0,N): if seps[i]<minsep: minsep=seps[i] mindx=i bins[mindx]=bins[mindx]+1 j=j+1 print 'Done %d/%d iterations, minsep=%f degrees'%(j,len(samples),minsep*(180.0/3.1415926)) return (skypoints,bins)
sampcart=pol2cart(sample[6],sample[7])
sampcart=pol2cart(sample[5],sample[6])
def skyhist_cart(skycarts,samples): N=len(skycarts) print 'operating on %d sky points'%(N) bins=zeros(N) j=0 for sample in samples: sampcart=pol2cart(sample[6],sample[7]) dots=map(lambda s: numpy.dot(sampcart,s),skycarts) maxdot=0 for i in range(0,N): if dots[i]>maxdot: maxdot=dots[i] mindx=i bins[mindx]=bins[mindx]+1 j=j+1 # print 'Done %d/%d iterations, minsep=%f degrees'%(j,len(samples),math.acos(maxdot)*(180.0/3.14159)) return (skycarts,bins)
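Both sky-binning loops above search for the extremal pixel by hand; with the pixel vectors and samples as numpy arrays, the inner search collapses to argmax over dot products. A sketch with tiny made-up inputs:

import numpy
skycarts = numpy.asarray([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
samples = numpy.asarray([[0.9, 0.1, 0.0], [0.0, 0.2, 0.9]])  # unit-ish vectors
bins = numpy.zeros(len(skycarts))
for sampcart in samples:
    # nearest pixel on the sphere = largest dot product with the sample
    bins[numpy.argmax(numpy.dot(skycarts, sampcart))] += 1
print(bins)   # [ 1.  0.  1.]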
plot(getinjpar(injection,0),getinjpar(injection,1),'go')
plot(getinjpar(injection,0),getinjpar(injection,1),'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
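This record and the similar ones that follow all add scalex=False, scaley=False when overplotting the injection marker: without them, a marker outside the data range makes matplotlib rescale the axes and shrink the kernel-density plot. A minimal sketch (the Agg backend and output name are assumptions for batch use):

import matplotlib
matplotlib.use('Agg')              # assumption: headless batch plotting
from pylab import plot, savefig
plot([0, 1, 2], [0.0, 0.5, 0.2])   # the data fixes the axis limits
plot(10.0, 10.0, 'go', scalex=False, scaley=False)  # off-screen marker, limits kept
savefig('scalex_demo.png')         # hypothetical output name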
plot(getinjpar(injection,5),getinjpar(injection,6),'go')
plot(getinjpar(injection,5),getinjpar(injection,6),'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
if injection and getinjpar(injection,7)<max(pos[:,7]) and getinjpar(injection,7)>min(pos[:,7]) and getinjpar(injection,8)<max(pos[:,8]) and getinjpar(injection,8)>min(pos[:,8]): plot(getinjpar(injection,7),getinjpar(injection,8),'go')
if injection and getinjpar(injection,7)<max(pos[:,7]) and getinjpar(injection,7)>min(pos[:,7]) and getinjpar(injection,8)<max(pos[:,8]) and getinjpar(injection,8)>min(pos[:,8]): plot(getinjpar(injection,7),getinjpar(injection,8),'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
plot(injection.mass1,injection.mass2,'go')
plot(injection.mass1,injection.mass2,'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
plot(injection.mass1,injection.distance,'go')
plot(injection.mass1,injection.distance,'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
plot(getinjpar(injection,4),getinjpar(injection,8),'go')
plot(getinjpar(injection,4),getinjpar(injection,8),'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
plot(getinjpar(injection,i),getinjpar(injection,j),'go')
plot(getinjpar(injection,i),getinjpar(injection,j),'go',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
plot([getinjpar(injection,i),getinjpar(injection,i)],[0,max(kdepdf)],'r-.')
plot([getinjpar(injection,i),getinjpar(injection,i)],[0,max(kdepdf)],'r-.',scalex=False,scaley=False)
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
htmlfile.write('<img src="'+paramnames[i]+'.png">')
myfig=figure(figsize=(4,3.5),dpi=80) plot(pos[:,i],'.') if injection and min(pos[:,i])<getinjpar(injection,i) and max(pos[:,i])>getinjpar(injection,i): plot([0,len(pos)],[getinjpar(injection,i),getinjpar(injection,i)],'r-.') myfig.savefig(outdir+'/'+paramnames[i]+'_samps.png') htmlfile.write('<img src="'+paramnames[i]+'.png"><img src="'+paramnames[i]+'_samps.png"><br>')
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/yax.ptp()
["src/xlal/date.c"],
["src/xlal/date.c", "src/xlal/misc.c"],
def run(self): # remove the automatically generated user env scripts for script in ["pylal-user-env.sh", "pylal-user-env.csh"]: log.info("removing " + script ) try: os.unlink(os.path.join("etc", script)) except: pass
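The fix above adds misc.c to the extension's source list: every C file the module links against must be named in sources. A distutils sketch of the declaration (the module name here is an assumption, mirrored from the file paths in the record):

from distutils.core import Extension

date_ext = Extension(
    "pylal.xlal.date",                               # assumed module name
    sources=["src/xlal/date.c", "src/xlal/misc.c"],  # all linked C files
)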
y.segment_def_cdb = x.creator_db) AND \ NOT (segment.start_time > %s OR %s > segment.end_time) \
y.segment_def_cdb = x.creator_db) \
def getSciSegs(ifo=None, gpsStart=None, gpsStop=None, cut=bool(False), serverURL=None, segName="DMT-SCIENCE", seglenmin=None, segpading=0
sqlQuery=query01%(segName,ifo,gpsStop,gpsStart,gpsStop,gpsStart)
sqlQuery=query01%(segName,ifo,gpsStop,gpsStart)
def getSciSegs(ifo=None, gpsStart=None, gpsStop=None, cut=bool(False), serverURL=None, segName="DMT-SCIENCE", seglenmin=None, segpading=0
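The predicate removed above is the usual closed-interval overlap test; with it gone, the query no longer windows segments to [gpsStart, gpsStop], presumably so trimming can be handled elsewhere (note the cut argument in the signature). The dropped logic, restated in Python:

def overlaps(seg_start, seg_end, gps_start, gps_stop):
    # mirrors NOT (segment.start_time > gpsStop OR gpsStart > segment.end_time)
    return not (seg_start > gps_stop or gps_start > seg_end)

print(overlaps(100, 200, 150, 300))   # True: they share [150, 200]
print(overlaps(100, 200, 250, 300))   # False: disjoint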
shiftString=shift
shiftString=shiftLabel[shift]
def getFOMLinks(gpsTime=int(0),ifo=("default")): """ Simple method returns a list of links to FOMs ordered by FOM # The list is 2D ie: [['ifo,shift',LINKtoImage,LinktoThumb],['ifo,shift',LinktoImage,LinkToThumb]...] images marked [Eve,Owl,Day] via [p3,p2,p1] in filenames this methd only for S6 and later IFO naming start dates: There were three naming conventions mixed, then p1,p2,p3 and lastly Day,Eve,Owl LHO: 20090724 :: 932428815 LLO: 20090708 :: 931046415 """ urls={ "DEFAULT":"http://www.ligo.caltech.edu/~pshawhan/scilinks.html", "V1":"http://wwwcascina.virgo.infn.it/DetectorOperations/index.htm", "L1":"https://llocds.ligo-la.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif", "H1":"http://lhocds.ligo-wa.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif", "H2":"http://lhocds.ligo-wa.caltech.edu/scirun/S6/robofom/%s/%s%s_FOM%i%s.gif" } ifoTag=ifo.upper() shiftDuration=8; #Give the IFO and shift start hour as integer shiftStandardTime={'L1':{'day':14,'eve':22,'owl':6}, 'H1':{'day':16,'eve':0,'owl':8}, 'H2':{'day':16,'eve':0,'owl':8}, 'V1':{'day':6,'eve':14,'owl':22}} shiftOrder=['day','eve','owl'] shiftLabel={'day':'p1','eve':'p3','owl':'p2'} outputURLs=list() if ((ifo==None) or (gpsTime==None)): sys.stdout.write("getFOMLinks called incorrectly \
zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute("""
offset_vectors = {} for id, instrument, offset in connection.cursor().execute("""
def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries, one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute("""
offset, EXISTS ( SELECT * FROM time_slide AS a WHERE a.time_slide_id == time_slide.time_slide_id AND a.offset != 0 )
offset
def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries, one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute("""
if is_background: if id not in background_time_slides: background_time_slides[id] = {} background_time_slides[id][instrument] = offset else: if id not in zero_lag_time_slides: zero_lag_time_slides[id] = {} zero_lag_time_slides[id][instrument] = offset
if id not in offset_vectors: offset_vectors[id] = {} offset_vectors[id][instrument] = offset zero_lag_time_slides = dict((id, offset_vector) for id, offset_vector in offset_vectors.items() if not any(offset_vector.values())) background_time_slides = dict((id, offset_vector) for id, offset_vector in offset_vectors.items() if any(offset_vector.values()))
def get_time_slides(connection): """ Query the database for the IDs and offsets of all time slides, and return two dictionaries, one containing the all-zero time slides and the other containing the not-all-zero time slides. """ zero_lag_time_slides = {} background_time_slides = {} for id, instrument, offset, is_background in connection.cursor().execute("""
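The rewritten function above gathers every offset vector first and then partitions on whether any offset is non-zero; the same partition, run standalone on made-up slides:

offset_vectors = {
    1: {'H1': 0.0, 'L1': 0.0},    # all-zero: zero-lag
    2: {'H1': 0.0, 'L1': 5.0},    # some non-zero offset: background
}
zero_lag = dict((i, v) for i, v in offset_vectors.items() if not any(v.values()))
background = dict((i, v) for i, v in offset_vectors.items() if any(v.values()))
print(sorted(zero_lag))      # [1]
print(sorted(background))    # [2]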
return r"%s \\times 10^{%d}" % (m, int(e))
return r"%s \times 10^{%d}" % (m, int(e))
def latexnumber(s): """ Convert a string of the form "d.dddde-dd" to "d.dddd \times 10^{-dd}" """ m, e = floatpattern.match(s).groups() return r"%s \\times 10^{%d}" % (m, int(e))
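The fix above matters because inside a raw string a doubled backslash stays doubled: r"\\times" holds two backslash characters and typesets incorrectly, while r"\times" is the single LaTeX command. Checked directly:

print(len(r"\times"))    # 6: one backslash plus five letters
print(len(r"\\times"))   # 7: two literal backslashes
print(r"\\times")        # prints \\times, which is what LaTeX received before the fix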
rate.to_moving_mean_density(binnedarray, filters.get(name, default_filter))
def finish(self, filters = {}, verbose = False): default_filter = rate.gaussian_window(21) # normalizing each array so that its sum is 1 has the # effect of making the integral of P(x) dx equal 1 after # the array is transformed to an array of densities (which # is done by dividing each bin by dx). N = len(self.zero_lag_rates) + len(self.background_rates) + len(self.injection_rates) n = 0 threads = [] for group, (name, binnedarray) in itertools.chain(zip(["zero lag"] * len(self.zero_lag_rates), self.zero_lag_rates.items()), zip(["background"] * len(self.background_rates), self.background_rates.items()), zip(["injections"] * len(self.injection_rates), self.injection_rates.items())): n += 1 if verbose: print >>sys.stderr, "\t%d / %d: %s \"%s\"" % (n, N, group, name) binnedarray.array /= numpy.sum(binnedarray.array) threads.append(threading.Thread(target = rate.to_moving_mean_density, args = (binnedarray, filters.get(name, default_filter)))) threads[-1].start() rate.to_moving_mean_density(binnedarray, filters.get(name, default_filter)) for thread in threads: thread.join() return self
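The surrounding change moves each call to rate.to_moving_mean_density into its own thread (the serial call being removed above is the leftover). A stripped-down sketch of the start/join pattern; the worker here is a stand-in, and threads only pay off if the real work releases the GIL, as numpy-heavy code typically does:

import threading

def smooth(name):
    print("smoothing %s" % name)   # stand-in for the density smoothing

threads = []
for name in ["H1_snr", "L1_chisq"]:            # made-up array names
    threads.append(threading.Thread(target=smooth, args=(name,)))
    threads[-1].start()
for thread in threads:
    thread.join()                               # wait for all workers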
twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png')
figname=par1_name+'-'+par2_name+'_2Dkernel.png' twoDKdePath=os.path.join(margdir,figname)
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' 
html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2D/",'All 2D Marginal PDFs') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') 
resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>'
html_tcmp_write+='<td width="30%"><img width="100%" src="2Dkde/'+twoDKdePath+'"/></td>'
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' 
html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2D/",'All 2D Marginal PDFs') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') 
resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
html_tcmp.a("2D/",'All 2D Marginal PDFs')
html_tcmp.a("2Dkde/",'All 2D marginal PDFs (kde)')
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpage from a file containing posterior samples generated by the parameter estimation codes with 1D/2D plots and stats from the marginal posteriors for each parameter/set of parameters. """ if eventnum is not None and injfile is None: print "You specified an event number but no injection file. Ignoring!" if data is None: print 'You must specify an input data file' exit(1) # if outdir is None: print "You must specify an output directory." exit(1) if not os.path.isdir(outdir): os.makedirs(outdir) # commonOutputFileObj=open(data[0]) #Select injections using tc +/- 0.1s if it exists or eventnum from the injection file if injfile: import itertools injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile]) if(eventnum is not None): if(len(injections)<eventnum): print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections)) sys.exit(1) else: injection=injections[eventnum] else: if(len(injections)<1): print 'Warning: Cannot find injection with end time %f' %(means[2]) else: injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next() ## Load Bayes factors ## # Add Bayes factor information to summary file # if bayesfactornoise is not None: bfile=open(bayesfactornoise,'r') BSN=bfile.read() bfile.close() print 'BSN: %s'%BSN if bayesfactorcoherent is not None: bfile=open(bayesfactorcoherent,'r') BCI=bfile.read() bfile.close() print 'BCI: %s'%BCI #Create an instance of the posterior class using the posterior values loaded #from the file and any injection information (if given). 
pos = bppu.Posterior(commonOutputFileObj,SimInspiralTableEntry=injection) if ('mc' in pos.names or 'mchirp' in pos.names) and \ 'eta' in pos.names and \ ('mass1' not in pos.names or 'm1' not in pos.names) and\ ('m2' not in pos.names or 'm2' not in pos.names): if 'mc' in pos.names: mchirp_name='mc' else: mchirp_name='mchirp' if injection: inj_mass1,inj_mass2=bppu.mc2ms(injection.mchirp,injection.eta) mass1_samps,mass2_samps=bppu.mc2ms(pos[mchirp_name].samples,pos['eta'].samples) mass1_pos=bppu.OneDPosterior('m1',mass1_samps,injected_value=inj_mass1) mass2_pos=bppu.OneDPosterior('m2',mass2_samps,injected_value=inj_mass2) pos.append(mass1_pos) pos.append(mass2_pos) ##Print some summary stats for the user...## #Number of samples print "Number of posterior samples: %i"%len(pos) # Means print 'Means:' print str(pos.means) #Median print 'Median:' print str(pos.medians) #maxL print 'maxL:' max_pos,max_pos_co=pos.maxL print max_pos_co #==================================================================# #Create web page #==================================================================# html=bppu.htmlPage('Posterior PDFs') #Create a section for meta-data/run information html_meta=html.add_section('Summary') html_meta.p('Produced from '+str(len(pos))+' posterior samples.') html_meta.p('Samples read from %s'%(data[0])) #Create a section for model selection results (if they exist) if bayesfactornoise is not None: html_model=html.add_section('Model selection') html_model.p('log Bayes factor ( coherent vs gaussian noise) = %s, Bayes factor=%f'%(BSN,exp(float(BSN)))) if bayesfactorcoherent is not None: html_model.p('log Bayes factor ( coherent vs incoherent OR noise ) = %s, Bayes factor=%f'%(BCI,exp(float(BCI)))) #Create a section for summary statistics html_stats=html.add_section('Summary statistics') html_stats.write(str(pos)) #==================================================================# #Generate sky map #==================================================================# #If sky resolution parameter has been specified try and create sky map... skyreses=None sky_injection_cl=None if skyres is not None and 'ra' in pos.names and 'dec' in pos.names: #Greedy bin sky samples (ra,dec) into a grid on the sky which preserves #? top_ranked_sky_pixels,sky_injection_cl,skyreses,injection_area=bppu.greedy_bin_sky(pos,skyres,confidence_levels) print "BCI for sky area:" print skyreses #Create sky map in outdir bppu.plot_sky_map(top_ranked_sky_pixels,outdir) #Create a web page section for sky localization results/plots html_sky=html.add_section('Sky Localization') if injection: if sky_injection_cl: html_sky.p('Injection found at confidence interval %f in sky location'%(sky_injection_cl)) else: html_sky.p('Injection not found in posterior bins in sky location!') html_sky.write('<img width="35%" src="skymap.png"/>') if skyres is not None: html_sky_write='<table border="1"><tr><th>Confidence region</th><th>size (sq. deg)</th></tr>' fracs=skyreses.keys() fracs.sort() skysizes=[skyreses[frac] for frac in fracs] for frac,skysize in zip(fracs,skysizes): html_sky_write+=('<tr><td>%f</td><td>%f</td></tr>'%(frac,skysize)) html_sky_write+=('</table>') html_sky.write(html_sky_write) #==================================================================# #2D posteriors #==================================================================# #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs #using a greedy algorithm . 
The ranked pixels (toppoints) are used #to plot 2D histograms and evaluate Bayesian confidence intervals. #Make a folder for the 2D kde plots margdir=os.path.join(outdir,'2Dkde') if not os.path.isdir(margdir): os.makedirs(margdir) twobinsdir=os.path.join(outdir,'2Dbins') if not os.path.isdir(twobinsdir): os.makedirs(twobinsdir) #Add a section to the webpage for a table of the confidence interval #results. html_tcig=html.add_section('2D confidence intervals (greedy binning)') #Generate the top part of the table html_tcig_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_tcig_write+='<th>%f</th>'%cl if injection: html_tcig_write+='<th>Injection Confidence Level</th>' html_tcig_write+='<th>Injection Confidence Interval</th>' html_tcig_write+='</tr>' #= Add a section for a table of 2D marginal PDFs (kde) html_tcmp=html.add_section('2D Marginal PDFs') html_tcmp.br() #Table matter html_tcmp_write='<table border="1" width="100%">' row_count=0 for par1_name,par2_name in twoDGreedyMenu: par1_name=par1_name.lower() par2_name=par2_name.lower() print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name) try: pos[par1_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par1_name continue try: pos[par2_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par2_name continue #Bin sizes try: par1_bin=GreedyRes[par1_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name) continue try: par2_bin=GreedyRes[par2_name] except KeyError: print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name) continue #Form greedy binning input structure greedy2Params={par1_name:par1_bin,par2_name:par2_bin} #Greedy bin the posterior samples toppoints,injection_cl,reses,injection_area=\ bppu.greedy_bin_two_param(pos,greedy2Params,confidence_levels) print "BCI %s-%s:"%(par1_name,par2_name) print reses #Generate new BCI html table row BCItableline='<tr><td>%s-%s</td>'%(par1_name,par2_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injection_cl is not None: BCItableline+='<td>%f</td>'%injection_cl BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_tcig_write+=BCItableline #= Plot 2D histograms of greedily binned points =# #greedy2PlotFig=bppu.plot_two_param_greedy_bins(np.array(toppoints),pos,greedy2Params) #greedy2PlotFig.savefig(os.path.join(twobinsdir,'%s-%s_greedy2.png'%(par1_name,par2_name))) #= Generate 2D kde plots =# print 'Generating %s-%s plot'%(par1_name,par2_name) par1_pos=pos[par1_name].samples par2_pos=pos[par2_name].samples if (size(np.unique(par1_pos))<2 or size(np.unique(par2_pos))<2): continue plot2DkdeParams={par1_name:50,par2_name:50} myfig=bppu.plot_two_param_kde(pos,plot2DkdeParams) twoDKdePath=os.path.join(margdir,par1_name+'-'+par2_name+'_2Dkernel.png') if row_count==0: html_tcmp_write+='<tr>' html_tcmp_write+='<td width="30%"><img width="100%" src="'+twoDKdePath+'"/></td>' row_count+=1 if row_count==3: html_tcmp_write+='</tr>' row_count=0 myfig.savefig(twoDKdePath) #Finish off the BCI table and write it into the etree html_tcig_write+='</table>' html_tcig.write(html_tcig_write) #Finish off the 2D kde plot table while row_count!=0: html_tcmp_write+='<td/>' row_count+=1 if row_count==3: row_count=0 html_tcmp_write+='</tr>' html_tcmp_write+='</table>' 
html_tcmp.write(html_tcmp_write) #Add a link to all plots html_tcmp.br() html_tcmp.a("2D/",'All 2D Marginal PDFs') html_tcmp.hr() #==================================================================# #1D posteriors #==================================================================# #Loop over each parameter and determine the contigious and greedy #confidence levels and some statistics. #Add section for 1D confidence intervals html_ogci=html.add_section('1D confidence intervals (greedy binning)') #Generate the top part of the table html_ogci_write='<table width="100%" border="1"><tr><th/>' confidence_levels.sort() for cl in confidence_levels: html_ogci_write+='<th>%f</th>'%cl if injection: html_ogci_write+='<th>Injection Confidence Level</th>' html_ogci_write+='<th>Injection Confidence Interval</th>' html_ogci_write+='</tr>' #Add section for 1D marginal PDFs and sample plots html_ompdf=html.add_section('1D marginal posterior PDFs') html_ompdf.br() #Table matter html_ompdf_write= '<table><tr><th>Histogram and Kernel Density Estimate</th><th>Samples used</th></tr>' onepdfdir=os.path.join(outdir,'1Dpdf') if not os.path.isdir(onepdfdir): os.makedirs(onepdfdir) sampsdir=os.path.join(outdir,'1Dsamps') if not os.path.isdir(sampsdir): os.makedirs(sampsdir) for par_name in oneDMenu: par_name=par_name.lower() print "Binning %s to determine confidence levels ..."%par_name try: pos[par_name.lower()] except KeyError: print "No input chain for %s, skipping binning."%par_name continue try: par_bin=GreedyRes[par_name] except KeyError: print "Bin size is not set for %s, skipping binning."%par_name continue binParams={par_name:par_bin} toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(pos,binParams,confidence_levels) oneDContCL,oneDContInj = bppu.contigious_interval_one_param(pos,binParams,confidence_levels) #Generate new BCI html table row BCItableline='<tr><td>%s</td>'%(par_name) cls=reses.keys() cls.sort() for cl in cls: BCItableline+='<td>%f</td>'%reses[cl] if injection is not None and injectionconfidence is not None and injection_area is not None: BCItableline+='<td>%f</td>'%injectionconfidence BCItableline+='<td>%f</td>'%injection_area BCItableline+='</tr>' #Append new table line to section html html_ogci_write+=BCItableline #Generate 1D histogram/kde plots print "Generating 1D plot for %s."%par_name oneDPDFParams={par_name:50} rbins,plotFig=bppu.plot_one_param_pdf(pos,oneDPDFParams) figname=par_name+'.png' oneDplotPath=os.path.join(onepdfdir,figname) plotFig.savefig(oneDplotPath) if rbins: print "r of injected value of %s (bins) = %f"%(par_name, rbins) ##Produce plot of raw samples myfig=plt.figure(figsize=(4,3.5),dpi=80) pos_samps=pos[par_name].samples plt.plot(pos_samps,'.',figure=myfig) injpar=pos[par_name].injval if injpar: if min(pos_samps)<injpar and max(pos_samps)>injpar: plt.plot([0,len(pos_samps)],[injpar,injpar],'r-.') myfig.savefig(os.path.join(sampsdir,figname.replace('.png','_samps.png'))) html_ompdf_write+='<tr><td><img src="1Dpdf/'+figname+'"/></td><td><img src="1Dsamps/'+figname.replace('.png','_samps.png')+'"/></td></tr>' html_ompdf_write+='</table>' html_ompdf.write(html_ompdf_write) html_ogci_write+='</table>' html_ogci.write(html_ogci_write) html_ogci.hr() html_ogci.br() html_ompdf.hr() html_ompdf.br() html_footer=html.add_section('') html_footer.p('Produced using cbcBayesPostProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .') html_footer.p(git_version.verbose_msg) #Save results page resultspage=open(os.path.join(outdir,'posplots.html'),'w') 
resultspage.write(str(html)) # Save posterior samples too... posfilename=os.path.join(outdir,'posterior_samples.dat') posfile=open(posfilename,'w') input_file=open(data[0]) posfile.write(input_file.read()) # posfilename2=os.path.join(outdir,'posterior_samples2.dat') pos.write_to_file(posfilename2) #Close files input_file.close() posfile.close() resultspage.close()
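The three records above line up the saved figure location with the path written into the page: posplots.html lives in outdir while the figures live in outdir/2Dkde, so the HTML must use a path relative to the page rather than the filesystem path passed to savefig. The relationship, spelled out with a hypothetical outdir:

import os
outdir = "/tmp/results"                                # hypothetical
figname = "m1-m2_2Dkernel.png"
twoDKdePath = os.path.join(outdir, "2Dkde", figname)   # where savefig writes
html_src = "2Dkde/" + figname                          # what the <img> should use
# A browser viewing outdir/posplots.html resolves html_src to twoDKdePath.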
inspiral.InspiralNode.__init__(self,job)
pipeline.CondorDAGNode.__init__(self,job)
pipeline.AnalysisNode.__init__(self)
def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]):
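# A minimal sketch of the initialisation pattern adopted above: a node class
# inheriting from two independent bases calls each base __init__ explicitly
# instead of going through a single parent class.  The names below
# (DAGNodeBase, AnalysisBase, FollowUpChiaNode) are illustrative stand-ins,
# not the real pipeline classes.
class DAGNodeBase(object):
    def __init__(self, job):
        self.job = job

class AnalysisBase(object):
    def __init__(self):
        self.segments = []

class FollowUpChiaNode(DAGNodeBase, AnalysisBase):
    def __init__(self, job):
        DAGNodeBase.__init__(self, job)   # mirrors pipeline.CondorDAGNode.__init__
        AnalysisBase.__init__(self)       # mirrors pipeline.AnalysisNode.__init__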
self.add_var_opt(param,value)
def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]):
inspiral.ChiaNode.__init__(self,job)
pipeline.CondorDAGNode.__init__(self,job)
pipeline.AnalysisNode.__init__(self)
def __init__(self, dag, job, cp, opts, coinc, inspiral_node_dict, chia_node =None, p_nodes = []):
f = open(opts.galaxy_priors_dir+'/'+str(int(mineffD))+'Mpc.pkl','r')
f = open(opts.galaxy_priors_dir+'/galaxy_prior_'+str(int(mineffD))+'Mpc.pkl','r')
def get_unique_filename(name):
    """ use this to avoid name collisions """
    counter = 1
    base_name, ext = os.path.splitext(name)
    while os.path.isfile(name):
        name = base_name + '_' + str(counter) + ext
        counter += 1
    return name
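# Example use of get_unique_filename() above; the filename is illustrative.
# Each call made while the previous file still exists yields the next free
# name: posterior_samples.dat, posterior_samples_1.dat, and so on.
import os
target = get_unique_filename('posterior_samples.dat')
open(target, 'w').close()
print(get_unique_filename('posterior_samples.dat'))   # posterior_samples_1.dat
os.remove(target)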
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \
   (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) <= ds*ds/4.0:
if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) - ds*ds/4.0 <= epsilon and \
   (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) \
   - ds*ds/4.0 <= epsilon:
def map_grids(coarsegrid,finegrid,coarseres=4.0):
    """
    takes the two grids (lists of lat/lon tuples) and returns a dictionary
    where the points in the coarse grid are the keys and lists of tuples of
    points in the fine grid are the values
    """
    fgtemp = finegrid[:]
    coarsedict = {}
    ds = coarseres*pi/180.0
    for cpt in coarsegrid:
        flist = []
        for fpt in fgtemp:
            if (cpt[0]-fpt[0])*(cpt[0]-fpt[0]) <= ds*ds/4.0 and \
               (cpt[1]-fpt[1])*(cpt[1]-fpt[1])*sin(cpt[0])*sin(cpt[0]) <= ds*ds/4.0:
                flist.append(fpt)
        coarsedict[cpt] = flist
        for rpt in flist:
            fgtemp.remove(rpt)
    return coarsedict, fgtemp
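# Why the epsilon form above matters: with the strict comparison, a squared
# separation that should sit exactly on the half-resolution boundary can fall
# just outside it through floating-point rounding, silently dropping boundary
# points from the coarse cell.  A self-contained illustration (the epsilon
# value is illustrative):
epsilon = 1e-12
d2 = (0.1 + 0.1 + 0.1)**2        # squared separation; 0.1 is not exact in binary
limit = 0.09                     # plays the role of ds*ds/4.0
print(d2 <= limit)               # False: d2 evaluates to 0.09000000000000002
print(d2 - limit <= epsilon)     # True: the tolerant form keeps the point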
mv $1 $2/.
tar -xzvf $2/$1
currentPath=`pwd` ; mv $1 $2/. ; cd $2 ; tar -xzvf $1 ; cd $currentPath ;
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
rm $2/$1
rm $2/$1 ;
def setup_distrib_script(self,tag_base):
    distrib_script = open('distribRemoteScan_'+tag_base+'.sh','w')
    distrib_script.write("""#!/bin/bash
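# The fixed shell one-liner above cd's into the destination before untarring,
# so the archive contents land in $2 instead of under the caller's working
# directory.  A rough Python equivalent using the standard tarfile module
# (the function name and paths are illustrative, not part of the pipeline):
import os, shutil, tarfile

def distrib_unpack(archive, destdir):
    # Mirror "mv $1 $2/. ; cd $2 ; tar -xzvf $1": move the tarball into the
    # destination, then extract it there.
    moved = os.path.join(destdir, os.path.basename(archive))
    shutil.move(archive, moved)
    tf = tarfile.open(moved, 'r:gz')
    tf.extractall(destdir)
    tf.close()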
raise NotImplemented
raise NotImplementedError
def add_content(self, data, label="_nolabel_"):
    """
    Stub. Replace with a method that appends values or lists of values to
    self.data_sets and appends labels to self.data_labels. Feel free to
    accept complicated inputs, but try to store only the raw numbers that
    will enter the plot.
    """
    raise NotImplemented
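# The swap above matters because NotImplemented is a plain singleton meant to
# be *returned* by binary-operator methods, not raised; "raise NotImplemented"
# fails with a TypeError, while NotImplementedError is the exception class
# intended for abstract stubs.  A short demonstration:
def stub():
    raise NotImplementedError("subclasses must override this")

try:
    stub()
except NotImplementedError as exc:
    print(exc)    # subclasses must override this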
raise NotImplemented
raise NotImplementedError
def finalize(self):
    """
    Stub. Replace with a function that creates and makes your plot pretty.
    Do not do I/O here.
    """
    raise NotImplemented
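# A minimal sketch of a concrete subclass filling in the two stubs above,
# following the docstrings (store raw numbers in add_content, build the
# figure in finalize).  The class name and plotting details are illustrative:
import matplotlib.pyplot as plt

class ScatterSummary(object):
    def __init__(self):
        self.data_sets = []
        self.data_labels = []

    def add_content(self, data, label="_nolabel_"):
        # keep only the raw numbers that will enter the plot
        self.data_sets.append([float(x) for x in data])
        self.data_labels.append(label)

    def finalize(self):
        # create and prettify the plot; no I/O here
        fig = plt.figure()
        for samples, label in zip(self.data_sets, self.data_labels):
            plt.plot(samples, '.', label=label)
        plt.legend(loc='best')
        return fig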
for sngl in coincEvent.sngl_inspiral.itervalues():
    myArgString=myArgString+"%s,"%sngl.ifo
if hasattr(coincEvent, "sngl_inspiral"):
    for sngl in coincEvent.sngl_inspiral.itervalues():
        myArgString=myArgString+"%s,"%sngl.ifo
elif hasattr(coincEvent, "ifos_list"):
    for ifo in coincEvent.ifos_list:
        myArgString=myArgString+"%s,"%ifo
def __init__(self, dag, job, cp, opts, coincEvent=None):
    """
    """
    self.__conditionalLoadDefaults__(findFlagsNode.defaults,cp)
    pipeline.CondorDAGNode.__init__(self,job)
    self.add_var_opt("trigger-time",coincEvent.time)
    #Output filename
    oFilename="%s-findFlags_%s_%s.wiki"%(coincEvent.instruments,
                                         coincEvent.ifos,
                                         coincEvent.time)
    self.add_var_opt("output-file",job.outputPath+'/DataProducts/'+oFilename)
    self.add_var_opt("segment-url",cp.get('findFlags','segment-url'))
    self.add_var_opt("output-format",cp.get('findFlags','output-format'))
    self.add_var_opt("window",cp.get('findFlags','window'))
    #IFO arg string
    myArgString=""
    for sngl in coincEvent.sngl_inspiral.itervalues():
        myArgString=myArgString+"%s,"%sngl.ifo
    myArgString=myArgString.rstrip(",")
    self.add_var_opt("ifo-list",myArgString)
    if not opts.disable_dag_categories:
        self.set_category(job.name.lower())
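# The hasattr() guard introduced above lets one node class accept two event
# flavours without explicit type checks.  A stripped-down sketch of the same
# pattern (the two event classes are illustrative stand-ins):
class CoincWithSngls(object):
    sngl_inspiral = {"H1": object(), "L1": object()}

class CoincWithIfosList(object):
    ifos_list = ["H1", "L1", "V1"]

def ifo_arg_string(coincEvent):
    myArgString = ""
    if hasattr(coincEvent, "sngl_inspiral"):
        for key in coincEvent.sngl_inspiral.keys():
            myArgString = myArgString + "%s," % key
    elif hasattr(coincEvent, "ifos_list"):
        for ifo in coincEvent.ifos_list:
            myArgString = myArgString + "%s," % ifo
    return myArgString.rstrip(",")

print(ifo_arg_string(CoincWithSngls()))     # e.g. H1,L1
print(ifo_arg_string(CoincWithIfosList()))  # H1,L1,V1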
for sngl in coincEvent.sngl_inspiral.itervalues():
    myArgString=myArgString+"%s,"%sngl.ifo
if hasattr(coincEvent, "sngl_inspiral"):
    for sngl in coincEvent.sngl_inspiral.itervalues():
        myArgString=myArgString+"%s,"%sngl.ifo
elif hasattr(coincEvent, "ifos_list"):
    for ifo in coincEvent.ifos_list:
        myArgString=myArgString+"%s,"%ifo
def __init__(self, dag, job, cp, opts, coincEvent=None):
    """
    """
    self.__conditionalLoadDefaults__(findVetosNode.defaults,cp)
    pipeline.CondorDAGNode.__init__(self,job)
    self.add_var_opt("trigger-time",coincEvent.time)
    #Output filename
    oFilename="%s-findVetos_%s_%s.wiki"%(coincEvent.instruments,
                                         coincEvent.ifos,
                                         coincEvent.time)
    self.add_var_opt("output-file",job.outputPath+'/DataProducts/'+oFilename)
    self.add_var_opt("segment-url",cp.get('findFlags','segment-url'))
    self.add_var_opt("output-format",cp.get('findFlags','output-format'))
    self.add_var_opt("window",cp.get('findFlags','window'))
    #IFO arg string
    myArgString=""
    for sngl in coincEvent.sngl_inspiral.itervalues():
        myArgString=myArgString+"%s,"%sngl.ifo
    myArgString=myArgString.rstrip(",")
    self.add_var_opt("ifo-list",myArgString)
    if not opts.disable_dag_categories:
        self.set_category(job.name.lower())
    if not opts.no_findVetoes:
        dag.add_node(self)
        self.validate()
    else:
        self.invalidate()
maxbin=0
def getinjpar(inj,parnum):
    if parnum==0: return inj.mchirp
    if parnum==1: return inj.eta
    if parnum==2: return inj.get_end()
    if parnum==3: return inj.phi0
    if parnum==4: return inj.distance
    if parnum==5: return inj.longitude
    if parnum==6: return inj.latitude
    if parnum==7: return inj.polarization
    if parnum==8: return inj.inclination
    return None
hist[maxbin]=0
frac=frac+(maxbin/len(pos))
hist[maxpos]=0
frac=frac+(float(maxbin)/float(len(pos)))
def getinjpar(inj,parnum):
    if parnum==0: return inj.mchirp
    if parnum==1: return inj.eta
    if parnum==2: return inj.get_end()
    if parnum==3: return inj.phi0
    if parnum==4: return inj.distance
    if parnum==5: return inj.longitude
    if parnum==6: return inj.latitude
    if parnum==7: return inj.polarization
    if parnum==8: return inj.inclination
    return None
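# The fraction fix above guards against Python 2 integer division: with two
# ints, maxbin/len(pos) truncates to zero, so the accumulated confidence
# fraction never grows.  A short illustration:
maxbin, nsamples = 7, 1000
print(maxbin / nsamples)                  # 0 under Python 2 integer division
print(float(maxbin) / float(nsamples))    # 0.007, the intended fraction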
htmlfile.write('<table border=1><tr>')
htmlfile.write('<table border=1 width=100%><tr>')
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))
    # Load in the main data
    paramnames, pos=loadDataFile(data[0])
    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: " + str(size(pos,0))
    # Calculate means
    means = mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'
    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')
    injection=None
    # Select injections using tc +/- 0.1s if it exists or eventnum from the injection file
    if injfile:
        import itertools
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection = itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()
    #If injection parameter passed load object representation of injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If sky resolution parameter has been specified try and create sky map.
    skyreses=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)
    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm . The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')
    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)
        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)
        summary_file.set('2D greedy cl',pars_name,summaryString+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))
    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))
    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contigious cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')
    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}
    max_pos,max_i=posMode(pos_array)
    #Loop over each parameter and determine the contigious and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue
        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}
        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)
        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence = bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc
    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        par_injvalues_=None
        if injection and reduce (lambda a,b: a and b, map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])) :
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=( getinjpar(paramnames,injection,i) , getinjpar(paramnames,injection,j) )
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)
    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')
    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(os.path.join(outdir,param+'.png'))
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param, rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,rbins)
        oneDplotPaths.append(oneDplotPath)
    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()
    # Save posterior samples too...
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
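# The greedy binning used throughout cbcBayesSkyRes() above ranks bins by
# occupancy and accumulates the most populated ones until a target fraction
# of samples is covered.  A minimal one-parameter sketch of that idea (this
# is an illustration, not the bppu implementation):
def greedy_interval(samples, nbins, level=0.68):
    lo, hi = min(samples), max(samples)
    width = (hi - lo) / float(nbins) or 1.0
    counts = {}
    for s in samples:
        idx = min(int((s - lo) / width), nbins - 1)
        counts[idx] = counts.get(idx, 0) + 1
    # take the most populated bins first until `level` of samples is covered
    total, used = 0, []
    for idx in sorted(counts, key=counts.get, reverse=True):
        used.append(idx)
        total += counts[idx]
        if total >= level * len(samples):
            break
    return [(lo + i * width, lo + (i + 1) * width) for i in sorted(used)]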
oneDplotPath=os.path.join(outdir,param+'.png')
figname=param+'.png'
oneDplotPath=os.path.join(outdir,figname)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):  # context body identical to the full cbcBayesSkyRes listing above
myfig.savefig(os.path.join(outdir,param+'_samps.png'))
myfig.savefig(os.path.join(outdir,figname.replace('.png','_samps.png')))
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):  # context body identical to the full cbcBayesSkyRes listing above
oneDplotPaths.append(oneDplotPath)
oneDplotPaths.append(figname)
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):  # context body identical to the full cbcBayesSkyRes listing above
oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2']
oneDMenu=['mtotal','m1','m2','mchirp','mc','distance','distMPC','dist','iota','psi','eta','RA','dec','a1','a2','phi1','theta1','phi2','theta2']
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
    if eventnum is not None and injfile is None:
        print "You specified an event number but no injection file. Ignoring!"
    if data is None:
        print 'You must specify an input data file'
        sys.exit(1)
    #
    if outdir is None:
        print "You must specify an output directory."
        sys.exit(1)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #
    summary_fo=open(os.path.join(outdir,'summary.ini'),'w')
    summary_file=ConfigParser()
    summary_file.add_section('metadata')
    summary_file.set('metadata','group_id','X')
    if eventnum:
        summary_file.set('metadata','event_id',str(eventnum))
    summary_file.add_section('Confidence levels')
    summary_file.set('Confidence levels','confidence levels',str(confidence_levels))
    # Load in the main data
    paramnames,pos=loadDataFile(data[0])
    #Generate any required derived parameters
    if "m1" not in paramnames and "m2" not in paramnames and "mchirp" in paramnames and "eta" in paramnames:
        (m1,m2)=bppu.mc2ms(pos[:,paramnames.index('mchirp')],pos[:,paramnames.index('eta')])
        pos=np.column_stack((pos,m1,m2))
        paramnames.append("m1")
        paramnames.append("m2")
    #
    Nd=len(paramnames)
    print "Number of posterior samples: "+str(size(pos,0))
    # Calculate means
    means=mean(pos,axis=0)
    meanStr=map(str,means)
    out=reduce(lambda a,b:a+'||'+b,meanStr)
    print 'Means:'
    print '||'+out+'||'
    RAdim=paramnames.index('RA')
    decdim=paramnames.index('dec')
    injection=None
    # Select the injection using tc +/- 0.1s if it exists, or eventnum from the injection file
    if injfile:
        import itertools
        injections=SimInspiralUtils.ReadSimInspiralFromFiles([injfile])
        if(eventnum is not None):
            if(len(injections)<=eventnum):
                print "Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injfile,len(injections))
                sys.exit(1)
            else:
                injection=injections[eventnum]
        else:
            if(len(injections)<1):
                print 'Warning: Cannot find injection with end time %f' %(means[2])
            else:
                injection=itertools.ifilter(lambda a: abs(a.get_end() - means[2]) < 0.1, injections).next()
    #If an injection was found, load an object representation of the injection
    #table entries.
    if injection:
        injpoint=map(lambda a: getinjpar(paramnames,injection,a),range(len(paramnames)))
        injvals=map(str,injpoint)
        out=reduce(lambda a,b:a+'||'+b,injvals)
        print 'Injected values:'
        print out
        #Add injection values to output file
        summary_file.add_section('Injection values')
        for parnum in range(len(paramnames)):
            summary_file.set('Injection values',paramnames[parnum],getinjpar(paramnames,injection,parnum))
    #
    #If a sky resolution parameter has been specified, try to create a sky map.
    skyreses=None
    skyinjectionconfidence=None
    if skyres is not None:
        RAvec=array(pos)[:,paramnames.index('RA')]
        decvec=array(pos)[:,paramnames.index('dec')]
        skypos=column_stack([RAvec,decvec])
        injvalues=None
        if injection:
            injvalues=(injpoint[RAdim],injpoint[decdim])
        skyreses,skyinjectionconfidence=bppu.plotSkyMap(skypos,skyres,injvalues,confidence_levels,outdir)
    #Loop over parameter pairs in twoDGreedyMenu and bin the sample pairs
    #using a greedy algorithm. The ranked pixels (toppoints) are used
    #to plot 2D histograms and evaluate Bayesian confidence intervals.
    summary_file.add_section('2D greedy cl')
    summary_file.add_section('2D greedy cl inj')
    ncon=len(confidence_levels)
    pos_array=np.array(pos)
    twoDGreedyCL={}
    twoDGreedyInj={}
    for par1_name,par2_name in twoDGreedyMenu:
        print "Binning %s-%s to determine confidence levels ..."%(par1_name,par2_name)
        #Bin sizes
        try:
            par1_bin=GreedyRes[par1_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_bin=GreedyRes[par2_name]
        except KeyError:
            print "Bin size is not set for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        #Get posterior samples
        try:
            par1_index=paramnames.index(par1_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par1_name,par1_name,par2_name)
            continue
        try:
            par2_index=paramnames.index(par2_name)
        except ValueError:
            print "No input chain for %s, skipping %s/%s binning."%(par2_name,par1_name,par2_name)
            continue
        pars_name=("%s,%s"%(par1_name,par2_name)).lower()
        par1pos=pos_array[:,par1_index]
        par2pos=pos_array[:,par2_index]
        injection_=None
        if injection:
            par1_injvalue=np.array(injpoint)[par1_index]
            par2_injvalue=np.array(injpoint)[par2_index]
            injection_=(par1_injvalue,par2_injvalue)
        posterior_array=column_stack([par1pos,par2pos])
        toppoints,injectionconfidence,twoDGreedyCL[pars_name],twoDGreedyInj[pars_name]=bppu.greedyBin2(posterior_array,(par1_bin,par2_bin),confidence_levels,par_names=(par1_name,par2_name),injection=injection_)
        #Plot 2D histograms of greedily binned points
        if injection is not None and par1_injvalue is not None and par2_injvalue is not None:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name),injpoint=[par1_injvalue,par2_injvalue])
        else:
            bppu.plot2Dbins(np.array(toppoints),(par1_bin,par2_bin),outdir,par_names=(par1_name,par2_name))
        #Record the confidence-level areas as a comma-separated list
        summaryString='['
        for frac,area in twoDGreedyCL[pars_name].items():
            summaryString+=str(area)+','
        summary_file.set('2D greedy cl',pars_name,summaryString.rstrip(',')+']')
        summary_file.set('2D greedy cl inj',pars_name,str(injectionconfidence))
    if not os.path.exists(os.path.join(outdir,'pickle')):
        os.makedirs(os.path.join(outdir,'pickle'))
    pickle_to_file(twoDGreedyCL,os.path.join(outdir,'pickle','GreedyCL2.pickle'))
    pickle_to_file(twoDGreedyInj,os.path.join(outdir,'pickle','GreedyInj2.pickle'))
    for par in twoDGreedyCL.keys():
        oneD_dict_to_file(twoDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy2.dat'))
    #1D binning
    summary_file.add_section('1D mean')
    summary_file.add_section('1D median')
    summary_file.add_section('1D mode')
    summary_file.add_section('1D contiguous cl')
    summary_file.add_section('1D greedy cl')
    summary_file.add_section('1D stacc')
    oneDStats={}
    oneDGreedyCL={}
    oneDContCL={}
    oneDGreedyInj={}
    oneDContInj={}
    max_pos,max_i=posMode(pos_array)
    #Loop over each parameter and determine the contiguous and greedy
    #confidence levels and some statistics.
    for par_name in oneDMenu:
        print "Binning %s to determine confidence levels ..."%par_name
        try:
            par_index=paramnames.index(par_name)
        except ValueError:
            print "No input chain for %s, skipping binning."%par_name
            continue
        try:
            par_bin=GreedyRes[par_name]
        except KeyError:
            print "Bin size is not set for %s, skipping binning."%par_name
            continue
        oneDGreedyCL[par_name]={}
        oneDStats[par_name]={}
        oneDContCL[par_name]={}
        oneDGreedyInj[par_name]={}
        oneDContInj[par_name]={}
        par_samps=pos_array[:,par_index]
        summary_file.set('1D mode',par_name,str(par_samps[max_i]))
        summary_file.set("1D mean",par_name,str(np.mean(par_samps)))
        summary_file.set("1D median",par_name,str(np.median(par_samps)))
        oneDStats[par_name]['mode']=par_samps[max_i]
        oneDStats[par_name]['mean']=np.mean(par_samps)
        oneDStats[par_name]['median']=np.median(par_samps)
        par_injvalue_=None
        if injection:
            par_injvalue_=np.array(injpoint)[par_index]
        oneDGreedyCL[par_name],oneDGreedyInj[par_name],toppoints,injectionconfidence=bppu.greedyBin1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        oneDContCL[par_name],oneDContInj[par_name]=bppu.contCL1(par_samps,par_bin,confidence_levels,par_injvalue=par_injvalue_)
        #Ilya's standard accuracy statistic
        if injection:
            injvalue=np.array(injpoint)[par_index]
            if injvalue:
                stacc=bppu.stacc_stat(par_samps,injvalue)
                summary_file.set('1D stacc',par_name,str(stacc))
                oneDStats[par_name]['stacc']=stacc
    pickle_to_file(oneDGreedyCL,os.path.join(outdir,'pickle','GreedyCL1.pickle'))
    pickle_to_file(oneDContCL,os.path.join(outdir,'pickle','ContCL1.pickle'))
    pickle_to_file(oneDStats,os.path.join(outdir,'pickle','Stats1.pickle'))
    pickle_to_file(oneDContInj,os.path.join(outdir,'pickle','ContInj1.pickle'))
    pickle_to_file(oneDGreedyInj,os.path.join(outdir,'pickle','GreedyInj1.pickle'))
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDGreedyCL[par],os.path.join(outdir,str(par)+'_greedy1.dat'))
    #
    for par in oneDGreedyCL.keys():
        oneD_dict_to_file(oneDContCL[par],os.path.join(outdir,str(par)+'_cont.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDStats[par],os.path.join(outdir,str(par)+'_stats.dat'))
    #
    for par in oneDStats.keys():
        oneD_dict_to_file(oneDContInj[par],os.path.join(outdir,str(par)+'_cont_inj.dat'))
    #
    #####Generate 2D kde plots and webpage########
    margdir=os.path.join(outdir,'2D')
    if not os.path.isdir(margdir):
        os.makedirs(margdir)
    twoDKdePaths=[]
    for par1,par2 in twoDplots:
        try:
            i=paramnames.index(par1)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par1,par1,par2)
            continue
        try:
            j=paramnames.index(par2)
        except ValueError:
            print "No input chain for %s, skipping 2D plot of %s-%s."%(par2,par1,par2)
            continue
        print 'Generating %s-%s plot'%(paramnames[i],paramnames[j])
        if (size(np.unique(pos[:,i]))<2 or size(np.unique(pos[:,j]))<2):
            continue
        #Mark the injection only if it falls within the sampled range of both parameters
        par_injvalues_=None
        if injection and reduce(lambda a,b: a and b,map(lambda idx: getinjpar(paramnames,injection,idx)>min(pos[:,idx]) and getinjpar(paramnames,injection,idx)<max(pos[:,idx]),[i,j])):
            if getinjpar(paramnames,injection,i) is not None and getinjpar(paramnames,injection,j) is not None:
                par_injvalues_=(getinjpar(paramnames,injection,i),getinjpar(paramnames,injection,j))
        myfig=bppu.plot2Dkernel(pos[:,i],pos[:,j],50,50,par_names=(par1,par2),par_injvalues=par_injvalues_)
        twoDKdePath=os.path.join(margdir,paramnames[i]+'-'+paramnames[j]+'_2Dkernel.png')
        twoDKdePaths.append(twoDKdePath)
        myfig.savefig(twoDKdePath)
    htmlfile=open(os.path.join(outdir,'posplots.html'),'w')
    htmlfile.write('<HTML><HEAD><TITLE>Posterior PDFs</TITLE></HEAD><BODY><h3>'+str(means[2])+' Posterior PDFs</h3>')
    if(skyres is not None):
        htmlfile.write('<table border=1><tr><td>Confidence region<td>size (sq. deg)</tr>')
        for (frac,skysize) in skyreses:
            htmlfile.write('<tr><td>%f<td>%f</tr>'%(frac,skysize))
        htmlfile.write('</table>')
    htmlfile.write('Produced from '+str(size(pos,0))+' posterior samples.<br>')
    htmlfile.write('Samples read from %s<br>'%(data[0]))
    htmlfile.write('<h4>Mean parameter estimates</h4>')
    htmlfile.write('<table border=1><tr>')
    paramline=reduce(lambda a,b:a+'<td>'+b,paramnames)
    htmlfile.write('<td>'+paramline+'<td>logLmax</tr><tr>')
    meanline=reduce(lambda a,b:a+'<td>'+b,meanStr)
    htmlfile.write('<td>'+meanline+'</tr>')
    if injection:
        htmlfile.write('<tr><th colspan='+str(len(paramnames))+'>Injected values</tr>')
        injline=reduce(lambda a,b:a+'<td>'+b,injvals)
        htmlfile.write('<td>'+injline+'<td></tr>')
    htmlfile.write('</table>')
    if injection:
        if skyinjectionconfidence:
            htmlfile.write('<p>Injection found at confidence interval %f in sky location</p>'%(skyinjectionconfidence))
        else:
            htmlfile.write('<p>Injection not found in posterior bins in sky location!</p>')
    htmlfile.write('<h5>2D Marginal PDFs</h5><br>')
    htmlfile.write('<table border=1><tr>')
    #htmlfile.write('<td width=30%><img width=100% src="m1m2.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="RAdec.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="Meta.png"></td>')
    #htmlfile.write('</tr><tr><td width=30%><img width=100% src="2D/Mchirp (Msun)-geocenter time ISCO_2Dkernel.png"</td>')
    if skyres is not None:
        htmlfile.write('<td width=30%><img width=100% src="skymap.png"></td>')
    else:
        htmlfile.write('<td width=30%><img width=100% src="m1dist.png"></td>')
    #htmlfile.write('<td width=30%><img width=100% src="m2dist.png"></td>')
    row_switch=1
    for par1,par2 in twoDplots:
        if row_switch==3:
            row_switch=0
        plot_path=None
        if os.path.isfile(os.path.join(outdir,'2D',par1+'-'+par2+'_2Dkernel.png')):
            plot_path='2D/'+par1+'-'+par2+'_2Dkernel.png'
        elif os.path.isfile(os.path.join(outdir,'2D',par2+'-'+par1+'_2Dkernel.png')):
            plot_path='2D/'+par2+'-'+par1+'_2Dkernel.png'
        if plot_path:
            if row_switch==0:
                htmlfile.write('<tr>')
            htmlfile.write('<td width=30%><img width=100% src="'+plot_path+'"></td>')
            if row_switch==2:
                htmlfile.write('</tr>')
            row_switch+=1
    #Close out a partially filled table row
    if row_switch==2:
        htmlfile.write('<td></td></tr>')
    elif row_switch==1:
        htmlfile.write('<td></td><td></td></tr>')
    htmlfile.write('</table>')
    htmlfile.write('<br><a href="2D/">All 2D Marginal PDFs</a><hr><h5>1D marginal posterior PDFs</h5><br>')
    summary_file.add_section('1D ranking kde')
    summary_file.add_section('1D ranking bins')
    oneDplotPaths=[]
    for param in oneDMenu:
        try:
            par_index=paramnames.index(param)
            i=par_index
        except ValueError:
            print "No input chain for %s, skipping 1D plot."%param
            continue
        pos_samps=pos[:,i]
        injpar_=None
        if injection:
            injpar_=getinjpar(paramnames,injection,i)
        print "Generating 1D plot for %s."%param
        rbins,plotFig=bppu.plot1DPDF(pos_samps,param,injpar=injpar_)
        oneDplotPath=os.path.join(outdir,param+'.png')
        plotFig.savefig(oneDplotPath)
        if rbins:
            print "r of injected value of %s (bins) = %f"%(param,rbins)
        ##Produce plot of raw samples
        myfig=plt.figure(figsize=(4,3.5),dpi=80)
        plt.plot(pos_samps,'.',figure=myfig)
        if injpar_:
            if min(pos_samps)<injpar_ and max(pos_samps)>injpar_:
                plt.plot([0,len(pos_samps)],[injpar_,injpar_],'r-.')
        myfig.savefig(os.path.join(outdir,param+'_samps.png'))
        #summary_file.set('1D ranking kde',param,rkde)
        summary_file.set('1D ranking bins',param,str(rbins))
        oneDplotPaths.append(oneDplotPath)
    for plotPath in oneDplotPaths:
        htmlfile.write('<img src="'+plotPath+'"><img src="'+plotPath.replace('.png','_samps.png')+'"><br>')
    htmlfile.write('<hr><br />Produced using cbcBayesSkyRes.py at '+strftime("%Y-%m-%d %H:%M:%S"))
    htmlfile.write('</BODY></HTML>')
    htmlfile.close()
    # Save the posterior samples too
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    posfile=open(posfilename,'w')
    for row in pos:
        for i in row:
            posfile.write('%f\t'%(i))
        posfile.write('\n')
    #Close files
    posfile.close()
    summary_file.write(summary_fo)
    summary_fo.close()
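# The greedy binning above is implemented in pylal.bayespputils (imported as
# bppu). For orientation only, a minimal 1D sketch of the idea -- the function
# name and details below are illustrative, not the bppu implementation:
import numpy as np

def greedy_bin_sketch(samples, bin_width, confidence_levels):
    """Rank fixed-width bins by occupancy, then accumulate probability from
    the most populated bin downwards until each confidence level is enclosed.
    Returns a dict mapping confidence level -> enclosed interval size."""
    samples = np.asarray(samples)
    nbins = max(1, int(np.ceil((samples.max() - samples.min()) / bin_width)))
    counts, edges = np.histogram(samples, bins=nbins)
    order = np.argsort(counts)[::-1]                  # greedy: biggest bins first
    cumfrac = np.cumsum(counts[order]) / float(len(samples))
    sizes = {}
    for cl in confidence_levels:
        # smallest number of top-ranked bins whose summed probability >= cl
        needed = min(int(np.searchsorted(cumfrac, cl)) + 1, nbins)
        sizes[cl] = needed * bin_width
    return sizes

# e.g. greedy_bin_sketch(np.random.randn(100000), 0.05, [0.68, 0.95])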
twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist']]
twoDplots=[['mc','eta'],['mchirp','eta'],['m1','m2'],['mtotal','eta'],['distance','iota'],['dist','iota'],['RA','dec'],['m1','dist'],['m2','dist'],['psi','iota'],['psi','distance'],['psi','dist'],['psi','phi0']]
def cbcBayesSkyRes(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None):
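# The 1D loop in cbcBayesSkyRes above records "Ilya's standard accuracy
# statistic" via bppu.stacc_stat. A minimal sketch, on the assumption that
# stacc is the RMS deviation of the posterior samples from the injected value
# (equivalently sqrt(variance + bias**2)); the real definition lives in bppu:
import numpy as np

def stacc_sketch(par_samps, injvalue):
    """RMS deviation of the samples from the injected (true) parameter value."""
    par_samps = np.asarray(par_samps)
    return np.sqrt(np.mean((par_samps - injvalue) ** 2))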
if pt[3] <= dtrss_inj and pt[3] != 0.0:
if pt[3] <= dtrss_inj:
def get_unique_filename(name):
    """ use this to avoid name collisions """
    counter = 1
    base_name, ext = os.path.splitext(name)
    candidate = name
    # Test the full filename (base plus extension) and append _1, _2, ...
    # before the extension until an unused name is found.
    while os.path.isfile(candidate):
        candidate = base_name + '_' + str(counter) + ext
        counter += 1
    return candidate
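# Usage sketch for get_unique_filename (the filename here is hypothetical):
#   'summary.ini' exists   -> returns 'summary_1.ini'
#   'summary_1.ini' exists -> returns 'summary_2.ini', and so on
outname = get_unique_filename('summary.ini')
outfile = open(outname, 'w')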
self._InspiralAnalysisNode__pad_data = 0
self._AnalysisNode__pad_data = 0
def __init__(self, options, cp, dir='', tag_base=''):
    """
    """
    self.__conditionalLoadDefaults__(followUpChiaJob.defaults,cp)
    #self.__prog__ = 'followUpChiaJob'
    self.__executable = string.strip(cp.get('condor','chia'))
    self.__universe = "standard"
    pipeline.CondorDAGJob.__init__(self,self.__universe,self.__executable)
    self.add_condor_cmd('getenv','True')
    self._InspiralAnalysisNode__pad_data = 0
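# This record and the two below are the same fix for Python name mangling: an
# attribute written as self.__pad_data inside the AnalysisNode class
# (presumably glue.pipeline's) is stored as _AnalysisNode__pad_data, so code
# outside that class must mangle with the *defining* class's name. A minimal
# sketch -- the class bodies here are illustrative, not the real pipeline
# module:
class AnalysisNode(object):
    def __init__(self):
        self.__pad_data = None                # stored as _AnalysisNode__pad_data

class InspiralAnalysisNode(AnalysisNode):
    pass

node = InspiralAnalysisNode()
node._InspiralAnalysisNode__pad_data = 0      # silently creates a NEW attribute
print(node._AnalysisNode__pad_data)           # -> None: real value untouched
node._AnalysisNode__pad_data = 0              # the corrected form takes effect
print(node._AnalysisNode__pad_data)           # -> 0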
self._InspiralAnalysisNode__pad_data = int(value)
self._AnalysisNode__pad_data = int(value)
def __init__(self, dag, job, cp, opts, sngl, frame_cache, chia, tag, p_nodes=[]):
self._InspiralAnalysisNode__pad_data = 0
self._AnalysisNode__pad_data = 0
def __init__(self, dag, job, cp, opts, coinc, inspiral_node_dict, chia_node =None, p_nodes = []):