bugged (string, length 4–228k) | fixed (string, length 0–96.3M) | __index_level_0__ (int64, 0–481k) |
|---|---|---|
def _prepare_header(output, in_size, basename, mtime): """Returns a prepared gzip header StringIO. The gzip header is defined in RFC 1952. """ output.write("\x1f\x8b\x08") # Gzip-deflate identification flags = FEXTRA if basename: flags |= FNAME output.write(chr(flags)) # The mtime will be undefined if it does not fit... | def _prepare_header(output, in_size, basename, mtime): """Returns a prepared gzip header StringIO. The gzip header is defined in RFC 1952. """ output.write("\x1f\x8b\x08") # Gzip-deflate identification flags = FEXTRA if basename: flags |= FNAME output.write(chr(flags)) # The mtime will be undefined if it does not fit... | 479,200 |
def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_len... | def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_len... | 479,201 |
def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_len... | def _write_extra_fields(output, in_size): """Writes the dictzip extra field. It will be initiated with zeros in chunk lengths. See man dictzip. """ num_chunks = in_size // CHUNK_LENGTH if in_size % CHUNK_LENGTH != 0: num_chunks += 1 field_length = 3*2 + 2 * num_chunks extra_length = 2*2 + field_length assert extra_len... | 479,202 |
def main(): args = sys.argv[1:] if len(args) == 0: print >>sys.stderr, __doc__ sys.exit(1) for filename in args: input = open(filename, "rb") inputinfo = os.fstat(input.fileno()) basename = os.path.basename(filename) output = open(filename + ".gz", "wb") compressor.compress(input, inputinfo.st_size, output, basename,... | def main(): args = sys.argv[1:] if len(args) == 0: print >>sys.stderr, __doc__ sys.exit(1) for filename in args: input = open(filename, "rb") inputinfo = os.fstat(input.fileno()) basename = os.path.basename(filename) output = open(filename + SUFFIX, "wb") compressor.compress(input, inputinfo.st_size, output, basename... | 479,203 |
def hipe_pfn_cache(cachename,globpat): """ create and return the name of a pfn cache containing files that match globpat. This is needed to manage the .input files that hipe creates. cachename = the name of the pfn cache file globpat = the pattern to search for """ cache_fh = open(cachename,"w") for file in glob.glob(... | def hipe_pfn_cache(cachename,globpat): """ create and return the name of a pfn cache containing files that match globpat. This is needed to manage the .input files that hipe creates. cachename = the name of the pfn cache file globpat = the pattern to search for """ cache_fh = open(cachename,"w") for file in glob.glob(... | 479,204 |
def set_name(self, *args): pipeline.CondorDAGNode.set_name(self, *args) self.cache_name = os.path.join(self._CondorDAGNode__job.cache_dir, "%s.cache" % self.get_name()) | def set_name(self, *args): pipeline.CondorDAGNode.set_name(self, *args) self.cache_name = os.path.join(self._CondorDAGNode__job.cache_dir, "%s.cache" % self.get_name()) | 479,205 |
def add_input_cache(self, cache): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" self.input_cache.extend(cache) for c in cache: filename = c.path() pipeline.CondorDAGNode.add_file_arg(self, filename) self.add_output_file(filename) | def add_input_cache(self, cache): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" self.input_cache.extend(cache) for c in cache: filename = c.path() pipeline.CondorDAGNode.add_file_arg(self, filename) self.add_output_file(filename) | 479,206 |
def set_output(self, description): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" cache_entry = power.make_cache_entry(self.input_cache, description, "") filename = os.path.join(self.output_dir, "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache... | def set_output(self, description): if self.output_cache: raise AttributeError, "cannot change attributes after computing output cache" cache_entry = power.make_cache_entry(self.input_cache, description, "") filename = os.path.join(self.output_dir, "%s-STRING_LIKELIHOOD_%s-%d-%d.xml.gz" % (cache_entry.observatory, cache... | 479,207 |
def write_input_files(self, *args): # oh. my. god. this is fscked. for arg in self.get_args(): if "--add-from-cache" in arg: f = file(self.cache_name, "w") for c in self.input_cache: print >>f, str(c) pipeline.CondorDAGNode.write_input_files(self, *args) break | def write_input_files(self, *args): # oh. my. god. this is fscked. for arg in self.get_args(): if "--add-from-cache" in arg: f = file(self.cache_name, "w") for c in self.input_cache: print >>f, str(c) pipeline.CondorDAGNode.write_input_files(self, *args) break | 479,208 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty line in input file: %s'%(sline) proceed=False for ... | definjectionconfidence=0 loadDataFile(filename):injectionconfidence=0 printinjectionconfidence=0 filenameinjectionconfidence=0 infile=open(filename,'r')injectionconfidence=0 formatstr=infile.readline().lstrip()injectionconfidence=0 header=formatstr.split()injectionconfidence=0 llines=[]injectionconfidence=0 importinjec... | 479,209 |
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/y... | def plot2Dkernel(xdat,ydat,Nx,Ny): xax=linspace(min(xdat),max(xdat),Nx) yax=linspace(min(ydat),max(ydat),Ny) x,y=numpy.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = numpy.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/y... | 479,210 |
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | 479,211 |
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | 479,212 |
def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | def cbcBayesPostProc(outdir,data,oneDMenu,twoDGreedyMenu,GreedyRes,confidence_levels,twoDplots,injfile=None,eventnum=None,skyres=None,bayesfactornoise=None,bayesfactorcoherent=None): """ This is a demonstration script for using the functionality/data structures contained in pylal.bayespputils . It will produce a webpag... | 479,213 |
def get_all_files_in_range(dirname, starttime, endtime, pad=64): """Returns all files in dirname and all its subdirectories whose names indicate that they contain segments in the range starttime to endtime""" ret = [] # Maybe the user just wants one file... if os.path.isfile(dirname): if re.match('.*-[0-9]*-[0-9]*\.x... | def get_all_files_in_range(dirname, starttime, endtime, pad=64): """Returns all files in dirname and all its subdirectories whose names indicate that they contain segments in the range starttime to endtime""" ret = [] # Maybe the user just wants one file... if os.path.isfile(dirname): if re.match('.*-[0-9]*-[0-9]*\.x... | 479,214 |
def get_all_files_in_range(dirname, starttime, endtime, pad=64): """Returns all files in dirname and all its subdirectories whose names indicate that they contain segments in the range starttime to endtime""" ret = [] # Maybe the user just wants one file... if os.path.isfile(dirname): if re.match('.*-[0-9]*-[0-9]*\.x... | def get_all_files_in_range(dirname, starttime, endtime, pad=64): """Returns all files in dirname and all its subdirectories whose names indicate that they contain segments in the range starttime to endtime""" ret = [] # Maybe the user just wants one file... if os.path.isfile(dirname): if re.match('.*-[0-9]*-[0-9]*\.x... | 479,215 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,216 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,217 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,218 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,219 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,220 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,221 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,222 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so just return return try: dagfile = open( self.__dag_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,223 |
def histN(mat,N): Nd=size(N) histo=zeros(N) scale=array(map(lambda a,b:a/b,map(lambda a,b:(1*a)-b,map(max,mat),map(min,mat)),N)) axes=array(map(lambda a,N:linspace(min(a),max(a),N),mat,N)) bins=floor(map(lambda a,b:a/b , map(lambda a,b:a-b, mat, map(min,mat) ),scale*1.01)) hbins=reshape(map(int,bins.flat),bins.shape) ... | def histN(mat,N): Nd=size(N) histo=zeros(N) scale=array(map(lambda a,b:a/b,map(lambda a,b:(1*a)-b,map(max,mat),map(min,mat)),N)) axes=array(map(lambda a,N:linspace(min(a),max(a),N),mat,N)) bins=floor(map(lambda a,b:a/b , map(lambda a,b:a-b, mat, map(min,mat) ),scale*1.01)) hbins=reshape(map(int,bins.flat),bins.shape) ... | 479,224 |
def skyhist_cart(skycarts,sky_samples): """ Histogram the list of samples into bins defined by Cartesian vectors in skycarts """ dot=np.dot N=len(skycarts) print 'operating on %d sky points'%(N) bins=np.zeros(N) for RAsample,decsample in sky_samples: sampcart=pol2cart(RAsample,decsample) maxdx=-1 maxvalue=-1 for i in x... | def skyhist_cart(skycarts,sky_samples): """ Histogram the list of samples into bins defined by Cartesian vectors in skycarts """ dot=np.dot N=len(skycarts) print 'operating on %d sky points'%(N) bins=np.zeros(N) for RAsample,decsample in sky_samples: sampcart=pol2cart(RAsample,decsample) maxdx=-1 maxvalue=-1 for i in x... | 479,225 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | 479,226 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | 479,227 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() formatstr=formatstr.replace('#','') header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') line_count=0 for line in infile: sline=line.split() proceed=True if len(sline)<1: print 'Ignoring empty ... | 479,228 |
def getinjpar(inj,parnum): if paramnames[parnum]=='mchirp' or paramnames[parnum]=='mc': return inj.mchirp if paramnames[parnum]=='mass1' or paramnames[parnum]=='m1': (m1,m2)=mc2ms(inj.mchirp,inj.eta) return m1 if paramnames[parnum]=='mass2' or paramnames[parnum]=='m2': (m1,m2)=mc2ms(inj.mchirp,inj.eta) return m2 if par... | def getinjpar(inj,parnum): if paramnames[parnum]=='mchirp' or paramnames[parnum]=='mc': return inj.mchirp if paramnames[parnum]=='mass1' or paramnames[parnum]=='m1': (m1,m2)=mc2ms(inj.mchirp,inj.eta) return m1 if paramnames[parnum]=='mass2' or paramnames[parnum]=='m2': (m1,m2)=mc2ms(inj.mchirp,inj.eta) return m2 if par... | 479,229 |
def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | def plot2Dbins(toppoints,par1_bin,par2_bin,outdir,par1name=None,par2name=None,injpoint=None): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _d... | 479,230 |
def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | 479,231 |
def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | 479,232 |
def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | def plot2Dbins(toppoints,cl,par1_name,par1_bin,par2_name,par2_bin,injpoint): #Work out good bin size xbins=int(ceil((max(toppoints[:,0])-min(toppoints[:,0]))/par1_bin)) ybins=int(ceil((max(toppoints[:,1])-min(toppoints[:,1]))/par2_bin)) _dpi=120 xsize_in_inches=6. xsize_points = xsize_in_inches * _dpi points_per_bin_... | 479,233 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,234 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,235 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,236 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,237 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,238 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,239 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,240 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,241 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,242 |
def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | def plotSkyMap(skypos,skyres,sky_injpoint): from pylal import skylocutils from mpl_toolkits.basemap import Basemap skypoints=array(skylocutils.gridsky(float(skyres))) skycarts=map(lambda s: pol2cart(s[1],s[0]),skypoints) skyinjectionconfidence=None shist=bayespputils.skyhist_cart(array(skycarts),skypos) #shist=skyhi... | 479,243 |
def plot2Dkernel(xdat,ydat,Nx,Ny): xax=np.linspace(min(xdat),max(xdat),Nx) yax=np.linspace(min(ydat),max(ydat),Ny) x,y=np.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/... | def plot2Dkernel(xdat,ydat,Nx,Ny): xax=np.linspace(min(xdat),max(xdat),Nx) yax=np.linspace(min(ydat),max(ydat),Ny) x,y=np.meshgrid(xax,yax) samp=array([xdat,ydat]) kde=stats.kde.gaussian_kde(samp) grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1) z = kde(grid_coords.T) z = z.reshape(Nx,Ny) asp=xax.ptp()/... | 479,244 |
def __init__(self, dag, job, cp, opts, time, ifo, p_nodes=[], type="ht", variety="fg"): | def __init__(self, dag, job, cp, opts, time, ifo, p_nodes=[], type="ht", variety="fg"): | 479,245 |
def make_binj_fragment(dag, seg, tag, offset, flow = None, fhigh = None): # one injection every time-step / pi seconds period = float(binjjob.get_opts()["time-step"]) / math.pi # adjust start time to be commensurate with injection period start = seg[0] - seg[0] % period + period * offset node = BurstInjNode(binjjob) ... | def make_binj_fragment(dag, seg, tag, offset, flow = None, fhigh = None): # one injection every time-step / pi seconds period = float(binjjob.get_opts()["time-step"]) # adjust start time to be commensurate with injection period start = seg[0] - seg[0] % period + period * offset node = BurstInjNode(binjjob) node.set_s... | 479,246 |
def split_bins(cafepacker, extentlimit): """ Split bins of stored in CafePacker until each bin has an extent no longer than extentlimit. """ # # loop overall the bins in cafepacker.bins. we pop items out of # cafepacker.bins and append new ones to the end so need a while loop # checking the extent of each bin in cafep... | def split_bins(cafepacker, extentlimit): """ Split bins of stored in CafePacker until each bin has an extent no longer than extentlimit. """ # # loop overall the bins in cafepacker.bins. we pop items out of # cafepacker.bins and append new ones to the end so need a while loop # checking the extent of each bin in cafep... | 479,247 |
def write_sub_file(self): """ Write a submit file for this Condor job. """ if not self.__log_file: raise CondorSubmitError, "Log file not specified." if not self.__err_file: raise CondorSubmitError, "Error file not specified." if not self.__out_file: raise CondorSubmitError, "Output file not specified." | def write_sub_file(self): """ Write a submit file for this Condor job. """ if not self.__log_file: raise CondorSubmitError, "Log file not specified." if not self.__err_file: raise CondorSubmitError, "Error file not specified." if not self.__out_file: raise CondorSubmitError, "Output file not specified." | 479,248 |
def write_sub_file(self): """ Write a submit file for this Condor job. """ if not self.__log_file: raise CondorSubmitError, "Log file not specified." if not self.__err_file: raise CondorSubmitError, "Error file not specified." if not self.__out_file: raise CondorSubmitError, "Output file not specified." | def write_sub_file(self): """ Write a submit file for this Condor job. """ if not self.__log_file: raise CondorSubmitError, "Log file not specified." if not self.__err_file: raise CondorSubmitError, "Error file not specified." if not self.__out_file: raise CondorSubmitError, "Output file not specified." | 479,249 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,250 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,251 |
def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | def write_abstract_dag(self): """ Write all the nodes in the workflow to the DAX file. """ if not self.__dax_file_path: # this workflow is not dax-compatible, so don't write a dax return try: dagfile = open( self.__dax_file_path, 'w' ) except: raise CondorDAGError, "Cannot open file " + self.__dag_file_path | 479,252 |
def set_cache(self,filename): """ Set the LAL frame cache to to use. The frame cache is passed to the job with the --frame-cache argument. @param filename: calibration file to use. """ if isinstance( filename, str ): # the name of a lal cache file created by a datafind node self.add_var_opt('frame-cache', filename) sel... | def set_cache(self,filename): """ Set the LAL frame cache to to use. The frame cache is passed to the job with the --frame-cache argument. @param filename: calibration file to use. """ if isinstance( filename, str ): # the name of a lal cache file created by a datafind node self.add_var_opt('frame-cache', filename) sel... | 479,253 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self,type_regex=None): """ Each line of the frame cache file is like the following: | 479,254 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self): """ Each line of the frame cache file is like the following: | 479,255 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self): """ Each line of the frame cache file is like the following: | 479,256 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self): """ Each line of the frame cache file is like the following: | 479,257 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self): """ Each line of the frame cache file is like the following: | 479,258 |
def parse(self): """ Each line of the frame cache file is like the following: | def parse(self): """ Each line of the frame cache file is like the following: | 479,259 |
def __init__(self,cache_dir,log_dir,config_file,dax=0,lsync_cache_file=None): """ @param cache_dir: the directory to write the output lal cache files to. @param log_dir: the directory to write the stderr file to. @param config_file: ConfigParser object containing the path to the LSCdataFind executable in the [condor] s... | def __init__(self,cache_dir,log_dir,config_file,dax=0,lsync_cache_file=None): """ @param cache_dir: the directory to write the output lal cache files to. @param log_dir: the directory to write the stderr file to. @param config_file: ConfigParser object containing the path to the LSCdataFind executable in the [condor] s... | 479,260 |
def get_output(self): """ Return the output file, i.e. the file containing the frame cache data. or the files itself as tuple (for DAX) """ if self.__dax: # we are a dax running in grid mode so we need to resolve the # frame file metadata into LFNs so pegasus can query the RLS if self.__lfn_list is None: | def get_output(self): """ Return the output file, i.e. the file containing the frame cache data. or the files itself as tuple (for DAX) """ if self.__dax: # we are a dax running in grid mode so we need to resolve the # frame file metadata into LFNs so pegasus can query the RLS if self.__lfn_list is None: | 479,261 |
def get_coincs(self, eventlists, event_comparefunc, thresholds, verbose = False): # # has this node already been visited? if so, return the # answer we already know # | def get_coincs(self, eventlists, event_comparefunc, thresholds, verbose = False): # # has this node already been visited? if so, return the # answer we already know # | 479,262 |
def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coint.ty... | def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coinc.ty... | 479,263 |
def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | 479,264 |
def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | 479,265 |
uniq_def = (row[ifos_col],row[name_col],row[vers_col]) | uniq_def = (row[ifos_col],row[name_col],row[vers_col]) | 479,266 |
def _abs_diff(c): """ For some angular difference c = |a - b| in radians, find the magnitude of the difference, taking into account the wrap-around at 2*pi. """ c = abs(c) % (2 * LAL_PI) return np.fmin(c, 2 * LAL_PI - c) | def _abs_diff(c): """ For some angular difference c = |a - b| in radians, find the magnitude of the difference, taking into account the wrap-around at 2*pi. """ c = abs(c) % (2 * LAL_PI) return np.where(c < LAL_PI, c, 2 * LAL_PI - c) | 479,267 |
def fisher_rvs(mu, sigma, size=1): """ Return a random (polar, azimuthal) angle drawn from the Fisher distribution. Assume that the concentration parameter (kappa) is large so that we can use a Rayleigh distribution about the north pole and rotate it to be centered at the (polar, azimuthal) coordinate mu. Assume kappa... | def fisher_rvs(mu, sigma, size=1): """ Return a random (polar, azimuthal) angle drawn from the Fisher distribution. Assume that the concentration parameter (kappa) is large so that we can use a Rayleigh distribution about the north pole and rotate it to be centered at the (polar, azimuthal) coordinate mu. Assume kappa... | 479,268 |
def __init__(self, args=None): self.args = args | def __init__(self, args=None): self.args = args | 479,269 |
def __init__(self, cp, dax = False): """ @cp: a ConfigParser object from which the options are read. """ exec_name = 'inspinjfind' sections = ['inspinjfind'] extension = 'xml' InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax) self.add_condor_cmd('getenv', 'True') # overwrite standard log file ... | def __init__(self, cp, dax = False): """ @cp: a ConfigParser object from which the options are read. """ exec_name = 'inspinjfind' sections = ['inspinjfind'] extension = 'xml' InspiralAnalysisJob.__init__(self, cp, sections, exec_name, extension, dax) self.add_condor_cmd('getenv', 'True') # overwrite standard log file ... | 479,270 |
def fisher_rvs(mu, sigma, size=None): """ Return a random (polar, azimuthal) angle drawn from the Fisher distribution. Assume that the concentration parameter (kappa) is large so that we can use a Rayleigh distribution about the north pole and rotate it to be centered at the (polar, azimuthal) coordinate mu. Assume ka... | def fisher_rvs(mu, sigma, size=1): """ Return a random (polar, azimuthal) angle drawn from the Fisher distribution. Assume that the concentration parameter (kappa) is large so that we can use a Rayleigh distribution about the north pole and rotate it to be centered at the (polar, azimuthal) coordinate mu. Assume kappa... | 479,271 |
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) new=Non... | def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: raise Warning, "input to __patchFrameTypeDef__ included a \ gps time argument specified as None\n" ret... | 479,272 |
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) new=Non... | def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: raise Warning, "input to __patchFrameTypeDef__ included an \ ifo argument specified as None\n" return ... | 479,273 |
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) new=Non... | def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) if int(... | 479,274 |
def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) new=Non... | def __patchFrameTypeDef__(frametype=None,ifo=None,gpstime=None): """ Temporary patch function, to adjust specfied frame type used in searching the filesystem for files to display in followup. """ if frametype == None: return None if gpstime == None: return None if ifo == None: return None endOfS5=int(875232014) new=Non... | 479,275 |
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found. It uses the pathing information from the files passed via cacheListing to aid in our filesystem search. """ #Open the cache entry and search for those en... | def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found. It uses the pathing information from the files passed via cacheListing to aid in our filesystem search. """ #Open the cache entry and search for those en... | 479,276 |
def __readCache__(self,cacheListing=list()): """ Simple mehtod to read in a cache or list of cache files and return a list of files or an empty list if nothing found. It uses the pathing information from the files passed via cacheListing to aid in our filesystem search. """ #Open the cache entry and search for those en... | def__readCache__(self,cacheListing=list()):"""Simplemehtodtoreadinacacheorlistofcachefilesandreturnalistoffilesoranemptylistifnothingfound.ItusesthepathinginformationfromthefilespassedviacacheListingtoaidinourfilesystemsearch."""#OpenthecacheentryandsearchforthoseentrysfinalList=list()forentryincacheListing:fileListing... | 479,277 |
def get_findVetos(self): tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insString=insSt... | def get_findVetos(self): tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insString=insSt... | 479,278 |
def get_findVetos(self): tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insString=insSt... | def get_findVetos(self): tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insString=insSt... | 479,279 |
def get_findFlags(self): """ """ tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insStri... | def get_findFlags(self): """ """ tmpList=list() #H1,H2,L1-findFlags_H1,H2,L1_831695156.714.wiki #instrument,ifos ifoString="" for i in range(0,len(self.coinc.ifos)/2):ifoString=ifoString+"%s,"%self.coinc.ifos[2*i:2*i+2] ifoString=ifoString.rstrip(",") insString="" for i in range(0,len(self.coinc.instruments)/2):insStri... | 479,280 |
def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coint.ty... | def get_analyzeQscan_RDS(self): """ """ #analyseQscan.py_FG_RDS_full_data/H1-analyseQscan_H1_931176926_116_rds-unspecified-gpstime.cache cacheList=list() cacheFiles=list() for sngl in self.coinc.sngls: timeString=str(float(sngl.time)).replace(".","_") myCacheMask="*%s*/%s-analyseQscan_%s_%s_rds*.cache"%\ (self.coinc.ty... | 479,281 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # # Check to see if wiki file with name already exists # maxCount=0 while os.pat... | defprepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None):"""Methodtoprepareachecklistwheredataproductsareisolatedindirectory."""endOfS5=int(875232014)wikiFileFinder=findFileType(wikiTree,wikiCoinc)##Checktoseeifwikifilewithnamealreadyexists#maxCount=0whileos.path.exists(wikiFilename)andmaxCount... | 479,282 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # # Check to see if wiki file with name already exists # maxCount=0 while os.pat... | defprepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None):"""Methodtoprepareachecklistwheredataproductsareisolatedindirectory."""endOfS5=int(875232014)wikiFileFinder=findFileType(wikiTree,wikiCoinc)##Checktoseeifwikifilewithnamealreadyexists#maxCount=0whileos.path.exists(wikiFilename)andmaxCount... | 479,283 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # # Check to see if wiki file with name already exists # maxCount=0 while os.pat... | def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # # Check to see if wiki file with name already exists # maxCount=0 while os.pat... | 479,284 |
def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | 479,285 |
def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | def __init__(self,type=None,ifo=None,time=None,snr=None,chisqr=None,mass1=None,mass2=None,mchirp=None): """ """ self.type=str(type) self.ifo=str(ifo) self.time=float(time) self.snr=float(snr) self.chisqr=float(chisqr) self.mass1=float(mass1) self.mass2=float(mass2) self.mchirp=float(mchirp) | 479,286 |
def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findFlagsNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findFlags_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEven... | def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findFlagsNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findFlags_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEven... | 479,287 |
def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findVetosNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findVetos_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEven... | def __init__(self, dag, job, cp, opts, coincEvent=None): """ """ self.__conditionalLoadDefaults__(findVetosNode.defaults,cp) pipeline.CondorDAGNode.__init__(self,job) self.add_var_opt("trigger-time",coincEvent.time) #Output filename oFilename="%s-findVetos_%s_%s.wiki"%(coincEvent.instruments, coincEvent.ifos, coincEven... | 479,288 |
def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change th... | def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change th... | 479,289 |
def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change th... | def insertAnalyzeQscanTable(self, images=None, thumbs=None, indexes=None, imagesAQ=None, thumbsAQ=None, indexesAQ=None, channelRanks=None): """ Insert a multiple IFO table with 5 cols with the AQ underneath this depends on the numer of IFO keys in indexes dictionary. The option channelRanks is not required to change th... | 479,290 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | 479,291 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | 479,292 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | 479,293 |
def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | def prepareChecklist(wikiFilename=None,wikiCoinc=None,wikiTree=None,file2URL=None): """ Method to prepare a checklist where data products are isolated in directory. """ endOfS5=int(875232014) wikiFileFinder=findFileType(wikiTree,wikiCoinc) # Check to see if wiki file with name already exists maxCount=0 while os.path.ex... | 479,294 |
def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s" % temp_stor... | def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s" % temp_stor... | 479,295 |
def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s" % temp_stor... | def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s" % temp_stor... | 479,296 |
def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s" % temp_stor... | def set_temp_store_directory( connection, temp_store_directory, verbose = False ): """ Sets the temp_store_directory parameter in sqlite. """ try: import sqlite3 except ImportError: # pre 2.5.x from pysqlite2 import dbapi2 as sqlite3 if verbose: print >> sys.stderr, "setting the temp_store_directory to %s ..." % temp_... | 479,297 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data a... | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data a... | 479,298 |
def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data a... | def loadDataFile(filename): print filename infile=open(filename,'r') formatstr=infile.readline().lstrip() header=formatstr.split() llines=[] import re dec=re.compile(r'[^\d.-]+') for line in infile: sline=line.split() proceed=True for s in sline: if dec.search(s) is not None: print 'Warning! Ignoring non-numeric data a... | 479,299 |
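The rows above pair a buggy Python snippet (column `bugged`) with its corrected counterpart (column `fixed`), keyed by `__index_level_0__`; the viewer truncates long cells, so the `...` markers stand for elided code. Below is a minimal sketch of how such pairs could be inspected offline, assuming the table has been exported to one JSON object per line — the filename `pairs.jsonl` and that export format are assumptions, not part of the dataset — using only Python's standard `json` and `difflib` modules:

```python
import difflib
import json

def diff_pair(bugged: str, fixed: str) -> str:
    """Return a unified diff between a bugged snippet and its fix."""
    return "\n".join(
        difflib.unified_diff(
            bugged.splitlines(),
            fixed.splitlines(),
            fromfile="bugged",
            tofile="fixed",
            lineterm="",
        )
    )

# Assumed export format: one JSON object per line carrying the three
# columns shown above ("bugged", "fixed", "__index_level_0__").
with open("pairs.jsonl") as f:
    for line in f:
        row = json.loads(line)
        print(f"--- pair {row['__index_level_0__']} ---")
        print(diff_pair(row["bugged"], row["fixed"]))
```

A unified diff suits this data because most fixes in the table are small, localized edits (a renamed attribute such as `coint` → `coinc`, a changed path variable such as `__dag_file_path` → `__dax_file_path`), which a diff surfaces more readably than printing both snippets in full.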