idx int64 0 252k | question stringlengths 48 5.28k | target stringlengths 5 1.23k |
|---|---|---|
5,200 | def SetPosition ( self , track_id , position ) : self . iface . SetPosition ( convert2dbus ( track_id , 'o' ) , convert2dbus ( position , 'x' ) ) | Sets the current track position in microseconds . |
5,201 | def process_lists ( self ) : for l1_idx , obj1 in enumerate ( self . l1 ) : for l2_idx , obj2 in enumerate ( self . l2 ) : if self . equal ( obj1 , obj2 ) : self . matches . add ( ( l1_idx , l2_idx ) ) | Do any preprocessing of the lists . |
5,202 | def get_matches ( self , src , src_idx ) : if src not in ( 'l1' , 'l2' ) : raise ValueError ( 'Must have one of "l1" or "l2" as src' ) if src == 'l1' : target_list = self . l2 else : target_list = self . l1 comparator = { 'l1' : lambda s_idx , t_idx : ( s_idx , t_idx ) in self . matches , 'l2' : lambda s_idx , t_idx : ... | Get elements equal to the idx th in src from the other list . |
5,203 | def find_the_closest_atoms ( self , topology ) : self . universe . load_new ( topology ) self . universe . ligand_noH = self . universe . ligand . select_atoms ( "not name H*" ) ligand_positions = self . universe . ligand_noH . positions for residue in self . dict_of_plotted_res . keys ( ) : residue_selection = self . ... | This function defines the ligand atoms that are closest to the residues that will be plotted in the final graph . |
5,204 | def load_data ( self , topology , mol_file , ligand_name , offset = 0 ) : self . load_topology ( topology ) self . renumber_system ( offset ) self . rename_ligand ( ligand_name , mol_file ) self . load_mol ( mol_file ) | This function loads all relevant data - except trajectories since those are dealt with one at a time . Therefore this process only needs to be done once and every time a trajectory needs to be loaded it can be loaded separately and the Data object can be shared across LINTools processes . |
5,205 | def analyse_topology ( self , topology , cutoff = 3.5 ) : self . define_residues_for_plotting_topology ( cutoff ) self . find_the_closest_atoms ( topology ) | In case user wants to analyse only a single topology file this process will determine the residues that should be plotted and find the ligand atoms closest to these residues . |
5,206 | def get_header ( vcf_file_path ) : logger . info ( "Parsing header of file {0}" . format ( vcf_file_path ) ) head = HeaderParser ( ) handle = get_vcf_handle ( infile = vcf_file_path ) for line in handle : line = line . rstrip ( ) if line . startswith ( '#' ) : if line . startswith ( '##' ) : head . parse_meta_data ( li... | Parse the header and return a header object |
5,207 | def sample_lonlat ( self , n ) : radius = self . sample_radius ( n ) a = radius b = self . jacobian * radius t = 2. * np . pi * np . random . rand ( n ) cost , sint = np . cos ( t ) , np . sin ( t ) phi = np . pi / 2. - np . deg2rad ( self . theta ) cosphi , sinphi = np . cos ( phi ) , np . sin ( phi ) x = a * cost * c... | Sample 2D distribution of points in lon lat |
5,208 | def group ( iterable , key ) : for _ , grouped in groupby ( sorted ( iterable , key = key ) , key = key ) : yield list ( grouped ) | groupby which sorts the input discards the key and returns the output as a sequence of lists . |
5,209 | def aggregate_count ( keyname ) : def inner ( docs ) : return sum ( doc [ keyname ] for doc in docs ) return keyname , inner | Straightforward sum of the given keyname . |
5,210 | def aggregate_rate ( rate_key , count_key ) : def inner ( docs ) : total = sum ( doc [ count_key ] for doc in docs ) weighted_total = sum ( doc [ rate_key ] * doc [ count_key ] for doc in docs ) total_rate = weighted_total / total return total_rate return rate_key , inner | Compute an aggregate rate for rate_key weighted according to count_key . |
5,211 | def make_aggregate ( docs , aggregations ) : new_doc = dict ( docs [ 0 ] ) for keyname , aggregation_function in aggregations : new_doc [ keyname ] = aggregation_function ( docs ) return new_doc | Given docs and aggregations return a single document with the aggregations applied . |
5,212 | def json ( value ) : uncleaned = jsonlib . dumps ( value ) clean = bleach . clean ( uncleaned ) return mark_safe ( clean ) | Sanitize the JSON string using the Bleach HTML tag remover |
5,213 | def find_pareto_front ( population ) : pareto_front = set ( range ( len ( population ) ) ) for i in range ( len ( population ) ) : if i not in pareto_front : continue ind1 = population [ i ] for j in range ( i + 1 , len ( population ) ) : ind2 = population [ j ] if ind2 . fitness . dominates ( ind1 . fitness ) or ind1 ... | Finds a subset of nondominated individuals in a given list |
5,214 | def _to_ndarray ( self , a ) : if isinstance ( a , ( list , tuple ) ) : a = numpy . array ( a ) if not is_ndarray ( a ) : raise TypeError ( "Expected an ndarray but got object of type '{}' instead" . format ( type ( a ) ) ) return a | Casts Python lists and tuples to a numpy array or raises an AssertionError . |
5,215 | def fn_abs ( self , value ) : if is_ndarray ( value ) : return numpy . absolute ( value ) else : return abs ( value ) | Return the absolute value of a number . |
5,216 | def fn_get_mask ( self , value ) : value = self . _to_ndarray ( value ) if numpy . ma . is_masked ( value ) : return value . mask else : return numpy . zeros ( value . shape ) . astype ( bool ) | Return an array mask . |
5,217 | def fn_min ( self , a , axis = None ) : return numpy . nanmin ( self . _to_ndarray ( a ) , axis = axis ) | Return the minimum of an array ignoring any NaNs . |
5,218 | def fn_max ( self , a , axis = None ) : return numpy . nanmax ( self . _to_ndarray ( a ) , axis = axis ) | Return the maximum of an array ignoring any NaNs . |
5,219 | def fn_median ( self , a , axis = None ) : return numpy . nanmedian ( self . _to_ndarray ( a ) , axis = axis ) | Compute the median of an array ignoring NaNs . |
5,220 | def fn_mean ( self , a , axis = None ) : return numpy . nanmean ( self . _to_ndarray ( a ) , axis = axis ) | Compute the arithmetic mean of an array ignoring NaNs . |
5,221 | def fn_std ( self , a , axis = None ) : return numpy . nanstd ( self . _to_ndarray ( a ) , axis = axis ) | Compute the standard deviation of an array ignoring NaNs . |
5,222 | def fn_var ( self , a , axis = None ) : return numpy . nanvar ( self . _to_ndarray ( a ) , axis = axis ) | Compute the variance of an array ignoring NaNs . |
5,223 | def fn_ceil ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return numpy . ceil ( self . _to_ndarray ( value ) ) else : return math . ceil ( value ) | Return the ceiling of a number . |
5,224 | def fn_int ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return self . _to_ndarray ( value ) . astype ( 'int' ) else : return int ( value ) | Return the value cast to an int . |
5,225 | def fn_float ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return self . _to_ndarray ( value ) . astype ( 'float' ) else : return float ( value ) | Return the value cast to a float . |
5,226 | def make_datetime ( dt , date_parser = parse_date ) : if ( isinstance ( dt , ( datetime . datetime , datetime . date , datetime . time , pd . Timestamp , np . datetime64 ) ) or dt in ( float ( 'nan' ) , float ( 'inf' ) , float ( '-inf' ) , None , '' ) ) : return dt if isinstance ( dt , ( float , int ) ) : return dateti... | Coerce a datetime or string into datetime . datetime object |
5,227 | def quantize_datetime ( dt , resolution = None ) : resolution = int ( resolution or 6 ) if hasattr ( dt , 'timetuple' ) : dt = dt . timetuple ( ) if isinstance ( dt , time . struct_time ) : dt = list ( dt ) [ : 6 ] dt += [ int ( ( dt [ 5 ] - int ( dt [ 5 ] ) ) * 1000000 ) ] dt [ 5 ] = int ( dt [ 5 ] ) return datetime .... | Quantize a datetime to integer years months days hours minutes seconds or microseconds |
5,228 | def timetag_str ( dt = None , sep = '-' , filler = '0' , resolution = 6 ) : resolution = int ( resolution or 6 ) if sep in ( None , False ) : sep = '' sep = str ( sep ) dt = datetime . datetime . now ( ) if dt is None else dt return sep . join ( ( '{0:' + filler + ( '2' if filler else '' ) + 'd}' ) . format ( i ) for i... | Generate a date - time tag suitable for appending to a file name . |
5,229 | def make_tz_aware ( dt , tz = 'UTC' , is_dst = None ) : dt = make_datetime ( dt ) if not isinstance ( dt , ( list , datetime . datetime , datetime . date , datetime . time , pd . Timestamp ) ) : return dt try : tz = dt . tzinfo or tz except ( ValueError , AttributeError , TypeError ) : pass try : tzstr = str ( tz ) . s... | Add timezone information to a datetime object only if it is naive . |
5,230 | def translate_addresstype ( f ) : @ wraps ( f ) def wr ( r , pc ) : at = r [ "addressType" ] try : r . update ( { "addressType" : POSTCODE_API_TYPEDEFS_ADDRESS_TYPES [ at ] } ) except : logger . warning ( "Warning: {}: " "unknown 'addressType': {}" . format ( pc , at ) ) return f ( r , pc ) return wr | decorator to translate the addressType field . |
5,231 | def translate_purposes ( f ) : @ wraps ( f ) def wr ( r , pc ) : tmp = [ ] for P in r [ "purposes" ] : try : tmp . append ( POSTCODE_API_TYPEDEFS_PURPOSES [ P ] ) except : logger . warning ( "Warning: {}: " "cannot translate 'purpose': {}" . format ( pc , P ) ) tmp . append ( P ) r . update ( { "purposes" : tmp } ) ret... | decorator to translate the purposes field . |
5,232 | def quantile ( data , num_breaks ) : def scipy_mquantiles ( a , prob = list ( [ .25 , .5 , .75 ] ) , alphap = .4 , betap = .4 , axis = None , limit = ( ) ) : def _quantiles1D ( data , m , p ) : x = numpy . sort ( data . compressed ( ) ) n = len ( x ) if n == 0 : return numpy . ma . array ( numpy . empty ( len ( p ) , d... | Calculate quantile breaks . |
5,233 | def equal ( data , num_breaks ) : step = ( numpy . amax ( data ) - numpy . amin ( data ) ) / num_breaks return numpy . linspace ( numpy . amin ( data ) + step , numpy . amax ( data ) , num_breaks ) | Calculate equal interval breaks . |
5,234 | def add_column ( filename , column , formula , force = False ) : columns = parse_formula ( formula ) logger . info ( "Running file: %s" % filename ) logger . debug ( " Reading columns: %s" % columns ) data = fitsio . read ( filename , columns = columns ) logger . debug ( ' Evaluating formula: %s' % formula ) col = ev... | Add a column to a FITS file . |
5,235 | def load_files ( filenames , multiproc = False , ** kwargs ) : filenames = np . atleast_1d ( filenames ) logger . debug ( "Loading %s files..." % len ( filenames ) ) kwargs = [ dict ( filename = f , ** kwargs ) for f in filenames ] if multiproc : from multiprocessing import Pool processes = multiproc if multiproc > 0 e... | Load a set of FITS files with kwargs . |
5,236 | def applyFracdet ( self , lon , lat ) : self . loadFracdet ( ) fracdet_core = meanFracdet ( self . m_fracdet , lon , lat , np . tile ( 0.1 , len ( lon ) ) ) fracdet_wide = meanFracdet ( self . m_fracdet , lon , lat , np . tile ( 0.5 , len ( lon ) ) ) return ( fracdet_core >= self . config [ self . algorithm ] [ 'fracde... | We want to enforce minimum fracdet for a satellite to be considered detectable |
5,237 | def applyHotspot ( self , lon , lat ) : self . loadRealResults ( ) cut_detect_real = ( self . data_real [ 'SIG' ] >= self . config [ self . algorithm ] [ 'sig_threshold' ] ) lon_real = self . data_real [ 'RA' ] [ cut_detect_real ] lat_real = self . data_real [ 'DEC' ] [ cut_detect_real ] cut_hotspot = np . tile ( True ... | Exclude objects that are too close to hotspot |
5,238 | def predict ( self , lon , lat , ** kwargs ) : assert self . classifier is not None , 'ERROR' pred = np . zeros ( len ( lon ) ) cut_geometry , flags_geometry = self . applyGeometry ( lon , lat ) x_test = [ ] for key , operation in self . config [ 'operation' ] [ 'params_intrinsic' ] : assert operation . lower ( ) in [ ... | distance abs_mag r_physical |
5,239 | def catalogFactory ( name , ** kwargs ) : fn = lambda member : inspect . isclass ( member ) and member . __module__ == __name__ catalogs = odict ( inspect . getmembers ( sys . modules [ __name__ ] , fn ) ) if name not in list ( catalogs . keys ( ) ) : msg = "%s not found in catalogs:\n %s" % ( name , list ( kernels . k... | Factory for various catalogs . |
5,240 | def write_results ( filename , config , srcfile , samples ) : results = createResults ( config , srcfile , samples = samples ) results . write ( filename ) | Package everything nicely |
5,241 | def estimate ( self , param , burn = None , clip = 10.0 , alpha = 0.32 ) : if param not in list ( self . samples . names ) + list ( self . source . params ) + [ 'age' , 'metallicity' ] : msg = 'Unrecognized parameter: %s' % param raise KeyError ( msg ) if param in self . samples . names : if param . startswith ( 'posit... | Estimate parameter value and uncertainties |
5,242 | def estimate_params ( self , burn = None , clip = 10.0 , alpha = 0.32 ) : mle = self . get_mle ( ) out = odict ( ) for param in mle . keys ( ) : out [ param ] = self . estimate ( param , burn = burn , clip = clip , alpha = alpha ) return out | Estimate all source parameters |
5,243 | def estimate_position_angle ( self , param = 'position_angle' , burn = None , clip = 10.0 , alpha = 0.32 ) : pa = self . samples . get ( param , burn = burn , clip = clip ) peak = ugali . utils . stats . kde_peak ( pa ) shift = 180. * ( ( pa + 90 - peak ) > 180 ) pa -= shift ret = ugali . utils . stats . peak_interval ... | Estimate the position angle from the posterior dealing with periodicity . |
5,244 | def date_range_for_webtrends ( cls , start_at = None , end_at = None ) : if start_at and end_at : start_date = cls . parse_standard_date_string_to_date ( start_at ) end_date = cls . parse_standard_date_string_to_date ( end_at ) return [ ( cls . parse_date_for_query ( start_date ) , cls . parse_date_for_query ( end_date... | Get the start and end formatted for query or the last hour if none specified . Unlike reports this does not aggregate periods and so it is possible to just query a range and parse out the individual hours . |
5,245 | def get_ugali_dir ( ) : dirname = os . getenv ( 'UGALIDIR' ) if not dirname : dirname = os . path . join ( os . getenv ( 'HOME' ) , '.ugali' ) if not os . path . exists ( dirname ) : from ugali . utils . logger import logger msg = "Creating UGALIDIR:\n%s" % dirname logger . warning ( msg ) return mkdir ( dirname ) | Get the path to the ugali data directory from the environment |
5,246 | def get_iso_dir ( ) : dirname = os . path . join ( get_ugali_dir ( ) , 'isochrones' ) if not os . path . exists ( dirname ) : from ugali . utils . logger import logger msg = "Isochrone directory not found:\n%s" % dirname logger . warning ( msg ) return dirname | Get the ugali isochrone directory . |
5,247 | def registerParser ( self , parser ) : if not isinstance ( parser , Subparser ) : raise TypeError ( "%s is not an instance of a subparser." % parser ) self . parsers . append ( parser ) | Registers a parser to parse configuration inputs . |
5,248 | def addConfig ( self , name , default = None , cast = None , required = False , description = None ) : if not self . configNameRE . match ( name ) : raise InvalidConfigurationException ( "Invalid configuration name: %s" % name ) self . configs [ self . _sanitizeName ( name ) ] = { 'default' : default , 'cast' : cast , ... | Adds the given configuration option to the ConfigManager . |
5,249 | def parse ( self ) : self . _config = _Config ( ) self . _setDefaults ( ) for parser in self . parsers : for key , value in parser . parse ( self , self . _config ) . items ( ) : key = self . _sanitizeName ( key ) if key not in self . configs : raise UnknownConfigurationException ( key ) if value is not None : self . _... | Executes the registered parsers to parse input configurations . |
5,250 | def _setDefaults ( self ) : for configName , configDict in self . configs . items ( ) : self . _setConfig ( configName , configDict [ 'default' ] ) | Sets all the expected configuration options on the config object as either the requested default value or None . |
5,251 | def _cast ( self ) : for configName , configDict in self . configs . items ( ) : if configDict [ 'cast' ] is not None : configValue = getattr ( self . _config , configName ) if configValue is not None : try : self . _setConfig ( configName , configDict [ 'cast' ] ( configValue ) ) except : raise InvalidConfigurationExc... | Iterates through our parsed configuration options and cast any options with marked cast types . |
5,252 | def list_models ( self , macaroons ) : return make_request ( "{}model" . format ( self . url ) , timeout = self . timeout , client = self . _client , cookies = self . cookies ) | Get the logged in user s models from the JIMM controller . |
5,253 | def write ( self , novel_title = 'novel' , filetype = 'txt' ) : self . _compose_chapters ( ) self . _write_to_file ( novel_title , filetype ) | Composes chapters and writes the novel to a text file |
5,254 | def _compose_chapters ( self ) : for count in range ( self . chapter_count ) : chapter_num = count + 1 c = Chapter ( self . markov , chapter_num ) self . chapters . append ( c ) | Creates chapters and appends them to the list |
5,255 | def valid_address ( address ) : if not address : return False components = str ( address ) . split ( ':' ) if len ( components ) > 2 or not valid_hostname ( components [ 0 ] ) : return False if len ( components ) == 2 and not valid_port ( components [ 1 ] ) : return False return True | Determines whether the specified address string is valid . |
5,256 | def valid_hostname ( host ) : if len ( host ) > 255 : return False if host [ - 1 : ] == '.' : host = host [ : - 1 ] return all ( _hostname_re . match ( c ) for c in host . split ( '.' ) ) | Returns whether the specified string is a valid hostname . |
5,257 | def sample ( self , n , mass_min = 0.1 , mass_max = 10. , steps = 10000 , seed = None ) : if seed is not None : np . random . seed ( seed ) d_mass = ( mass_max - mass_min ) / float ( steps ) mass = np . linspace ( mass_min , mass_max , steps ) cdf = np . insert ( np . cumsum ( d_mass * self . pdf ( mass [ 1 : ] , log_m... | Sample initial mass values between mass_min and mass_max following the IMF distribution . |
5,258 | def pdf ( cls , mass , log_mode = True ) : log_mass = np . log10 ( mass ) mb = mbreak = [ 0.08 , 0.5 ] a = alpha = [ 0.3 , 1.3 , 2.3 ] norm = 0.27947743949440446 b = 1. / norm c = b * mbreak [ 0 ] ** ( alpha [ 1 ] - alpha [ 0 ] ) d = c * mbreak [ 1 ] ** ( alpha [ 2 ] - alpha [ 1 ] ) dn_dm = b * ( mass < 0.08 ) * mass *... | PDF for the Kroupa IMF . |
5,259 | def pdf ( cls , mass , log_mode = True ) : alpha = 2.35 a = 0.060285569480482866 dn_dm = a * mass ** ( - alpha ) if log_mode : return dn_dm * ( mass * np . log ( 10 ) ) else : return dn_dm | PDF for the Salpeter IMF . |
5,260 | def _getConfigFile ( self , config ) : joinPath = lambda p : ( os . path . join ( p ) if isinstance ( p , ( tuple , list ) ) else p ) if self . filepathConfig is not None and self . filenameConfig is not None : if hasattr ( config , self . filepathConfig ) and hasattr ( config , self . filenameConfig ) : path = joinPat... | Retrieves a file descriptor to a configuration file to process . |
5,261 | def _count_citations ( aux_file ) : counter = defaultdict ( int ) with open ( aux_file ) as fobj : content = fobj . read ( ) for match in CITE_PATTERN . finditer ( content ) : name = match . groups ( ) [ 0 ] counter [ name ] += 1 return counter | Counts the citations in an aux - file . |
5,262 | def _setup_logger ( self ) : log = logging . getLogger ( 'latexmk.py' ) handler = logging . StreamHandler ( ) log . addHandler ( handler ) if self . opt . verbose : log . setLevel ( logging . INFO ) return log | Set up a logger . |
5,263 | def _parse_texlipse_config ( self ) : if not os . path . isfile ( '.texlipse' ) : time . sleep ( 0.1 ) if not os . path . isfile ( '.texlipse' ) : self . log . error ( '! Fatal error: File .texlipse is missing.' ) self . log . error ( '! Exiting...' ) sys . exit ( 1 ) with open ( '.texlipse' ) as fobj : content = fobj ... | Read the project name from the texlipse config file . texlipse . |
5,264 | def _read_latex_files ( self ) : if os . path . isfile ( '%s.aux' % self . project_name ) : cite_counter = self . generate_citation_counter ( ) self . read_glossaries ( ) else : cite_counter = { '%s.aux' % self . project_name : defaultdict ( int ) } fname = '%s.toc' % self . project_name if os . path . isfile ( fname )... | Check if some latex output files exist before first latex run process them and return the generated data . |
5,265 | def read_glossaries ( self ) : filename = '%s.aux' % self . project_name with open ( filename ) as fobj : main_aux = fobj . read ( ) pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}' for match in re . finditer ( pattern , main_aux ) : name , ext_i , ext_o = match . groups ( ) self . glossaries [ name ] = ( ext_... | Read all existing glossaries in the main aux - file . |
5,266 | def check_errors ( self ) : errors = ERROR_PATTTERN . findall ( self . out ) if errors : self . log . error ( '! Errors occurred:' ) self . log . error ( '\n' . join ( [ error . replace ( '\r' , '' ) . strip ( ) for error in chain ( * errors ) if error . strip ( ) ] ) ) self . log . error ( '! See "%s.log" for details.... | Check if errors occurred during a latex run by scanning the output . |
5,267 | def generate_citation_counter ( self ) : cite_counter = dict ( ) filename = '%s.aux' % self . project_name with open ( filename ) as fobj : main_aux = fobj . read ( ) cite_counter [ filename ] = _count_citations ( filename ) for match in re . finditer ( r'\\@input\{(.*.aux)\}' , main_aux ) : filename = match . groups (... | Generate dictionary with the number of citations in all included files . If this changes after the first latex run you have to run bibtex . |
5,268 | def latex_run ( self ) : self . log . info ( 'Running %s...' % self . latex_cmd ) cmd = [ self . latex_cmd ] cmd . extend ( LATEX_FLAGS ) cmd . append ( '%s.tex' % self . project_name ) try : with open ( os . devnull , 'w' ) as null : Popen ( cmd , stdout = null , stderr = null ) . wait ( ) except OSError : self . log ... | Start latex run . |
5,269 | def bibtex_run ( self ) : self . log . info ( 'Running bibtex...' ) try : with open ( os . devnull , 'w' ) as null : Popen ( [ 'bibtex' , self . project_name ] , stdout = null ) . wait ( ) except OSError : self . log . error ( NO_LATEX_ERROR % 'bibtex' ) sys . exit ( 1 ) shutil . copy ( '%s.bib' % self . bib_file , '%s... | Start bibtex run . |
5,270 | def makeindex_runs ( self , gloss_files ) : gloss_changed = False for gloss in self . glossaries : make_gloss = False ext_i , ext_o = self . glossaries [ gloss ] fname_in = '%s.%s' % ( self . project_name , ext_i ) fname_out = '%s.%s' % ( self . project_name , ext_o ) if re . search ( 'No file %s.' % fname_in , self . ... | Check for each glossary if it has to be regenerated with makeindex . |
5,271 | def open_preview ( self ) : self . log . info ( 'Opening preview...' ) if self . opt . pdf : ext = 'pdf' else : ext = 'dvi' filename = '%s.%s' % ( self . project_name , ext ) if sys . platform == 'win32' : try : os . startfile ( filename ) except OSError : self . log . error ( 'Preview-Error: Extension .%s is not linke... | Try to open a preview of the generated document . Currently only supported on Windows . |
5,272 | def need_latex_rerun ( self ) : for pattern in LATEX_RERUN_PATTERNS : if pattern . search ( self . out ) : return True return False | Test for all rerun patterns if they match the output . |
5,273 | def run ( self ) : self . old_dir = [ ] if self . opt . clean : self . old_dir = os . listdir ( '.' ) cite_counter , toc_file , gloss_files = self . _read_latex_files ( ) self . latex_run ( ) self . read_glossaries ( ) gloss_changed = self . makeindex_runs ( gloss_files ) if gloss_changed or self . _is_toc_changed ( to... | Run the LaTeX compilation . |
5,274 | def command ( self , outfile , configfile , pix ) : params = dict ( script = self . config [ 'scan' ] [ 'script' ] , config = configfile , outfile = outfile , nside = self . nside_likelihood , pix = pix , verbose = '-v' if self . verbose else '' ) cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbo... | Generate the command for running the likelihood scan . |
5,275 | def submit_all ( self , coords = None , queue = None , debug = False ) : if coords is None : pixels = np . arange ( hp . nside2npix ( self . nside_likelihood ) ) else : coords = np . asarray ( coords ) if coords . ndim == 1 : coords = np . array ( [ coords ] ) if coords . shape [ 1 ] == 2 : lon , lat = coords . T radiu... | Submit likelihood analyses on a set of coordinates . If coords is None submit all coordinates in the footprint . |
5,276 | def check ( cls ) : if cls == AppSettings : return None exceptions = [ ] for setting in cls . settings . values ( ) : try : setting . check ( ) except Exception as e : exceptions . append ( str ( e ) ) if exceptions : raise ImproperlyConfigured ( "\n" . join ( exceptions ) ) | Class method to check every settings . |
5,277 | def parse_args ( name = "" , args = None ) : def _load_json_file ( path ) : with open ( path ) as f : json_data = json . load ( f ) json_data [ 'path_to_json_file' ] = path return json_data parser = argparse . ArgumentParser ( description = "%s collector for sending" " data to the performance" " platform" % name ) pars... | Parse command line argument for a collector |
5,278 | def hash ( self ) : renderer_str = "{}|{}|{}|{}" . format ( self . renderer . __class__ . __name__ , self . renderer . colormap , self . renderer . fill_value , self . renderer . background_color ) if isinstance ( self . renderer , StretchedRenderer ) : renderer_str = "{}|{}|{}" . format ( renderer_str , self . rendere... | Returns a hash of this render configuration from the variable renderer and time_index parameters . Used for caching the full - extent native projection render so that subsequent requests can be served by a warp operation only . |
5,279 | def factory ( type , module = None , ** kwargs ) : cls = type if module is None : module = __name__ fn = lambda member : inspect . isclass ( member ) and member . __module__ == module classes = odict ( inspect . getmembers ( sys . modules [ module ] , fn ) ) members = odict ( [ ( k . lower ( ) , v ) for k , v in classe... | Factory for creating objects . Arguments are passed directly to the constructor of the chosen class . |
5,280 | def get_definition_from_renderer ( renderer ) : config = { 'colors' : [ [ x [ 0 ] , x [ 1 ] . to_hex ( ) ] for x in renderer . colormap ] , 'options' : { } } if renderer . fill_value : config [ 'options' ] [ 'fill_value' ] = renderer . fill_value if isinstance ( renderer , StretchedRenderer ) : config [ 'type' ] = 'str... | Returns a dictionary definition of the given renderer |
5,281 | def set_model ( self , name , model ) : try : self . __getattribute__ ( 'models' ) except AttributeError : object . __setattr__ ( self , 'models' , odict ( ) ) self . models [ name ] = model | Set a model . |
5,282 | def set_params ( self , ** kwargs ) : for key , value in list ( kwargs . items ( ) ) : setattr ( self , key , value ) | Set the parameter values |
5,283 | def get_params ( self ) : return odict ( [ ( key , param . value ) for key , param in self . params . items ( ) ] ) | Get an odict of the parameter names and values |
5,284 | def get_free_params ( self ) : return odict ( [ ( key , param . value ) for key , param in self . params . items ( ) if param . free ] ) | Get an odict of free parameter names and values |
5,285 | def iter_finds ( regex_obj , s ) : if isinstance ( regex_obj , str ) : for m in re . finditer ( regex_obj , s ) : yield m . group ( ) else : for m in regex_obj . finditer ( s ) : yield m . group ( ) | Generate all matches found within a string for a regex and yield each match as a string |
5,286 | def composite_decorator ( func ) : @ wraps ( func ) def wrapper ( self , * args , ** kwargs ) : total = [ ] for weight , iso in zip ( self . weights , self . isochrones ) : subfunc = getattr ( iso , func . __name__ ) total . append ( weight * subfunc ( * args , ** kwargs ) ) return np . sum ( total , axis = 0 ) return ... | Decorator for wrapping functions that calculate a weighted sum |
5,287 | def mergeCatalogs ( catalog_list ) : for c in catalog_list : if c . data . dtype . names != catalog_list [ 0 ] . data . dtype . names : msg = "Catalog data columns not the same." raise Exception ( msg ) data = np . concatenate ( [ c . data for c in catalog_list ] ) config = catalog_list [ 0 ] . config return Catalog ( ... | Merge a list of Catalogs . |
5,288 | def applyCut ( self , cut ) : return Catalog ( self . config , data = self . data [ cut ] ) | Return a new catalog which is a subset of objects selected using the input cut array . |
5,289 | def bootstrap ( self , mc_bit = 0x10 , seed = None ) : if seed is not None : np . random . seed ( seed ) data = copy . deepcopy ( self . data ) idx = np . random . randint ( 0 , len ( data ) , len ( data ) ) data [ self . config [ 'catalog' ] [ 'mag_1_field' ] ] [ : ] = self . mag_1 [ idx ] data [ self . config [ 'cata... | Return a random catalog by bootstrapping the colors of the objects in the current catalog . |
5,290 | def project ( self , projector = None ) : msg = "'%s.project': ADW 2018-05-05" % self . __class__ . __name__ DeprecationWarning ( msg ) if projector is None : try : self . projector = ugali . utils . projector . Projector ( self . config [ 'coords' ] [ 'reference' ] [ 0 ] , self . config [ 'coords' ] [ 'reference' ] [ ... | Project coordinates on sphere to image plane using Projector class . |
5,291 | def spatialBin ( self , roi ) : if hasattr ( self , 'pixel_roi_index' ) and hasattr ( self , 'pixel' ) : logger . warning ( 'Catalog alread spatially binned' ) return self . pixel = ang2pix ( self . config [ 'coords' ] [ 'nside_pixel' ] , self . lon , self . lat ) self . pixel_roi_index = roi . indexROI ( self . lon , ... | Calculate indices of ROI pixels corresponding to object locations . |
5,292 | def write ( self , outfile , clobber = True , ** kwargs ) : fitsio . write ( outfile , self . data , clobber = True , ** kwargs ) | Write the current object catalog to FITS file . |
5,293 | def _parse ( self , roi = None , filenames = None ) : if ( roi is not None ) and ( filenames is not None ) : msg = "Cannot take both roi and filenames" raise Exception ( msg ) if roi is not None : pixels = roi . getCatalogPixels ( ) filenames = self . config . getFilenames ( ) [ 'catalog' ] [ pixels ] elif filenames is... | Parse catalog FITS files into recarray . |
5,294 | def _defineVariables ( self ) : logger . info ( 'Catalog contains %i objects' % ( len ( self . data ) ) ) mc_source_id_field = self . config [ 'catalog' ] [ 'mc_source_id_field' ] if mc_source_id_field is not None : if mc_source_id_field not in self . data . dtype . names : array = np . zeros ( len ( self . data ) , dt... | Helper function to define pertinent variables from catalog data . |
5,295 | def add_node ( self , node_id , task , inputs ) : if node_id in self . nodes_by_id : raise ValueError ( 'The node {0} already exists in this workflow.' . format ( node_id ) ) node = WorkflowNode ( node_id , task , inputs ) self . nodes_by_id [ node_id ] = node for source , value in six . itervalues ( inputs ) : if sour... | Adds a node to the workflow . |
5,296 | def map_output ( self , node_id , node_output_name , parameter_name ) : self . output_mapping [ parameter_name ] = ( node_id , node_output_name ) dependents = self . dependents_by_node_id . get ( node_id , set ( ) ) dependents . add ( 'output_{}' . format ( parameter_name ) ) self . dependents_by_node_id [ node_id ] = ... | Maps the output from a node to a workflow output . |
5,297 | def to_json ( self , indent = None ) : inputs = ParameterCollection ( self . inputs ) d = { 'meta' : { 'name' : self . name , 'description' : self . description } , 'inputs' : [ ] , 'workflow' : [ ] , 'outputs' : [ { 'name' : k , 'node' : v } for k , v in six . iteritems ( self . output_mapping ) ] } for parameter in s... | Serialize this workflow to JSON |
5,298 | def from_json ( cls , text ) : d = json . loads ( text ) meta = d . get ( 'meta' , { } ) workflow = cls ( name = meta . get ( 'name' ) , description = meta . get ( 'description' ) ) for workflow_input in d . get ( 'inputs' , [ ] ) : parameter_cls = Parameter . by_id ( workflow_input [ 'type' ] ) args = [ workflow_input... | Return a new workflow deserialized from a JSON string |
5,299 | def get_handler ( self , * args , ** options ) : handler = super ( ) . get_handler ( * args , ** options ) use_static_handler = options [ 'use_static_handler' ] insecure_serving = options [ 'insecure_serving' ] if use_static_handler and ( settings . DEBUG or insecure_serving ) : return CRAStaticFilesHandler ( handler )... | Return the static files serving handler wrapping the default handler if static files should be served . Otherwise return the default handler . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.