idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
10,000
def prepare_blobs(self):
    """Populate the blobs: read the header and optionally cache event offsets."""
    self.raw_header = self.extract_header()
    if not self.cache_enabled:
        return
    self._cache_offsets()
Populate the blobs
10,001
def extract_header ( self ) : self . log . info ( "Extracting the header" ) raw_header = self . raw_header = defaultdict ( list ) first_line = self . blob_file . readline ( ) first_line = try_decode_string ( first_line ) self . blob_file . seek ( 0 , 0 ) if not first_line . startswith ( str ( 'start_run' ) ) : self . l...
Create a dictionary with the EVT header information
10,002
def get_blob ( self , index ) : self . log . info ( "Retrieving blob #{}" . format ( index ) ) if index > len ( self . event_offsets ) - 1 : self . log . info ( "Index not in cache, caching offsets" ) self . _cache_offsets ( index , verbose = False ) self . blob_file . seek ( self . event_offsets [ index ] , 0 ) blob =...
Return a blob with the event at the given index
10,003
def process ( self , blob = None ) : try : blob = self . get_blob ( self . index ) except IndexError : self . log . info ( "Got an IndexError, trying the next file" ) if ( self . basename or self . filenames ) and self . file_index < self . index_stop : self . file_index += 1 self . log . info ( "Now at file_index={}" ...
Pump the next blob to the modules
10,004
def _cache_offsets ( self , up_to_index = None , verbose = True ) : if not up_to_index : if verbose : self . print ( "Caching event file offsets, this may take a bit." ) self . blob_file . seek ( 0 , 0 ) self . event_offsets = [ ] if not self . raw_header : self . event_offsets . append ( 0 ) else : self . blob_file . ...
Cache all event offsets .
10,005
def _record_offset ( self ) : offset = self . blob_file . tell ( ) self . event_offsets . append ( offset )
Stores the current file pointer position
10,006
def _create_blob ( self ) : blob = None for line in self . blob_file : line = try_decode_string ( line ) line = line . strip ( ) if line == '' : self . log . info ( "Ignoring empty line..." ) continue if line . startswith ( 'end_event:' ) and blob : blob [ 'raw_header' ] = self . raw_header return blob try : tag , valu...
Parse the next event from the current file position
10,007
def runserver ( project_name ) : DIR = os . listdir ( project_name ) if 'settings.py' not in DIR : raise NotImplementedError ( 'No file called: settings.py found in %s' % project_name ) CGI_BIN_FOLDER = os . path . join ( project_name , 'cgi' , 'cgi-bin' ) CGI_FOLDER = os . path . join ( project_name , 'cgi' ) if not o...
Runs a python cgi server in a subprocess .
10,008
def getUtility ( self , decision , sample , aggregationMode = "avg" ) : utilities = self . getUtilities ( decision , sample ) if aggregationMode == "avg" : utility = numpy . mean ( utilities ) elif aggregationMode == "min" : utility = min ( utilities ) elif aggregationMode == "max" : utility = max ( utilities ) else : ...
Get the utility of a given decision given a preference .
10,009
def getUtilities ( self , decision , orderVector ) : scoringVector = self . getScoringVector ( orderVector ) utilities = [ ] for alt in decision : altPosition = orderVector . index ( alt ) utility = float ( scoringVector [ altPosition ] ) if self . isLoss == True : utility = - 1 * utility utilities . append ( utility )...
Returns a list of floats containing the utilities of every candidate in the decision.
10,010
def getUtilities ( self , decision , binaryRelations ) : m = len ( binaryRelations ) utilities = [ ] for cand in decision : tops = [ cand - 1 ] index = 0 while index < len ( tops ) : s = tops [ index ] for j in range ( m ) : if j == s : continue if binaryRelations [ j ] [ s ] > 0 : if j not in tops : tops . append ( j ...
Returns a list of floats containing the utilities of every candidate in the decision. This was adapted from code written by Lirong Xia.
10,011
def db_credentials(self):
    """Return the (username, password) pair for the KM3NeT WebDB.

    Falls back to interactive prompting when the config has no 'DB' entry.
    """
    try:
        creds = (self.config.get('DB', 'username'),
                 self.config.get('DB', 'password'))
    except Error:
        creds = (input("Please enter your KM3NeT DB username: "),
                 getpass.getpass("Password: "))
    return creds
Return username and password for the KM3NeT WebDB .
10,012
def get_path ( src ) : res = None while not res : if res is False : print ( colored ( 'You must provide a path to an existing directory!' , 'red' ) ) print ( 'You need a local clone or release of (a fork of) ' 'https://github.com/{0}' . format ( src ) ) res = input ( colored ( 'Local path to {0}: ' . format ( src ) , '...
Prompts the user to input a local path .
10,013
def execute_all ( self ) : for workflow_id in self . workflows : if self . workflows [ workflow_id ] . online : for interval in self . workflows [ workflow_id ] . requested_intervals : logging . info ( "Executing workflow {} over interval {}" . format ( workflow_id , interval ) ) self . workflows [ workflow_id ] . exec...
Execute all workflows
10,014
def execute ( self , sources , sink , interval , alignment_stream = None ) : if not isinstance ( interval , TimeInterval ) : raise TypeError ( 'Expected TimeInterval, got {}' . format ( type ( interval ) ) ) if interval . end > sink . channel . up_to_timestamp : raise StreamNotAvailableError ( sink . channel . up_to_ti...
Execute the tool over the given time interval . If an alignment stream is given the output instances will be aligned to this stream
10,015
def create_stream ( self , stream_id , sandbox = None ) : if stream_id in self . streams : raise StreamAlreadyExistsError ( "Stream with id '{}' already exists" . format ( stream_id ) ) if sandbox is not None : raise ValueError ( "Cannot use sandboxes with memory streams" ) stream = Stream ( channel = self , stream_id ...
Must be overridden by deriving classes; creates the stream according to the tool and returns its unique identifier stream_id.
10,016
def purge_all(self, remove_definitions=False):
    """Clear every stream in the channel - use with caution!

    Iterates over a snapshot of the keys since purging mutates the dict.
    """
    stream_ids = list(self.streams.keys())
    for sid in stream_ids:
        self.purge_stream(sid, remove_definition=remove_definitions)
Clears all streams in the channel - use with caution!
10,017
def update_state(self, up_to_timestamp):
    """Mark the channel as up to date at *up_to_timestamp*.

    Every stream gets its calculated interval set to [MIN_DATE,
    up_to_timestamp]; a fresh TimeIntervals is built per stream to avoid
    shared mutable state.
    """
    for stream_id in self.streams:
        stream = self.streams[stream_id]
        stream.calculated_intervals = TimeIntervals([(MIN_DATE, up_to_timestamp)])
    self.up_to_timestamp = up_to_timestamp
Call this function to ensure that the channel is up to date at the time of timestamp . I . e . all the streams that have been created before or at that timestamp are calculated exactly until up_to_timestamp .
10,018
def compile_regex ( self , pattern , flags = 0 ) : pattern_re = regex . compile ( '(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})' ) while 1 : matches = [ md . groupdict ( ) for md in pattern_re . finditer ( pattern ) ] if len ( matches ) == 0 : break for md in matches : if md [ 'patname' ] in se...
Compile regex from pattern and pattern_dict
10,019
def _load_patterns(self, folders, pattern_dict=None):
    """Load all patterns from every plain-named file found in *folders*."""
    patterns = {} if pattern_dict is None else pattern_dict
    for folder in folders:
        for entry in os.listdir(folder):
            # only files whose names are word chars / dashes are pattern files
            if not regex.match('^[\w-]+$', entry):
                continue
            self._load_pattern_file(os.path.join(folder, entry), patterns)
    return patterns
Load all pattern from all the files in folders
10,020
def load_pkl ( filenames ) : if not isinstance ( filenames , ( list , tuple ) ) : filenames = [ filenames ] times = [ ] for name in filenames : name = str ( name ) with open ( name , 'rb' ) as file : loaded_obj = pickle . load ( file ) if not isinstance ( loaded_obj , Times ) : raise TypeError ( "At least one loaded ob...
Unpickle file contents .
10,021
async def retrieve ( self , url , ** kwargs ) : try : async with self . websession . request ( 'GET' , url , ** kwargs ) as res : if res . status != 200 : raise Exception ( "Could not retrieve information from API" ) if res . content_type == 'application/json' : return await res . json ( ) return await res . text ( ) e...
Issue API requests .
10,022
def _to_number ( cls , string ) : try : if float ( string ) - int ( string ) == 0 : return int ( string ) return float ( string ) except ValueError : try : return float ( string ) except ValueError : return string
Convert string to int or float .
10,023
async def stations ( self ) : data = await self . retrieve ( API_DISTRITS ) Station = namedtuple ( 'Station' , [ 'latitude' , 'longitude' , 'idAreaAviso' , 'idConselho' , 'idDistrito' , 'idRegiao' , 'globalIdLocal' , 'local' ] ) _stations = [ ] for station in data [ 'data' ] : _station = Station ( self . _to_number ( s...
Retrieve stations .
10,024
async def weather_type_classe(self):
    """Retrieve the Portuguese translations for each weather type id."""
    data = await self.retrieve(url=API_WEATHER_TYPE)
    self.weather_type = {
        entry['idWeatherType']: entry['descIdWeatherTypePT']
        for entry in data['data']
    }
    return self.weather_type
Retrieve translation for weather type .
10,025
async def wind_type_classe(self):
    """Retrieve the Portuguese translations for each wind speed class."""
    data = await self.retrieve(url=API_WIND_TYPE)
    self.wind_type = {
        int(entry['classWindSpeed']): entry['descClassWindSpeedDailyPT']
        for entry in data['data']
    }
    return self.wind_type
Retrieve translation for wind type .
10,026
def register(self, plugin):
    """Subscribe *plugin* to every message it listens to, hand it our
    message queue, and start it up."""
    for message_name in plugin.listeners:
        self.listeners[message_name].add(plugin)
    self.plugins.add(plugin)
    plugin.messenger = self.messages
    plugin.start()
Add the plugin to our set of listeners for each message that it listens to tell it to use our messages Queue for communication and start it up .
10,027
def start(self):
    """Dispatch APP_START, then pump queued messages to their listeners
    until an APP_STOP message arrives."""
    self.recieve('APP_START')
    self.alive = True
    while self.alive:
        message, payload = self.messages.get()
        if message != 'APP_STOP':
            self.recieve(message, payload)
            continue
        # shut every plugin down, then leave the loop
        for plugin in self.plugins:
            plugin.recieve('SHUTDOWN')
        self.alive = False
Send APP_START to any plugins that listen for it, then loop, waiting for messages and sending them to their listening plugins, until it's time to shut down.
10,028
def choose(self, palette):
    """Pick a palette and set up an endless colour cycler for it.

    Parameters
    ----------
    palette : str
        Key into ``self.colours``.

    Raises
    ------
    KeyError
        If *palette* is not one of ``self.available``.
    """
    try:
        self._cycler = cycle(self.colours[palette])
    except KeyError:
        # BUGFIX: typo in the user-facing message ("Chose" -> "Choose")
        raise KeyError(
            "Choose one of the following colour palettes: {0}".format(
                self.available
            )
        )
Pick a palette
10,029
def refresh_styles ( self ) : import matplotlib . pyplot as plt self . colours = { } for style in plt . style . available : try : style_colours = plt . style . library [ style ] [ 'axes.prop_cycle' ] self . colours [ style ] = [ c [ 'color' ] for c in list ( style_colours ) ] except KeyError : continue self . colours [...
Load all available styles
10,030
def get_file_object ( username , password , utc_start = None , utc_stop = None ) : if not utc_start : utc_start = datetime . now ( ) if not utc_stop : utc_stop = utc_start + timedelta ( days = 1 ) logging . info ( "Downloading schedules for username [%s] in range [%s] to " "[%s]." % ( username , utc_start , utc_stop ) ...
Make the connection . Return a file - like object .
10,031
def process_file_object ( file_obj , importer , progress ) : logging . info ( "Processing schedule data." ) try : handler = XmlCallbacks ( importer , progress ) parser = sax . make_parser ( ) parser . setContentHandler ( handler ) parser . setErrorHandler ( handler ) parser . parse ( file_obj ) except : logging . excep...
Parse the data using the connected file - like object .
10,032
def parse_schedules(username, password, importer, progress,
                    utc_start=None, utc_stop=None):
    """Utility that marries the connecting and reading steps: download the
    schedule data and feed it straight into the parser."""
    schedule_file = get_file_object(username, password, utc_start, utc_stop)
    process_file_object(schedule_file, importer, progress)
A utility function to marry the connecting and reading functions .
10,033
def km3h5concat ( input_files , output_file , n_events = None , ** kwargs ) : from km3pipe import Pipeline from km3pipe . io import HDF5Pump , HDF5Sink pipe = Pipeline ( ) pipe . attach ( HDF5Pump , filenames = input_files , ** kwargs ) pipe . attach ( StatusBar , every = 250 ) pipe . attach ( HDF5Sink , filename = out...
Concatenate KM3HDF5 files via pipeline .
10,034
def get_data ( stream , parameters , fmt ) : sds = kp . db . StreamDS ( ) if stream not in sds . streams : log . error ( "Stream '{}' not found in the database." . format ( stream ) ) return params = { } if parameters : for parameter in parameters : if '=' not in parameter : log . error ( "Invalid parameter syntax '{}'...
Retrieve data for given stream and parameters or None if not found
10,035
def available_streams():
    """Print a short, sorted list of the available database streams."""
    sds = kp.db.StreamDS()
    stream_list = ', '.join(sorted(sds.streams))
    print("Available streams: ")
    print(stream_list)
Show a short list of available streams .
10,036
def upload_runsummary ( csv_filename , dryrun = False ) : print ( "Checking '{}' for consistency." . format ( csv_filename ) ) if not os . path . exists ( csv_filename ) : log . critical ( "{} -> file not found." . format ( csv_filename ) ) return try : df = pd . read_csv ( csv_filename , sep = '\t' ) except pd . error...
Reads the CSV file and uploads its contents to the runsummary table
10,037
def convert_runsummary_to_json ( df , comment = 'Uploaded via km3pipe.StreamDS' , prefix = 'TEST_' ) : data_field = [ ] comment += ", by {}" . format ( getpass . getuser ( ) ) for det_id , det_data in df . groupby ( 'det_id' ) : runs_field = [ ] data_field . append ( { "DetectorId" : det_id , "Runs" : runs_field } ) fo...
Convert a Pandas DataFrame with runsummary to JSON for DB upload
10,038
def calcAcceptanceRatio ( self , V , W ) : acceptanceRatio = 1.0 for comb in itertools . combinations ( V , 2 ) : vIOverJ = 1 wIOverJ = 1 if V . index ( comb [ 0 ] ) > V . index ( comb [ 1 ] ) : vIOverJ = 0 if W . index ( comb [ 0 ] ) > W . index ( comb [ 1 ] ) : wIOverJ = 0 acceptanceRatio = acceptanceRatio * self . p...
Given a order vector V and a proposed order vector W calculate the acceptance ratio for changing to W when using MCMC .
10,039
def getNextSample ( self , V ) : randPos = random . randint ( 0 , len ( V ) - 2 ) W = copy . deepcopy ( V ) d = V [ randPos ] c = V [ randPos + 1 ] W [ randPos ] = c W [ randPos + 1 ] = d prMW = 1 prMV = 1 prob = min ( 1.0 , ( prMW / prMV ) * pow ( self . phi , self . wmg [ d ] [ c ] ) ) / 2 if random . random ( ) <= p...
Generate the next sample by randomly flipping two adjacent candidates .
10,040
def getNextSample ( self , V ) : positions = range ( 0 , len ( self . wmg ) ) randPoss = random . sample ( positions , self . shuffleSize ) flipSet = copy . deepcopy ( randPoss ) randPoss . sort ( ) W = copy . deepcopy ( V ) for j in range ( 0 , self . shuffleSize ) : W [ randPoss [ j ] ] = V [ flipSet [ j ] ] prMW = 1...
Generate the next sample by randomly shuffling candidates .
10,041
def getNextSample ( self , V ) : phi = self . phi wmg = self . wmg W = [ ] W . append ( V [ 0 ] ) for j in range ( 2 , len ( V ) + 1 ) : randomSelect = random . random ( ) threshold = 0.0 denom = 1.0 for k in range ( 1 , j ) : denom = denom + phi ** k for k in range ( 1 , j + 1 ) : numerator = phi ** ( j - k ) threshol...
We generate a new ranking based on a Mallows - based jumping distribution . The algorithm is described in Bayesian Ordinal Peer Grading by Raman and Joachims .
10,042
def getNextSample(self, V):
    """Propose a new ranking via Plackett-Luce drawing and accept or reject
    it with a Metropolis-Hastings step; returns the (possibly unchanged)
    ranking."""
    W, WProb = self.drawRankingPlakettLuce(V)
    VProb = self.calcProbOfVFromW(V, W)
    ratio = self.calcAcceptanceRatio(V, W)
    accept_prob = min(1.0, ratio * (VProb / WProb))
    if random.random() <= accept_prob:
        return W
    return V
Given a ranking over the candidates generate a new ranking by assigning each candidate at position i a Plakett - Luce weight of phi^i and draw a new ranking .
10,043
def calcDrawingProbs(self):
    """Return the normalized drawing probability for each position: an item
    at position i carries weight phi**i."""
    num_positions = len(self.wmg.keys())
    weights = [self.phi ** pos for pos in range(num_positions)]
    total = sum(weights)
    return [w / total for w in weights]
Returns a vector that contains the probability of an item being drawn from each position. We say that every item in an order vector is drawn with weight phi^i, where i is its position.
10,044
def drawRankingPlakettLuce ( self , rankList ) : probs = self . plakettLuceProbs numCands = len ( rankList ) newRanking = [ ] remainingCands = copy . deepcopy ( rankList ) probsCopy = copy . deepcopy ( self . plakettLuceProbs ) totalProb = sum ( probs ) prob = 1.0 while ( len ( newRanking ) < numCands ) : rand = random...
Given an order vector over the candidates draw candidates to generate a new order vector .
10,045
def calcProbOfVFromW(self, V, W):
    """Probability of generating order vector V from current sample W.

    The candidate at position i of W gets weight phi**i; V's probability is
    the product of normalized weights of its candidates, drawn without
    replacement (Plackett-Luce style).

    Parameters
    ----------
    V, W : list of int
        1-based candidate order vectors of equal length.
    """
    # BUGFIX: was `weights = range(0, len(V))`, and `range` objects do not
    # support item assignment in Python 3 (TypeError). Use a list.
    weights = [0] * len(V)
    for i, alt in enumerate(W):
        weights[alt - 1] = self.phi ** i
    prob = 1.0
    totalWeight = sum(weights)
    for alt in V:
        prob = prob * weights[alt - 1] / totalWeight
        totalWeight = totalWeight - weights[alt - 1]
    return prob
Given a order vector V and an order vector W calculate the probability that we generate V as our next sample if our current sample was W .
10,046
def get_hist(rfile, histname, get_overflow=False):
    """Read a 1D histogram; returns (bin_values, xlims)."""
    import root_numpy as rnp

    fobj = open_rfile(rfile)
    hist = fobj[histname]
    xlims = np.array(list(hist.xedges()))
    bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
    fobj.close()
    return bin_values, xlims
Read a 1D Histogram .
10,047
def interpol_hist2d ( h2d , oversamp_factor = 10 ) : from rootpy import ROOTError xlim = h2d . bins ( axis = 0 ) ylim = h2d . bins ( axis = 1 ) xn = h2d . nbins ( 0 ) yn = h2d . nbins ( 1 ) x = np . linspace ( xlim [ 0 ] , xlim [ 1 ] , xn * oversamp_factor ) y = np . linspace ( ylim [ 0 ] , ylim [ 1 ] , yn * oversamp_f...
Sample the interpolator of a root 2d hist .
10,048
def create_window ( size = None , samples = 16 , * , fullscreen = False , title = None , threaded = True ) -> Window : if size is None : width , height = 1280 , 720 else : width , height = size if samples < 0 or ( samples & ( samples - 1 ) ) != 0 : raise Exception ( 'Invalid number of samples: %d' % samples ) window = ...
Create the main window .
10,049
def clear(self, red=0.0, green=0.0, blue=0.0, alpha=0.0) -> None:
    """Clear the window with the given RGBA colour."""
    rgba = (red, green, blue, alpha)
    self.wnd.clear(*rgba)
Clear the window .
10,050
def windowed(self, size) -> None:
    """Switch the window to windowed mode at the given (width, height)."""
    win_width, win_height = size
    self.wnd.windowed(win_width, win_height)
Set the window to windowed mode .
10,051
def product_metadata ( product , dst_folder , counter = None , writers = [ file_writer ] , geometry_check = None ) : if not counter : counter = { 'products' : 0 , 'saved_tiles' : 0 , 'skipped_tiles' : 0 , 'skipped_tiles_paths' : [ ] } s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com' product_meta_link = '{0}/{1}' . fo...
Extract metadata for a specific product
10,052
def daily_metadata ( year , month , day , dst_folder , writers = [ file_writer ] , geometry_check = None , num_worker_threads = 1 ) : threaded = False counter = { 'products' : 0 , 'saved_tiles' : 0 , 'skipped_tiles' : 0 , 'skipped_tiles_paths' : [ ] } if num_worker_threads > 1 : threaded = True queue = Queue ( ) year_d...
Extract metadata for all products on a specific date
10,053
def range_metadata ( start , end , dst_folder , num_worker_threads = 0 , writers = [ file_writer ] , geometry_check = None ) : assert isinstance ( start , date ) assert isinstance ( end , date ) delta = end - start dates = [ ] for i in range ( delta . days + 1 ) : dates . append ( start + timedelta ( days = i ) ) days ...
Extract metadata for all products in a date range
10,054
def get_on_tmdb(uri, **kwargs):
    """GET a TMDB resource at *uri*, passing kwargs as query parameters."""
    kwargs['api_key'] = app.config['TMDB_API_KEY']
    url = (TMDB_API_URL + uri).encode('utf8')
    response = requests_session.get(url, params=kwargs)
    response.raise_for_status()
    return json.loads(response.text)
Get a resource on TMDB .
10,055
def search ( ) : redis_key = 's_%s' % request . args [ 'query' ] . lower ( ) cached = redis_ro_conn . get ( redis_key ) if cached : return Response ( cached ) else : try : found = get_on_tmdb ( u'/search/movie' , query = request . args [ 'query' ] ) movies = [ ] for movie in found [ 'results' ] : cast = get_on_tmdb ( u...
Search a movie on TMDB .
10,056
def get_movie ( tmdb_id ) : redis_key = 'm_%s' % tmdb_id cached = redis_ro_conn . get ( redis_key ) if cached : return Response ( cached ) else : try : details = get_on_tmdb ( u'/movie/%d' % tmdb_id ) cast = get_on_tmdb ( u'/movie/%d/casts' % tmdb_id ) alternative = get_on_tmdb ( u'/movie/%d/alternative_titles' % tmdb_...
Get information about a movie using its TMDB id.
10,057
def _handle_response_error ( self , response , retries , ** kwargs ) : r error = self . _convert_response_to_error ( response ) if error is None : return response max_retries = self . _max_retries_for_error ( error ) if max_retries is None or retries >= max_retries : return response backoff = min ( 0.0625 * 2 ** retrie...
r Provides a way for each connection wrapper to handle error responses .
10,058
def _convert_response_to_error ( self , response ) : content_type = response . headers . get ( "content-type" , "" ) if "application/x-protobuf" in content_type : self . logger . debug ( "Decoding protobuf response." ) data = status_pb2 . Status . FromString ( response . content ) status = self . _PB_ERROR_CODES . get ...
Subclasses may override this method in order to influence how errors are parsed from the response .
10,059
def parse_pattern ( format_string , env , wrapper = lambda x , y : y ) : formatter = Formatter ( ) fields = [ x [ 1 ] for x in formatter . parse ( format_string ) if x [ 1 ] is not None ] prepared_env = { } for field in fields : for field_alt in ( x . strip ( ) for x in field . split ( '|' ) ) : if field_alt [ 0 ] in '...
Parse the format_string and return prepared data according to the env .
10,060
def perc(arr, p=95, **kwargs):
    """Return the symmetric (lower, upper) percentiles covering *p* percent."""
    tail = (100 - p) / 2
    return np.percentile(arr, (tail, 100 - tail), **kwargs)
Create symmetric percentiles with p coverage .
10,061
def resample_1d(arr, n_out=None, random_state=None):
    """Resample an array with replacement.

    Parameters
    ----------
    arr : array-like
        Input values (made at least 1-D).
    n_out : int, optional
        Number of samples to draw; defaults to ``len(arr)``.
    random_state : np.random.RandomState, optional
        Source of randomness; a fresh one is created if omitted.

    Returns
    -------
    np.ndarray of length ``n_out``.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    arr = np.atleast_1d(arr)
    n = len(arr)
    if n_out is None:
        n_out = n
    # BUGFIX: draw `n_out` samples; the original drew `size=n`, silently
    # ignoring the caller's n_out
    idx = random_state.randint(0, n, size=n_out)
    return arr[idx]
Resample an array with replacement .
10,062
def bootstrap_params(rv_cont, data, n_iter=5, **kwargs):
    """Bootstrap the fit parameters of a distribution.

    Fits *rv_cont* to ``n_iter`` resampled versions of *data* and returns
    the fitted parameter tuples as one numpy array.
    """
    results = [
        rv_cont.fit(resample_1d(data, **kwargs)) for _ in range(n_iter)
    ]
    return np.array(results)
Bootstrap the fit params of a distribution .
10,063
def param_describe(params, quant=95, axis=0):
    """Get mean + symmetric quantile range from bootstrapped params.

    Parameters
    ----------
    params : array-like
        Bootstrapped parameter samples.
    quant : number
        Central coverage in percent (e.g. 95 -> 2.5th/97.5th percentiles).
    axis : int
        Axis along which to aggregate.

    Returns
    -------
    (mean, lower, upper)
    """
    par = np.mean(params, axis=axis)
    # BUGFIX: the original called perc(quant), i.e. took percentiles OF the
    # scalar `quant`, so lower and upper were both equal to `quant`.
    # Compute the symmetric percentile bounds directly instead.
    lo = (100 - quant) / 2
    up = 100 - lo
    p_up = np.percentile(params, up, axis=axis)
    p_lo = np.percentile(params, lo, axis=axis)
    return par, p_lo, p_up
Get mean + quantile range from bootstrapped params .
10,064
def bootstrap_fit ( rv_cont , data , n_iter = 10 , quant = 95 , print_params = True , ** kwargs ) : fit_params = bootstrap_params ( rv_cont , data , n_iter ) par , lo , up = param_describe ( fit_params , quant = quant ) names = param_names ( rv_cont ) maxlen = max ( [ len ( s ) for s in names ] ) print ( "-------------...
Bootstrap a distribution fit + get confidence intervals for the params .
10,065
def rvs(self, *args, **kwargs):
    """Draw random variates from the underlying KDE.

    Keyword Arguments
    -----------------
    size : int
        Number of samples to draw (default 1).
    random_state : optional
        Seed/state forwarded to the KDE sampler.
    """
    size = kwargs.pop('size', 1)
    # BUGFIX: the original popped 'size' a second time here, so the caller's
    # random_state kwarg was never read (always None).
    random_state = kwargs.pop('random_state', None)
    return self._kde.sample(n_samples=size, random_state=random_state)
Draw Random Variates .
10,066
def main():
    """Entry point when running as a script from the command line."""
    from docopt import docopt

    arguments = docopt(__doc__)
    i3extract(arguments['INFILE'], arguments['OUTFILE'])
Entry point when running as script from commandline .
10,067
def connect ( self , server_config ) : if 'connection_string' in server_config : self . client = pymongo . MongoClient ( server_config [ 'connection_string' ] ) self . db = self . client [ server_config [ 'db' ] ] else : self . client = pymongo . MongoClient ( server_config [ 'host' ] , server_config [ 'port' ] , tz_aw...
Connect using the configuration given
10,068
def ptconcat ( output_file , input_files , overwrite = False ) : filt = tb . Filters ( complevel = 5 , shuffle = True , fletcher32 = True , complib = 'zlib' ) out_tabs = { } dt_file = input_files [ 0 ] log . info ( "Reading data struct '%s'..." % dt_file ) h5struc = tb . open_file ( dt_file , 'r' ) log . info ( "Openin...
Concatenate HDF5 Files
10,069
def calibrate_dom ( dom_id , data , detector , livetime = None , fit_ang_dist = False , scale_mc_to_data = True , ad_fit_shape = 'pexp' , fit_background = True , ctmin = - 1. ) : if isinstance ( data , str ) : filename = data loaders = { '.h5' : load_k40_coincidences_from_hdf5 , '.root' : load_k40_coincidences_from_roo...
Calibrate intra DOM PMT time offsets efficiencies and sigmas
10,070
def load_k40_coincidences_from_hdf5(filename, dom_id):
    """Load K40 coincidences for *dom_id* from an HDF5 file.

    Returns (counts array, livetime).
    """
    with h5py.File(filename, 'r') as h5f:
        dataset = h5f['/k40counts/{0}'.format(dom_id)]
        livetime = dataset.attrs['livetime']
        counts = np.array(dataset)
    return counts, livetime
Load k40 coincidences from hdf5 file
10,071
def load_k40_coincidences_from_rootfile ( filename , dom_id ) : from ROOT import TFile root_file_monitor = TFile ( filename , "READ" ) dom_name = str ( dom_id ) + ".2S" histo_2d_monitor = root_file_monitor . Get ( dom_name ) data = [ ] for c in range ( 1 , histo_2d_monitor . GetNbinsX ( ) + 1 ) : combination = [ ] for ...
Load k40 coincidences from JMonitorK40 ROOT file
10,072
def calculate_angles(detector, combs):
    """Calculate angles between PMT combinations according to the PMT
    positions in the detector file."""
    pmt_angles = detector.pmt_angles
    angles = [
        kp.math.angle_between(np.array(pmt_angles[first]),
                              np.array(pmt_angles[second]))
        for first, second in combs
    ]
    return np.array(angles)
Calculates angles between PMT combinations according to positions in detector_file
10,073
def fit_angular_distribution ( angles , rates , rate_errors , shape = 'pexp' ) : if shape == 'exp' : fit_function = exponential if shape == 'pexp' : fit_function = exponential_polinomial cos_angles = np . cos ( angles ) popt , pcov = optimize . curve_fit ( fit_function , cos_angles , rates ) fitted_rates = fit_function...
Fits angular distribution of rates .
10,074
def minimize_t0s ( means , weights , combs ) : def make_quality_function ( means , weights , combs ) : def quality_function ( t0s ) : sq_sum = 0 for mean , comb , weight in zip ( means , combs , weights ) : sq_sum += ( ( mean - ( t0s [ comb [ 1 ] ] - t0s [ comb [ 0 ] ] ) ) * weight ) ** 2 return sq_sum return quality_f...
Varies t0s to minimize the deviation of the gaussian means from zero .
10,075
def minimize_qes ( fitted_rates , rates , weights , combs ) : def make_quality_function ( fitted_rates , rates , weights , combs ) : def quality_function ( qes ) : sq_sum = 0 for fitted_rate , comb , rate , weight in zip ( fitted_rates , combs , rates , weights ) : sq_sum += ( ( rate / qes [ comb [ 0 ] ] / qes [ comb [...
Varies QEs to minimize the deviation of the rates from the fitted_rates .
10,076
def correct_means(means, opt_t0s, combs):
    """Apply the optimal t0s to the Gaussian means.

    For each PMT pair the expected time difference is subtracted by the
    observed mean.
    """
    corrected = []
    for mean, comb in zip(means, combs):
        delta_t0 = opt_t0s[comb[1]] - opt_t0s[comb[0]]
        corrected.append(delta_t0 - mean)
    return np.array(corrected)
Applies optimal t0s to gaussians means .
10,077
def correct_rates(rates, opt_qes, combs):
    """Apply the optimal quantum efficiencies to the rates: each rate is
    divided by the QE of both PMTs in its combination."""
    corrected = [
        rate / opt_qes[first] / opt_qes[second]
        for rate, (first, second) in zip(rates, combs)
    ]
    return np.array(corrected)
Applies optimal qes to rates .
10,078
def calculate_rms_means(means, corrected_means):
    """Return the RMS deviation from zero of the means, before and after
    t0 correction."""
    def rms(values):
        # root-mean-square deviation from zero
        return np.sqrt(np.mean(np.square(values)))

    return rms(means), rms(corrected_means)
Calculates RMS of means from zero before and after correction
10,079
def calculate_rms_rates(rates, fitted_rates, corrected_rates):
    """Return the RMS deviation of the rates from the fitted rates, before
    and after QE correction."""
    def rms(values, reference):
        return np.sqrt(np.mean((values - reference) ** 2))

    return rms(rates, fitted_rates), rms(corrected_rates, fitted_rates)
Calculates RMS of rates from fitted_rates before and after correction
10,080
def add_to_twofold_matrix ( times , tdcs , mat , tmax = 10 ) : h_idx = 0 c_idx = 0 n_hits = len ( times ) multiplicity = 0 while h_idx <= n_hits : c_idx = h_idx + 1 if ( c_idx < n_hits ) and ( times [ c_idx ] - times [ h_idx ] <= tmax ) : multiplicity = 2 c_idx += 1 while ( c_idx < n_hits ) and ( times [ c_idx ] - time...
Add counts to twofold coincidences for a given tmax .
10,081
def reset(self):
    """Reset the coincidence counter.

    Each new key gets a fresh zero matrix of shape (465, 2*tmax + 1) -- one
    row per PMT pair, one column per time-difference bin.
    """
    matrix_shape = (465, self.tmax * 2 + 1)
    self.counts = defaultdict(partial(np.zeros, matrix_shape))
    self.n_timeslices = defaultdict(int)
Reset coincidence counter
10,082
def dump(self):
    """Write coincidence counts and livetime into a Python pickle at
    ``self.dump_filename``."""
    self.print("Dumping data to {}".format(self.dump_filename))
    # BUGFIX: use a context manager so the file handle is flushed and
    # closed; the original passed an anonymous open(...) that leaked.
    with open(self.dump_filename, "wb") as fobj:
        pickle.dump(
            {'data': self.counts, 'livetime': self.get_livetime()}, fobj
        )
Write coincidence counts into a Python pickle
10,083
def get_named_by_definition(cls, element_list, string_def):
    """Return the value of the first element whose IOOS definition matches
    *string_def*, or None if no element matches.
    """
    try:
        return next(
            (st.value for st in element_list if st.definition == string_def)
        )
    except StopIteration:
        # BUGFIX: was a bare `except Exception`, which also swallowed real
        # errors (e.g. malformed elements); only "no match" returns None now.
        return None
Attempts to get an IOOS definition from a list of xml elements
10,084
def get_ioos_def ( self , ident , elem_type , ont ) : if elem_type == "identifier" : getter_fn = self . system . get_identifiers_by_name elif elem_type == "classifier" : getter_fn = self . system . get_classifiers_by_name else : raise ValueError ( "Unknown element type '{}'" . format ( elem_type ) ) return DescribeSens...
Gets a definition given an identifier and where to search for it
10,085
def get_sentence ( start = None , depth = 7 ) : if not GRAMMAR : return 'Please set a GRAMMAR file' start = start if start else GRAMMAR . start ( ) if isinstance ( start , Nonterminal ) : productions = GRAMMAR . productions ( start ) if not depth : terminals = [ p for p in productions if not isinstance ( start , Nonter...
follow the grammatical patterns to generate a random sentence
10,086
def format_sentence ( sentence ) : for index , word in enumerate ( sentence ) : if word == 'a' and index + 1 < len ( sentence ) and re . match ( r'^[aeiou]' , sentence [ index + 1 ] ) and not re . match ( r'^uni' , sentence [ index + 1 ] ) : sentence [ index ] = 'an' text = ' ' . join ( sentence ) text = '%s%s' % ( tex...
fix display formatting of a sentence array
10,087
def new_station(self, _id, callSign, name, affiliate, fccChannelNumber):
    """Callback run for each new station; prints it when verbose."""
    if not self.__v_station:
        return
    print("[Station: %s, %s, %s, %s, %s]"
          % (_id, callSign, name, affiliate, fccChannelNumber))
Callback run for each new station
10,088
def new_lineup(self, name, location, device, _type, postalCode, _id):
    """Callback run for each new lineup; prints it when verbose."""
    if not self.__v_lineup:
        return
    print("[Lineup: %s, %s, %s, %s, %s, %s]"
          % (name, location, device, _type, postalCode, _id))
Callback run for each new lineup
10,089
def new_genre(self, program, genre, relevance):
    """Callback run for each new program-genre entry; prints it when verbose."""
    if not self.__v_genre:
        return
    print("[Genre: %s, %s, %s]" % (program, genre, relevance))
Callback run for each new program genre entry
10,090
def qsub ( script , job_name , dryrun = False , * args , ** kwargs ) : print ( "Preparing job script..." ) job_string = gen_job ( script = script , job_name = job_name , * args , ** kwargs ) env = os . environ . copy ( ) if dryrun : print ( "This is a dry run! Here is the generated job file, which will " "not be submit...
Submit a job via qsub .
10,091
def gen_job ( script , job_name , log_path = 'qlogs' , group = 'km3net' , platform = 'cl7' , walltime = '00:10:00' , vmem = '8G' , fsize = '8G' , shell = None , email = None , send_mail = 'n' , job_array_start = 1 , job_array_stop = None , job_array_step = 1 , irods = False , sps = True , hpss = False , xrootd = False ...
Generate a job script .
10,092
def get_jpp_env(jpp_dir):
    """Return the environment dict produced by sourcing the Jpp setenv.sh.

    NOTE(review): values containing '=' are joined WITHOUT the separator
    (matching the historical behavior of the ''.join on split parts).
    """
    output = os.popen(
        "source {0}/setenv.sh {0} && env".format(jpp_dir)
    ).read()
    env = {}
    for line in output.split('\n'):
        if '=' not in line:
            continue
        parts = line.split('=')
        env[parts[0]] = ''.join(parts[1:])
    return env
Return the environment dict of a loaded Jpp env .
10,093
def iget(self, irods_path, attempts=1, pause=15):
    """Add an iget command to retrieve a file from iRODS.

    Parameters
    ----------
    irods_path : str
        Path of the file on iRODS.
    attempts : int
        Number of retrieval attempts (retry branch is taken when > 1).
    pause : int
        Seconds to wait between attempts.
    """
    if attempts > 1:
        # NOTE(review): `cmd` is referenced before it is ever assigned, so
        # this branch raises NameError as written. The retry shell-snippet
        # template that `lstrip`/`format` expect (with slots for attempts,
        # irods_path and pause) appears to have been lost -- restore it
        # before relying on multi-attempt retrieval.
        cmd = cmd = lstrip(cmd)
        cmd = cmd.format(attempts, irods_path, pause)
        self.add(cmd)
    else:
        # single-shot retrieval
        self.add('iget -v "{}"'.format(irods_path))
Add an iget command to retrieve a file from iRODS .
10,094
def _add_two_argument_command ( self , command , arg1 , arg2 ) : self . lines . append ( "{} {} {}" . format ( command , arg1 , arg2 ) )
Helper function for two - argument commands
10,095
def get_devices ( self ) : devices = self . make_request ( '["{username}","{password}","info","",""]' . format ( username = self . username , password = self . password ) ) if devices != False : garage_doors = [ ] try : self . apicode = devices . find ( 'apicode' ) . text self . _device_states = { } for doorNum in rang...
List all garage door devices .
10,096
def get_status(self, device_id):
    """Return the status of the garage door with *device_id*, or False when
    the device list is unavailable or the door is unknown."""
    devices = self.get_devices()
    if devices != False:
        for device in devices:
            if device['door'] == device_id:
                return device['status']
    return False
List only MyQ garage door devices .
10,097
def analyze ( segments , analysis , lookup = dict ( bipa = { } , dolgo = { } ) ) : if not segments : raise ValueError ( 'Empty sequence.' ) if not [ segment for segment in segments if segment . strip ( ) ] : raise ValueError ( 'No information in the sequence.' ) try : bipa_analysis , sc_analysis = [ ] , [ ] for s in se...
Test a sequence for compatibility with CLPA and LingPy .
10,098
def most_energetic(df):
    """Grab the most energetic particle per event from an mc_tracks dataframe."""
    max_energy = df.groupby(['event_id'])['energy'].transform(max)
    mask = max_energy == df['energy']
    return df[mask].reindex()
Grab most energetic particle from mc_tracks dataframe .
10,099
def _connect(self):
    """Open a TCP connection to the JLigier dispatcher."""
    log.debug("Connecting to JLigier")
    address = (self.host, self.port)
    self.socket = socket.socket()
    self.socket.connect(address)
Connect to JLigier