idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,200
def _load_market_scheme ( self ) : try : self . scheme = yaml . load ( open ( self . scheme_path , 'r' ) ) except Exception , error : raise LoadMarketSchemeFailed ( reason = error )
Load market yaml description
53
5
25,201
def fetch ( self , code , * * kwargs ) : log . debug ( 'fetching QuanDL data (%s)' % code ) # This way you can use your credentials even if # you didn't provide them to the constructor if 'authtoken' in kwargs : self . quandl_key = kwargs . pop ( 'authtoken' ) # Harmonization: Quandl call start trim_start if 'start' in kwargs : kwargs [ 'trim_start' ] = kwargs . pop ( 'start' ) if 'end' in kwargs : kwargs [ 'trim_end' ] = kwargs . pop ( 'end' ) try : data = Quandl . get ( code , authtoken = self . quandl_key , * * kwargs ) # FIXME With a symbol not found, insert a not_found column data . index = data . index . tz_localize ( pytz . utc ) except Exception , error : log . error ( 'unable to fetch {}: {}' . format ( code , error ) ) data = pd . DataFrame ( ) return data
Quandl entry point in datafeed object
254
9
25,202
def rolling_performances ( self , timestamp = 'one_month' ) : # TODO Study the impact of month choice # TODO Check timestamp in an enumeration # TODO Implement other benchmarks for perf computation # (zipline issue, maybe expected) if self . metrics : perfs = { } length = range ( len ( self . metrics [ timestamp ] ) ) index = self . _get_index ( self . metrics [ timestamp ] ) perf_keys = self . metrics [ timestamp ] [ 0 ] . keys ( ) perf_keys . pop ( perf_keys . index ( 'period_label' ) ) perfs [ 'period' ] = np . array ( [ pd . datetime . date ( date ) for date in index ] ) for key in perf_keys : perfs [ key ] = self . _to_perf_array ( timestamp , key , length ) else : # TODO Get it from DB if it exists raise NotImplementedError ( ) return pd . DataFrame ( perfs , index = index )
Filters self . perfs
223
6
25,203
def overall_metrics ( self , timestamp = 'one_month' , metrics = None ) : perfs = dict ( ) # If no rolling perfs provided, computes it if metrics is None : metrics = self . rolling_performances ( timestamp = timestamp ) riskfree = np . mean ( metrics [ 'treasury_period_return' ] ) perfs [ 'sharpe' ] = qstk_get_sharpe_ratio ( metrics [ 'algorithm_period_return' ] . values , risk_free = riskfree ) perfs [ 'algorithm_period_return' ] = ( ( ( metrics [ 'algorithm_period_return' ] + 1 ) . cumprod ( ) ) - 1 ) [ - 1 ] perfs [ 'max_drawdown' ] = max ( metrics [ 'max_drawdown' ] ) perfs [ 'algo_volatility' ] = np . mean ( metrics [ 'algo_volatility' ] ) perfs [ 'beta' ] = np . mean ( metrics [ 'beta' ] ) perfs [ 'alpha' ] = np . mean ( metrics [ 'alpha' ] ) perfs [ 'benchmark_period_return' ] = ( ( ( metrics [ 'benchmark_period_return' ] + 1 ) . cumprod ( ) ) - 1 ) [ - 1 ] return perfs
Use zipline results to compute some performance indicators
299
10
25,204
def _normalize_data_types ( self , strategy ) : for k , v in strategy . iteritems ( ) : if not isinstance ( v , str ) : # There is probably nothing to do continue if v == 'true' : strategy [ k ] = True elif v == 'false' or v is None : strategy [ k ] = False else : try : if v . find ( '.' ) > 0 : strategy [ k ] = float ( v ) else : strategy [ k ] = int ( v ) except ValueError : pass
some contexts only retrieves strings giving back right type
115
10
25,205
def _get_benchmark_handler ( self , last_trade , freq = 'minutely' ) : return LiveBenchmark ( last_trade , frequency = freq ) . surcharge_market_data if utils . is_live ( last_trade ) else None
Setup a custom benchmark handler or let zipline manage it
59
12
25,206
def configure_environment ( self , last_trade , benchmark , timezone ) : if last_trade . tzinfo is None : last_trade = pytz . utc . localize ( last_trade ) # Setup the trading calendar from market informations self . benchmark = benchmark self . context = TradingEnvironment ( bm_symbol = benchmark , exchange_tz = timezone , load = self . _get_benchmark_handler ( last_trade ) )
Prepare benchmark loader and trading context
98
7
25,207
def apply_mapping ( raw_row , mapping ) : row = { target : mapping_func ( raw_row [ source_key ] ) for target , ( mapping_func , source_key ) in mapping . fget ( ) . items ( ) } return row
Override this to hand craft conversion of row .
57
9
25,208
def invert_dataframe_axis ( fct ) : def inner ( * args , * * kwargs ) : df_to_invert = fct ( * args , * * kwargs ) return pd . DataFrame ( df_to_invert . to_dict ( ) . values ( ) , index = df_to_invert . to_dict ( ) . keys ( ) ) return inner
Make dataframe index column names and vice et versa
90
10
25,209
def use_google_symbol ( fct ) : def decorator ( symbols ) : google_symbols = [ ] # If one symbol string if isinstance ( symbols , str ) : symbols = [ symbols ] symbols = sorted ( symbols ) for symbol in symbols : dot_pos = symbol . find ( '.' ) google_symbols . append ( symbol [ : dot_pos ] if ( dot_pos > 0 ) else symbol ) data = fct ( google_symbols ) #NOTE Not very elegant data . columns = [ s for s in symbols if s . split ( '.' ) [ 0 ] in data . columns ] return data return decorator
Removes . PA or other market indicator from yahoo symbol convention to suit google convention
141
17
25,210
def get_sector ( symbol ) : url = 'http://finance.yahoo.com/q/pr?s=%s+Profile' % symbol soup = BeautifulSoup ( urlopen ( url ) . read ( ) ) try : sector = soup . find ( 'td' , text = 'Sector:' ) . find_next_sibling ( ) . string . encode ( 'utf-8' ) except : sector = '' return sector
Uses BeautifulSoup to scrape stock sector from Yahoo! Finance website
95
14
25,211
def get_industry ( symbol ) : url = 'http://finance.yahoo.com/q/pr?s=%s+Profile' % symbol soup = BeautifulSoup ( urlopen ( url ) . read ( ) ) try : industry = soup . find ( 'td' , text = 'Industry:' ) . find_next_sibling ( ) . string . encode ( 'utf-8' ) except : industry = '' return industry
Uses BeautifulSoup to scrape stock industry from Yahoo! Finance website
96
14
25,212
def get_type ( symbol ) : url = 'http://finance.yahoo.com/q/pr?s=%s+Profile' % symbol soup = BeautifulSoup ( urlopen ( url ) . read ( ) ) if soup . find ( 'span' , text = 'Business Summary' ) : return 'Stock' elif soup . find ( 'span' , text = 'Fund Summary' ) : asset_type = 'Fund' elif symbol . find ( '^' ) == 0 : asset_type = 'Index' else : pass return asset_type
Uses BeautifulSoup to scrape symbol category from Yahoo! Finance website
122
14
25,213
def get_historical_prices ( symbol , start_date , end_date ) : params = urlencode ( { 's' : symbol , 'a' : int ( start_date [ 5 : 7 ] ) - 1 , 'b' : int ( start_date [ 8 : 10 ] ) , 'c' : int ( start_date [ 0 : 4 ] ) , 'd' : int ( end_date [ 5 : 7 ] ) - 1 , 'e' : int ( end_date [ 8 : 10 ] ) , 'f' : int ( end_date [ 0 : 4 ] ) , 'g' : 'd' , 'ignore' : '.csv' , } ) url = 'http://ichart.yahoo.com/table.csv?%s' % params req = Request ( url ) resp = urlopen ( req ) content = str ( resp . read ( ) . decode ( 'utf-8' ) . strip ( ) ) daily_data = content . splitlines ( ) hist_dict = dict ( ) keys = daily_data [ 0 ] . split ( ',' ) for day in daily_data [ 1 : ] : day_data = day . split ( ',' ) date = day_data [ 0 ] hist_dict [ date ] = { keys [ 1 ] : day_data [ 1 ] , keys [ 2 ] : day_data [ 2 ] , keys [ 3 ] : day_data [ 3 ] , keys [ 4 ] : day_data [ 4 ] , keys [ 5 ] : day_data [ 5 ] , keys [ 6 ] : day_data [ 6 ] } return hist_dict
Get historical prices for the given ticker symbol . Date format is YYYY - MM - DD
353
20
25,214
def _fx_mapping ( raw_rates ) : return { pair [ 0 ] . lower ( ) : { 'timeStamp' : pair [ 1 ] , 'bid' : float ( pair [ 2 ] + pair [ 3 ] ) , 'ask' : float ( pair [ 4 ] + pair [ 5 ] ) , 'high' : float ( pair [ 6 ] ) , 'low' : float ( pair [ 7 ] ) } for pair in map ( lambda x : x . split ( ',' ) , raw_rates ) }
Map raw output to clearer labels
114
6
25,215
def query_rates ( self , pairs = [ ] ) : # If no pairs, TrueFx will use the ones given the last time payload = { 'id' : self . _session } if pairs : payload [ 'c' ] = _clean_pairs ( pairs ) response = requests . get ( self . _api_url , params = payload ) mapped_data = _fx_mapping ( response . content . split ( '\n' ) [ : - 2 ] ) return Series ( mapped_data ) if len ( mapped_data ) == 1 else DataFrame ( mapped_data )
Perform a request against truefx data
127
8
25,216
def next_tick ( date , interval = 15 ) : # Intuition works with utc dates, conversion are made for I/O now = dt . datetime . now ( pytz . utc ) live = False # Sleep until we reach the given date while now < date : time . sleep ( interval ) # Update current time now = dt . datetime . now ( pytz . utc ) # Since we're here, we waited a future date, so this is live trading live = True return live
Only return when we reach given datetime
109
8
25,217
def intuition_module ( location ) : path = location . split ( '.' ) # Get the last field, i.e. the object name in the file obj = path . pop ( - 1 ) return dna . utils . dynamic_import ( '.' . join ( path ) , obj )
Build the module path and import it
63
7
25,218
def build_trading_timeline ( start , end ) : EMPTY_DATES = pd . date_range ( '2000/01/01' , periods = 0 , tz = pytz . utc ) now = dt . datetime . now ( tz = pytz . utc ) if not start : if not end : # Live trading until the end of the day bt_dates = EMPTY_DATES live_dates = pd . date_range ( start = now , end = normalize_date_format ( '23h59' ) ) else : end = normalize_date_format ( end ) if end < now : # Backtesting since a year before end bt_dates = pd . date_range ( start = end - 360 * pd . datetools . day , end = end ) live_dates = EMPTY_DATES elif end > now : # Live trading from now to end bt_dates = EMPTY_DATES live_dates = pd . date_range ( start = now , end = end ) else : start = normalize_date_format ( start ) if start < now : if not end : # Backtest for a year or until now end = start + 360 * pd . datetools . day if end > now : end = now - pd . datetools . day live_dates = EMPTY_DATES bt_dates = pd . date_range ( start = start , end = end ) else : end = normalize_date_format ( end ) if end < now : # Nothing to do, backtest from start to end live_dates = EMPTY_DATES bt_dates = pd . date_range ( start = start , end = end ) elif end > now : # Hybrid timeline, backtest from start to end, live # trade from now to end bt_dates = pd . date_range ( start = start , end = now - pd . datetools . day ) live_dates = pd . date_range ( start = now , end = end ) elif start > now : if not end : # Live trading from start to the end of the day bt_dates = EMPTY_DATES live_dates = pd . date_range ( start = start , end = normalize_date_format ( '23h59' ) ) else : # Live trading from start to end end = normalize_date_format ( end ) bt_dates = EMPTY_DATES live_dates = pd . date_range ( start = start , end = end ) return bt_dates + live_dates
Build the daily - based index we will trade on
574
10
25,219
def is_leap ( year ) : x = math . fmod ( year , 4 ) y = math . fmod ( year , 100 ) z = math . fmod ( year , 400 ) # Divisible by 4 and, # either not divisible by 100 or divisible by 400. return not x and ( y or not z )
Leap year or not in the Gregorian calendar .
72
11
25,220
def gcal2jd ( year , month , day ) : year = int ( year ) month = int ( month ) day = int ( day ) a = ipart ( ( month - 14 ) / 12.0 ) jd = ipart ( ( 1461 * ( year + 4800 + a ) ) / 4.0 ) jd += ipart ( ( 367 * ( month - 2 - 12 * a ) ) / 12.0 ) x = ipart ( ( year + 4900 + a ) / 100.0 ) jd -= ipart ( ( 3 * x ) / 4.0 ) jd += day - 2432075.5 # was 32075; add 2400000.5 jd -= 0.5 # 0 hours; above JD is for midday, switch to midnight. return MJD_0 , jd
Gregorian calendar date to Julian date .
177
8
25,221
def jd2gcal ( jd1 , jd2 ) : from math import modf jd1_f , jd1_i = modf ( jd1 ) jd2_f , jd2_i = modf ( jd2 ) jd_i = jd1_i + jd2_i f = jd1_f + jd2_f # Set JD to noon of the current date. Fractional part is the # fraction from midnight of the current date. if - 0.5 < f < 0.5 : f += 0.5 elif f >= 0.5 : jd_i += 1 f -= 0.5 elif f <= - 0.5 : jd_i -= 1 f += 1.5 l = jd_i + 68569 n = ipart ( ( 4 * l ) / 146097.0 ) l -= ipart ( ( ( 146097 * n ) + 3 ) / 4.0 ) i = ipart ( ( 4000 * ( l + 1 ) ) / 1461001 ) l -= ipart ( ( 1461 * i ) / 4.0 ) - 31 j = ipart ( ( 80 * l ) / 2447.0 ) day = l - ipart ( ( 2447 * j ) / 80.0 ) l = ipart ( j / 11.0 ) month = j + 2 - ( 12 * l ) year = 100 * ( n - 49 ) + i + l return int ( year ) , int ( month ) , int ( day ) , f
Julian date to Gregorian calendar date and time of day .
339
13
25,222
def jcal2jd ( year , month , day ) : year = int ( year ) month = int ( month ) day = int ( day ) jd = 367 * year x = ipart ( ( month - 9 ) / 7.0 ) jd -= ipart ( ( 7 * ( year + 5001 + x ) ) / 4.0 ) jd += ipart ( ( 275 * month ) / 9.0 ) jd += day jd += 1729777 - 2400000.5 # Return 240000.5 as first part of JD. jd -= 0.5 # Convert midday to midnight. return MJD_0 , jd
Julian calendar date to Julian date .
139
8
25,223
def add_args_kwargs ( func ) : @ wraps ( func ) def wrapper ( * args , * * kwargs ) : props = argspec ( func ) # if 'args' not in props: if isinstance ( props [ 1 ] , type ( None ) ) : args = args [ : len ( props [ 0 ] ) ] if ( ( not isinstance ( props [ 2 ] , type ( None ) ) ) or ( not isinstance ( props [ 3 ] , type ( None ) ) ) ) : return func ( * args , * * kwargs ) else : return func ( * args ) return wrapper
Add Args and Kwargs
132
6
25,224
def set_up_log ( filename , verbose = True ) : # Add file extension. filename += '.log' if verbose : print ( 'Preparing log file:' , filename ) # Capture warnings. logging . captureWarnings ( True ) # Set output format. formatter = logging . Formatter ( fmt = '%(asctime)s %(message)s' , datefmt = '%d/%m/%Y %H:%M:%S' ) # Create file handler. fh = logging . FileHandler ( filename = filename , mode = 'w' ) fh . setLevel ( logging . DEBUG ) fh . setFormatter ( formatter ) # Create log. log = logging . getLogger ( filename ) log . setLevel ( logging . DEBUG ) log . addHandler ( fh ) # Send opening message. log . info ( 'The log file has been set-up.' ) return log
Set up log
202
3
25,225
def add_observer ( self , signal , observer ) : self . _is_allowed_signal ( signal ) self . _add_observer ( signal , observer )
Add an observer to the object .
37
7
25,226
def remove_observer ( self , signal , observer ) : self . _is_allowed_event ( signal ) self . _remove_observer ( signal , observer )
Remove an observer from the object .
36
7
25,227
def notify_observers ( self , signal , * * kwargs ) : # Check if a notification if in progress if self . _locked : return False # Set the lock self . _locked = True # Create a signal object signal_to_be_notified = SignalObject ( ) setattr ( signal_to_be_notified , "object" , self ) setattr ( signal_to_be_notified , "signal" , signal ) for name , value in kwargs . items ( ) : setattr ( signal_to_be_notified , name , value ) # Notify all the observers for observer in self . _observers [ signal ] : observer ( signal_to_be_notified ) # Unlock the notification process self . _locked = False
Notify observers of a given signal .
169
8
25,228
def _is_allowed_signal ( self , signal ) : if signal not in self . _allowed_signals : raise Exception ( "Signal '{0}' is not allowed for '{1}'." . format ( signal , type ( self ) ) )
Check if a signal is valid .
57
7
25,229
def _add_observer ( self , signal , observer ) : if observer not in self . _observers [ signal ] : self . _observers [ signal ] . append ( observer )
Associate an observer to a valid signal .
42
9
25,230
def _remove_observer ( self , signal , observer ) : if observer in self . _observers [ signal ] : self . _observers [ signal ] . remove ( observer )
Remove an observer to a valid signal .
41
8
25,231
def is_converge ( self ) : if len ( self . list_cv_values ) < self . wind : return start_idx = - self . wind mid_idx = - ( self . wind // 2 ) old_mean = np . array ( self . list_cv_values [ start_idx : mid_idx ] ) . mean ( ) current_mean = np . array ( self . list_cv_values [ mid_idx : ] ) . mean ( ) normalize_residual_metrics = ( np . abs ( old_mean - current_mean ) / np . abs ( old_mean ) ) self . converge_flag = normalize_residual_metrics < self . eps
Return True if the convergence criteria is matched .
160
9
25,232
def retrieve_metrics ( self ) : time = np . array ( self . list_dates ) if len ( time ) >= 1 : time -= time [ 0 ] return { 'time' : time , 'index' : self . list_iters , 'values' : self . list_cv_values }
Return the convergence metrics saved with the corresponding iterations .
66
10
25,233
def _check_cost ( self ) : # Add current cost value to the test list self . _test_list . append ( self . cost ) # Check if enough cost values have been collected if len ( self . _test_list ) == self . _test_range : # The mean of the first half of the test list t1 = np . mean ( self . _test_list [ len ( self . _test_list ) // 2 : ] , axis = 0 ) # The mean of the second half of the test list t2 = np . mean ( self . _test_list [ : len ( self . _test_list ) // 2 ] , axis = 0 ) # Calculate the change across the test list if not np . around ( t1 , decimals = 16 ) : cost_diff = 0.0 else : cost_diff = ( np . linalg . norm ( t1 - t2 ) / np . linalg . norm ( t1 ) ) # Reset the test list self . _test_list = [ ] if self . _verbose : print ( ' - CONVERGENCE TEST - ' ) print ( ' - CHANGE IN COST:' , cost_diff ) print ( '' ) # Check for convergence return cost_diff <= self . _tolerance else : return False
Check cost function
280
3
25,234
def _calc_cost ( self , * args , * * kwargs ) : return np . sum ( [ op . cost ( * args , * * kwargs ) for op in self . _operators ] )
Calculate the cost
48
5
25,235
def get_cost ( self , * args , * * kwargs ) : # Check if the cost should be calculated if self . _iteration % self . _cost_interval : test_result = False else : if self . _verbose : print ( ' - ITERATION:' , self . _iteration ) # Calculate the current cost self . cost = self . _calc_cost ( verbose = self . _verbose , * args , * * kwargs ) self . _cost_list . append ( self . cost ) if self . _verbose : print ( ' - COST:' , self . cost ) print ( '' ) # Test for convergence test_result = self . _check_cost ( ) # Update the current iteration number self . _iteration += 1 return test_result
Get cost function
174
3
25,236
def add_noise ( data , sigma = 1.0 , noise_type = 'gauss' ) : data = np . array ( data ) if noise_type not in ( 'gauss' , 'poisson' ) : raise ValueError ( 'Invalid noise type. Options are "gauss" or' '"poisson"' ) if isinstance ( sigma , ( list , tuple , np . ndarray ) ) : if len ( sigma ) != data . shape [ 0 ] : raise ValueError ( 'Number of sigma values must match first ' 'dimension of input data' ) if noise_type is 'gauss' : random = np . random . randn ( * data . shape ) elif noise_type is 'poisson' : random = np . random . poisson ( np . abs ( data ) ) if isinstance ( sigma , ( int , float ) ) : return data + sigma * random else : return data + np . array ( [ s * r for s , r in zip ( sigma , random ) ] )
r Add noise to data
228
5
25,237
def thresh ( data , threshold , threshold_type = 'hard' ) : data = np . array ( data ) if threshold_type not in ( 'hard' , 'soft' ) : raise ValueError ( 'Invalid threshold type. Options are "hard" or' '"soft"' ) if threshold_type == 'soft' : return np . around ( np . maximum ( ( 1.0 - threshold / np . maximum ( np . finfo ( np . float64 ) . eps , np . abs ( data ) ) ) , 0.0 ) * data , decimals = 15 ) else : return data * ( np . abs ( data ) >= threshold )
r Threshold data
142
4
25,238
def _get_grad_method ( self , data ) : self . grad = self . trans_op ( self . op ( data ) - self . obs_data )
r Get the gradient
36
4
25,239
def _cost_method ( self , * args , * * kwargs ) : cost_val = 0.5 * np . linalg . norm ( self . obs_data - self . op ( args [ 0 ] ) ) ** 2 if 'verbose' in kwargs and kwargs [ 'verbose' ] : print ( ' - DATA FIDELITY (X):' , cost_val ) return cost_val
Calculate gradient component of the cost
94
8
25,240
def _cost_method ( self , * args , * * kwargs ) : if 'verbose' in kwargs and kwargs [ 'verbose' ] : print ( ' - Min (X):' , np . min ( args [ 0 ] ) ) return 0.0
Calculate positivity component of the cost
62
9
25,241
def _cost_method ( self , * args , * * kwargs ) : cost_val = np . sum ( np . abs ( self . weights * self . _linear . op ( args [ 0 ] ) ) ) if 'verbose' in kwargs and kwargs [ 'verbose' ] : print ( ' - L1 NORM (X):' , cost_val ) return cost_val
Calculate sparsity component of the cost
89
9
25,242
def _cost_method ( self , * args , * * kwargs ) : cost_val = self . thresh * nuclear_norm ( cube2matrix ( args [ 0 ] ) ) if 'verbose' in kwargs and kwargs [ 'verbose' ] : print ( ' - NUCLEAR NORM (X):' , cost_val ) return cost_val
Calculate low - rank component of the cost
85
10
25,243
def _op_method ( self , data , extra_factor = 1.0 ) : return self . linear_op . adj_op ( self . prox_op . op ( self . linear_op . op ( data ) , extra_factor = extra_factor ) )
r Operator method
58
3
25,244
def _cost_method ( self , * args , * * kwargs ) : return self . prox_op . cost ( self . linear_op . op ( args [ 0 ] ) , * * kwargs )
Calculate the cost function associated to the composed function
47
11
25,245
def _cost_method ( self , * args , * * kwargs ) : return np . sum ( [ operator . cost ( data ) for operator , data in zip ( self . operators , args [ 0 ] ) ] )
Calculate combined proximity operator components of the cost
48
10
25,246
def min_max_normalize ( img ) : min_img = img . min ( ) max_img = img . max ( ) return ( img - min_img ) / ( max_img - min_img )
Centre and normalize a given array .
47
9
25,247
def _preprocess_input ( test , ref , mask = None ) : test = np . abs ( np . copy ( test ) ) . astype ( 'float64' ) ref = np . abs ( np . copy ( ref ) ) . astype ( 'float64' ) test = min_max_normalize ( test ) ref = min_max_normalize ( ref ) if ( not isinstance ( mask , np . ndarray ) ) and ( mask is not None ) : raise ValueError ( "mask should be None, or a np.ndarray," " got '{0}' instead." . format ( mask ) ) if mask is None : return test , ref , None return test , ref , mask
Wrapper to the metric
154
5
25,248
def file_name_error ( file_name ) : if file_name == '' or file_name [ 0 ] [ 0 ] == '-' : raise IOError ( 'Input file name not specified.' ) elif not os . path . isfile ( file_name ) : raise IOError ( 'Input file name [%s] not found!' % file_name )
File name error
79
3
25,249
def is_executable ( exe_name ) : if not isinstance ( exe_name , str ) : raise TypeError ( 'Executable name must be a string.' ) def is_exe ( fpath ) : return os . path . isfile ( fpath ) and os . access ( fpath , os . X_OK ) fpath , fname = os . path . split ( exe_name ) if not fpath : res = any ( [ is_exe ( os . path . join ( path , exe_name ) ) for path in os . environ [ "PATH" ] . split ( os . pathsep ) ] ) else : res = is_exe ( exe_name ) if not res : raise IOError ( '{} does not appear to be a valid executable on this ' 'system.' . format ( exe_name ) )
Check if Input is Executable
188
6
25,250
def _check_operator ( self , operator ) : if not isinstance ( operator , type ( None ) ) : tree = [ obj . __name__ for obj in getmro ( operator . __class__ ) ] if not any ( [ parent in tree for parent in self . _op_parents ] ) : warn ( '{0} does not inherit an operator ' 'parent.' . format ( str ( operator . __class__ ) ) )
Check Set - Up
94
4
25,251
def _check_restart_params ( self , restart_strategy , min_beta , s_greedy , xi_restart ) : if restart_strategy is None : return True if self . mode != 'regular' : raise ValueError ( 'Restarting strategies can only be used with ' 'regular mode.' ) greedy_params_check = ( min_beta is None or s_greedy is None or s_greedy <= 1 ) if restart_strategy == 'greedy' and greedy_params_check : raise ValueError ( 'You need a min_beta and an s_greedy > 1 for ' 'greedy restart.' ) if xi_restart is None or xi_restart >= 1 : raise ValueError ( 'You need a xi_restart < 1 for restart.' ) return True
r Check restarting parameters
180
5
25,252
def is_restart ( self , z_old , x_new , x_old ) : if self . restart_strategy is None : return False criterion = np . vdot ( z_old - x_new , x_new - x_old ) >= 0 if criterion : if 'adaptive' in self . restart_strategy : self . r_lazy *= self . xi_restart if self . restart_strategy in [ 'adaptive-ii' , 'adaptive-2' ] : self . _t_now = 1 if self . restart_strategy == 'greedy' : cur_delta = np . linalg . norm ( x_new - x_old ) if self . _delta_0 is None : self . _delta_0 = self . s_greedy * cur_delta else : self . _safeguard = cur_delta >= self . _delta_0 return criterion
r Check whether the algorithm needs to restart
208
8
25,253
def update_beta ( self , beta ) : if self . _safeguard : beta *= self . xi_restart beta = max ( beta , self . min_beta ) return beta
r Update beta
42
3
25,254
def update_lambda ( self , * args , * * kwargs ) : if self . restart_strategy == 'greedy' : return 2 # Steps 3 and 4 from alg.10.7. self . _t_prev = self . _t_now if self . mode == 'regular' : self . _t_now = ( self . p_lazy + np . sqrt ( self . r_lazy * self . _t_prev ** 2 + self . q_lazy ) ) * 0.5 elif self . mode == 'CD' : self . _t_now = ( self . _n + self . a_cd - 1 ) / self . a_cd self . _n += 1 return 1 + ( self . _t_prev - 1 ) / self . _t_now
r Update lambda
178
3
25,255
def call_mr_transform ( data , opt = '' , path = './' , remove_files = True ) : # pragma: no cover if not import_astropy : raise ImportError ( 'Astropy package not found.' ) if ( not isinstance ( data , np . ndarray ) ) or ( data . ndim != 2 ) : raise ValueError ( 'Input data must be a 2D numpy array.' ) executable = 'mr_transform' # Make sure mr_transform is installed. is_executable ( executable ) # Create a unique string using the current date and time. unique_string = datetime . now ( ) . strftime ( '%Y.%m.%d_%H.%M.%S' ) # Set the ouput file names. file_name = path + 'mr_temp_' + unique_string file_fits = file_name + '.fits' file_mr = file_name + '.mr' # Write the input data to a fits file. fits . writeto ( file_fits , data ) if isinstance ( opt , str ) : opt = opt . split ( ) # Call mr_transform. try : check_call ( [ executable ] + opt + [ file_fits , file_mr ] ) except Exception : warn ( '{} failed to run with the options provided.' . format ( executable ) ) remove ( file_fits ) else : # Retrieve wavelet transformed data. result = fits . getdata ( file_mr ) # Remove the temporary files. if remove_files : remove ( file_fits ) remove ( file_mr ) # Return the mr_transform results. return result
r Call mr_transform
359
6
25,256
def get_mr_filters ( data_shape , opt = '' , coarse = False ) : # pragma: no cover # Adjust the shape of the input data. data_shape = np . array ( data_shape ) data_shape += data_shape % 2 - 1 # Create fake data. fake_data = np . zeros ( data_shape ) fake_data [ tuple ( zip ( data_shape // 2 ) ) ] = 1 # Call mr_transform. mr_filters = call_mr_transform ( fake_data , opt = opt ) # Return filters if coarse : return mr_filters else : return mr_filters [ : - 1 ]
Get mr_transform filters
147
6
25,257
def gram_schmidt ( matrix , return_opt = 'orthonormal' ) : if return_opt not in ( 'orthonormal' , 'orthogonal' , 'both' ) : raise ValueError ( 'Invalid return_opt, options are: "orthonormal", ' '"orthogonal" or "both"' ) u = [ ] e = [ ] for vector in matrix : if len ( u ) == 0 : u_now = vector else : u_now = vector - sum ( [ project ( u_i , vector ) for u_i in u ] ) u . append ( u_now ) e . append ( u_now / np . linalg . norm ( u_now , 2 ) ) u = np . array ( u ) e = np . array ( e ) if return_opt == 'orthonormal' : return e elif return_opt == 'orthogonal' : return u elif return_opt == 'both' : return u , e
r Gram - Schmit
214
5
25,258
def nuclear_norm ( data ) : # Get SVD of the data. u , s , v = np . linalg . svd ( data ) # Return nuclear norm. return np . sum ( s )
r Nuclear norm
45
3
25,259
def project ( u , v ) : return np . inner ( v , u ) / np . inner ( u , u ) * u
r Project vector
28
3
25,260
def rot_matrix ( angle ) : return np . around ( np . array ( [ [ np . cos ( angle ) , - np . sin ( angle ) ] , [ np . sin ( angle ) , np . cos ( angle ) ] ] , dtype = 'float' ) , 10 )
r Rotation matrix
63
4
25,261
def _set_initial_x ( self ) : return np . random . random ( self . _data_shape ) . astype ( self . _data_type )
Set initial value of x
36
5
25,262
def get_spec_rad ( self , tolerance = 1e-6 , max_iter = 20 , extra_factor = 1.0 ) : # Set (or reset) values of x. x_old = self . _set_initial_x ( ) # Iterate until the L2 norm of x converges. for i in range ( max_iter ) : x_old_norm = np . linalg . norm ( x_old ) x_new = self . _operator ( x_old ) / x_old_norm x_new_norm = np . linalg . norm ( x_new ) if ( np . abs ( x_new_norm - x_old_norm ) < tolerance ) : if self . _verbose : print ( ' - Power Method converged after %d iterations!' % ( i + 1 ) ) break elif i == max_iter - 1 and self . _verbose : print ( ' - Power Method did not converge after %d ' 'iterations!' % max_iter ) np . copyto ( x_old , x_new ) self . spec_rad = x_new_norm * extra_factor self . inv_spec_rad = 1.0 / self . spec_rad
Get spectral radius
266
3
25,263
def _check_type ( self , input_val ) : if not isinstance ( input_val , ( list , tuple , np . ndarray ) ) : raise TypeError ( 'Invalid input type, input must be a list, tuple ' 'or numpy array.' ) input_val = np . array ( input_val ) if not input_val . size : raise ValueError ( 'Input list is empty.' ) return input_val
Check Input Type
94
3
25,264
def find_n_pc ( u , factor = 0.5 ) : if np . sqrt ( u . shape [ 0 ] ) % 1 : raise ValueError ( 'Invalid left singular value. The size of the first ' 'dimenion of u must be perfect square.' ) # Get the shape of the array array_shape = np . repeat ( np . int ( np . sqrt ( u . shape [ 0 ] ) ) , 2 ) # Find the auto correlation of the left singular vector. u_auto = [ convolve ( a . reshape ( array_shape ) , np . rot90 ( a . reshape ( array_shape ) , 2 ) ) for a in u . T ] # Return the required number of principal components. return np . sum ( [ ( a [ tuple ( zip ( array_shape // 2 ) ) ] ** 2 <= factor * np . sum ( a ** 2 ) ) for a in u_auto ] )
Find number of principal components
201
5
25,265
def calculate_svd ( data ) : if ( not isinstance ( data , np . ndarray ) ) or ( data . ndim != 2 ) : raise TypeError ( 'Input data must be a 2D np.ndarray.' ) return svd ( data , check_finite = False , lapack_driver = 'gesvd' , full_matrices = False )
Calculate Singular Value Decomposition
82
9
25,266
def svd_thresh ( data , threshold = None , n_pc = None , thresh_type = 'hard' ) : if ( ( not isinstance ( n_pc , ( int , str , type ( None ) ) ) ) or ( isinstance ( n_pc , int ) and n_pc <= 0 ) or ( isinstance ( n_pc , str ) and n_pc != 'all' ) ) : raise ValueError ( 'Invalid value for "n_pc", specify a positive ' 'integer value or "all"' ) # Get SVD of input data. u , s , v = calculate_svd ( data ) # Find the threshold if not provided. if isinstance ( threshold , type ( None ) ) : # Find the required number of principal components if not specified. if isinstance ( n_pc , type ( None ) ) : n_pc = find_n_pc ( u , factor = 0.1 ) # If the number of PCs is too large use all of the singular values. if ( ( isinstance ( n_pc , int ) and n_pc >= s . size ) or ( isinstance ( n_pc , str ) and n_pc == 'all' ) ) : n_pc = s . size warn ( 'Using all singular values.' ) threshold = s [ n_pc - 1 ] # Threshold the singular values. s_new = thresh ( s , threshold , thresh_type ) if np . all ( s_new == s ) : warn ( 'No change to singular values.' ) # Diagonalize the svd s_new = np . diag ( s_new ) # Return the thresholded data. return np . dot ( u , np . dot ( s_new , v ) )
r Threshold the singular values
377
6
25,267
def svd_thresh_coef ( data , operator , threshold , thresh_type = 'hard' ) : if not callable ( operator ) : raise TypeError ( 'Operator must be a callable function.' ) # Get SVD of data matrix u , s , v = calculate_svd ( data ) # Diagnalise s s = np . diag ( s ) # Compute coefficients a = np . dot ( s , v ) # Get the shape of the array array_shape = np . repeat ( np . int ( np . sqrt ( u . shape [ 0 ] ) ) , 2 ) # Compute threshold matrix. ti = np . array ( [ np . linalg . norm ( x ) for x in operator ( matrix2cube ( u , array_shape ) ) ] ) threshold *= np . repeat ( ti , a . shape [ 1 ] ) . reshape ( a . shape ) # Threshold coefficients. a_new = thresh ( a , threshold , thresh_type ) # Return the thresholded image. return np . dot ( u , a_new )
Threshold the singular values coefficients
236
6
25,268
def gaussian_kernel ( data_shape , sigma , norm = 'max' ) : if not import_astropy : # pragma: no cover raise ImportError ( 'Astropy package not found.' ) if norm not in ( 'max' , 'sum' , 'none' ) : raise ValueError ( 'Invalid norm, options are "max", "sum" or "none".' ) kernel = np . array ( Gaussian2DKernel ( sigma , x_size = data_shape [ 1 ] , y_size = data_shape [ 0 ] ) ) if norm == 'max' : return kernel / np . max ( kernel ) elif norm == 'sum' : return kernel / np . sum ( kernel ) elif norm == 'none' : return kernel
r Gaussian kernel
166
4
25,269
def mad ( data ) : return np . median ( np . abs ( data - np . median ( data ) ) )
r Median absolute deviation
25
4
25,270
def psnr ( data1 , data2 , method = 'starck' , max_pix = 255 ) : if method == 'starck' : return ( 20 * np . log10 ( ( data1 . shape [ 0 ] * np . abs ( np . max ( data1 ) - np . min ( data1 ) ) ) / np . linalg . norm ( data1 - data2 ) ) ) elif method == 'wiki' : return ( 20 * np . log10 ( max_pix ) - 10 * np . log10 ( mse ( data1 , data2 ) ) ) else : raise ValueError ( 'Invalid PSNR method. Options are "starck" and ' '"wiki"' )
r Peak Signal - to - Noise Ratio
156
8
25,271
def psnr_stack ( data1 , data2 , metric = np . mean , method = 'starck' ) : if data1 . ndim != 3 or data2 . ndim != 3 : raise ValueError ( 'Input data must be a 3D np.ndarray' ) return metric ( [ psnr ( i , j , method = method ) for i , j in zip ( data1 , data2 ) ] )
r Peak Signa - to - Noise for stack of images
93
12
25,272
def cube2map ( data_cube , layout ) : if data_cube . ndim != 3 : raise ValueError ( 'The input data must have 3 dimensions.' ) if data_cube . shape [ 0 ] != np . prod ( layout ) : raise ValueError ( 'The desired layout must match the number of input ' 'data layers.' ) return np . vstack ( [ np . hstack ( data_cube [ slice ( layout [ 1 ] * i , layout [ 1 ] * ( i + 1 ) ) ] ) for i in range ( layout [ 0 ] ) ] )
r Cube to Map
123
4
25,273
def map2cube ( data_map , layout ) : if np . all ( np . array ( data_map . shape ) % np . array ( layout ) ) : raise ValueError ( 'The desired layout must be a multiple of the number ' 'pixels in the data map.' ) d_shape = np . array ( data_map . shape ) // np . array ( layout ) return np . array ( [ data_map [ ( slice ( i * d_shape [ 0 ] , ( i + 1 ) * d_shape [ 0 ] ) , slice ( j * d_shape [ 1 ] , ( j + 1 ) * d_shape [ 1 ] ) ) ] for i in range ( layout [ 0 ] ) for j in range ( layout [ 1 ] ) ] )
r Map to cube
166
4
25,274
def map2matrix ( data_map , layout ) : layout = np . array ( layout ) # Select n objects n_obj = np . prod ( layout ) # Get the shape of the images image_shape = ( np . array ( data_map . shape ) // layout ) [ 0 ] # Stack objects from map data_matrix = [ ] for i in range ( n_obj ) : lower = ( image_shape * ( i // layout [ 1 ] ) , image_shape * ( i % layout [ 1 ] ) ) upper = ( image_shape * ( i // layout [ 1 ] + 1 ) , image_shape * ( i % layout [ 1 ] + 1 ) ) data_matrix . append ( ( data_map [ lower [ 0 ] : upper [ 0 ] , lower [ 1 ] : upper [ 1 ] ] ) . reshape ( image_shape ** 2 ) ) return np . array ( data_matrix ) . T
r Map to Matrix
203
4
25,275
def matrix2map ( data_matrix , map_shape ) : map_shape = np . array ( map_shape ) # Get the shape and layout of the images image_shape = np . sqrt ( data_matrix . shape [ 0 ] ) . astype ( int ) layout = np . array ( map_shape // np . repeat ( image_shape , 2 ) , dtype = 'int' ) # Map objects from matrix data_map = np . zeros ( map_shape ) temp = data_matrix . reshape ( image_shape , image_shape , data_matrix . shape [ 1 ] ) for i in range ( data_matrix . shape [ 1 ] ) : lower = ( image_shape * ( i // layout [ 1 ] ) , image_shape * ( i % layout [ 1 ] ) ) upper = ( image_shape * ( i // layout [ 1 ] + 1 ) , image_shape * ( i % layout [ 1 ] + 1 ) ) data_map [ lower [ 0 ] : upper [ 0 ] , lower [ 1 ] : upper [ 1 ] ] = temp [ : , : , i ] return data_map . astype ( int )
r Matrix to Map
256
4
25,276
def cube2matrix ( data_cube ) : return data_cube . reshape ( [ data_cube . shape [ 0 ] ] + [ np . prod ( data_cube . shape [ 1 : ] ) ] ) . T
r Cube to Matrix
49
4
25,277
def matrix2cube ( data_matrix , im_shape ) : return data_matrix . T . reshape ( [ data_matrix . shape [ 1 ] ] + list ( im_shape ) )
r Matrix to Cube
45
4
25,278
def plotCost ( cost_list , output = None ) : if not import_fail : if isinstance ( output , type ( None ) ) : file_name = 'cost_function.png' else : file_name = output + '_cost_function.png' plt . figure ( ) plt . plot ( np . log10 ( cost_list ) , 'r-' ) plt . title ( 'Cost Function' ) plt . xlabel ( 'Iteration' ) plt . ylabel ( r'$\log_{10}$ Cost' ) plt . savefig ( file_name ) plt . close ( ) print ( ' - Saving cost function data to:' , file_name ) else : warn ( 'Matplotlib not installed.' )
Plot cost function
165
3
25,279
def Gaussian_filter ( x , sigma , norm = True ) : x = check_float ( x ) sigma = check_float ( sigma ) val = np . exp ( - 0.5 * ( x / sigma ) ** 2 ) if norm : return val / ( np . sqrt ( 2 * np . pi ) * sigma ) else : return val
r Gaussian filter
80
4
25,280
def mex_hat ( x , sigma ) : x = check_float ( x ) sigma = check_float ( sigma ) xs = ( x / sigma ) ** 2 val = 2 * ( 3 * sigma ) ** - 0.5 * np . pi ** - 0.25 return val * ( 1 - xs ) * np . exp ( - 0.5 * xs )
r Mexican hat
87
3
25,281
def mex_hat_dir ( x , y , sigma ) : x = check_float ( x ) sigma = check_float ( sigma ) return - 0.5 * ( x / sigma ) ** 2 * mex_hat ( y , sigma )
r Directional Mexican hat
59
5
25,282
def convolve ( data , kernel , method = 'scipy' ) : if data . ndim != kernel . ndim : raise ValueError ( 'Data and kernel must have the same dimensions.' ) if method not in ( 'astropy' , 'scipy' ) : raise ValueError ( 'Invalid method. Options are "astropy" or "scipy".' ) if not import_astropy : # pragma: no cover method = 'scipy' if method == 'astropy' : return convolve_fft ( data , kernel , boundary = 'wrap' , crop = False , nan_treatment = 'fill' , normalize_kernel = False ) elif method == 'scipy' : return scipy . signal . fftconvolve ( data , kernel , mode = 'same' )
r Convolve data with kernel
178
6
25,283
def convolve_stack ( data , kernel , rot_kernel = False , method = 'scipy' ) : if rot_kernel : kernel = rotate_stack ( kernel ) return np . array ( [ convolve ( data_i , kernel_i , method = method ) for data_i , kernel_i in zip ( data , kernel ) ] )
r Convolve stack of data with stack of kernels
76
10
25,284
def check_callable ( val , add_agrs = True ) : if not callable ( val ) : raise TypeError ( 'The input object must be a callable function.' ) if add_agrs : val = add_args_kwargs ( val ) return val
r Check input object is callable
59
7
25,285
def check_float ( val ) : if not isinstance ( val , ( int , float , list , tuple , np . ndarray ) ) : raise TypeError ( 'Invalid input type.' ) if isinstance ( val , int ) : val = float ( val ) elif isinstance ( val , ( list , tuple ) ) : val = np . array ( val , dtype = float ) elif isinstance ( val , np . ndarray ) and ( not np . issubdtype ( val . dtype , np . floating ) ) : val = val . astype ( float ) return val
r Check if input value is a float or a np . ndarray of floats if not convert .
129
21
25,286
def check_int ( val ) : if not isinstance ( val , ( int , float , list , tuple , np . ndarray ) ) : raise TypeError ( 'Invalid input type.' ) if isinstance ( val , float ) : val = int ( val ) elif isinstance ( val , ( list , tuple ) ) : val = np . array ( val , dtype = int ) elif isinstance ( val , np . ndarray ) and ( not np . issubdtype ( val . dtype , np . integer ) ) : val = val . astype ( int ) return val
r Check if input value is an int or a np . ndarray of ints if not convert .
129
22
25,287
def check_npndarray ( val , dtype = None , writeable = True , verbose = True ) : if not isinstance ( val , np . ndarray ) : raise TypeError ( 'Input is not a numpy array.' ) if ( ( not isinstance ( dtype , type ( None ) ) ) and ( not np . issubdtype ( val . dtype , dtype ) ) ) : raise TypeError ( 'The numpy array elements are not of type: {}' '' . format ( dtype ) ) if not writeable and verbose and val . flags . writeable : warn ( 'Making input data immutable.' ) val . flags . writeable = writeable
Check if input object is a numpy array .
148
10
25,288
def positive ( data ) : if not isinstance ( data , ( int , float , list , tuple , np . ndarray ) ) : raise TypeError ( 'Invalid data type, input must be `int`, `float`, ' '`list`, `tuple` or `np.ndarray`.' ) def pos_thresh ( data ) : return data * ( data > 0 ) def pos_recursive ( data ) : data = np . array ( data ) if not data . dtype == 'O' : result = list ( pos_thresh ( data ) ) else : result = [ pos_recursive ( x ) for x in data ] return result if isinstance ( data , ( int , float ) ) : return pos_thresh ( data ) else : return np . array ( pos_recursive ( data ) )
r Positivity operator
178
5
25,289
def mean ( self ) : return np . dot ( np . array ( self . norm_scores ) , self . weights )
Compute a total score for each model over all the tests .
27
13
25,290
def T ( self ) : return ScoreMatrix ( self . tests , self . models , scores = self . values , weights = self . weights , transpose = True )
Get transpose of this ScoreMatrix .
35
8
25,291
def to_html ( self , show_mean = None , sortable = None , colorize = True , * args , * * kwargs ) : if show_mean is None : show_mean = self . show_mean if sortable is None : sortable = self . sortable df = self . copy ( ) if show_mean : df . insert ( 0 , 'Mean' , None ) df . loc [ : , 'Mean' ] = [ '%.3f' % self [ m ] . mean ( ) for m in self . models ] html = df . to_html ( * args , * * kwargs ) # Pandas method html , table_id = self . annotate ( df , html , show_mean , colorize ) if sortable : self . dynamify ( table_id ) return html
Extend Pandas built in to_html method for rendering a DataFrame and use it to render a ScoreMatrix .
179
24
25,292
def rec_apply ( func , n ) : if n > 1 : rec_func = rec_apply ( func , n - 1 ) return lambda x : func ( rec_func ( x ) ) return func
Used to determine parent directory n levels up by repeatedly applying os . path . dirname
44
17
25,293
def printd ( * args , * * kwargs ) : global settings if settings [ 'PRINT_DEBUG_STATE' ] : print ( * args , * * kwargs ) return True return False
Print if PRINT_DEBUG_STATE is True
44
10
25,294
def assert_dimensionless ( value ) : if isinstance ( value , Quantity ) : value = value . simplified if value . dimensionality == Dimensionality ( { } ) : value = value . base . item ( ) else : raise TypeError ( "Score value %s must be dimensionless" % value ) return value
Tests for dimensionlessness of input . If input is dimensionless but expressed as a Quantity it returns the bare value . If it not it raised an error .
66
32
25,295
def import_all_modules ( package , skip = None , verbose = False , prefix = "" , depth = 0 ) : skip = [ ] if skip is None else skip for ff , modname , ispkg in pkgutil . walk_packages ( path = package . __path__ , prefix = prefix , onerror = lambda x : None ) : if ff . path not in package . __path__ [ 0 ] : # Solves weird bug continue if verbose : print ( '\t' * depth , modname ) if modname in skip : if verbose : print ( '\t' * depth , '*Skipping*' ) continue module = '%s.%s' % ( package . __name__ , modname ) subpackage = importlib . import_module ( module ) if ispkg : import_all_modules ( subpackage , skip = skip , verbose = verbose , depth = depth + 1 )
Recursively imports all subpackages modules and submodules of a given package . package should be an imported package not a string . skip is a list of modules or subpackages not to import .
201
39
25,296
def method_cache ( by = 'value' , method = 'run' ) : def decorate_ ( func ) : def decorate ( * args , * * kwargs ) : model = args [ 0 ] # Assumed to be self. assert hasattr ( model , method ) , "Model must have a '%s' method." % method if func . __name__ == method : # Run itself. method_args = kwargs else : # Any other method. method_args = kwargs [ method ] if method in kwargs else { } if not hasattr ( model . __class__ , 'cached_runs' ) : # If there is no run cache. model . __class__ . cached_runs = { } # Create the method cache. cache = model . __class__ . cached_runs if by == 'value' : model_dict = { key : value for key , value in list ( model . __dict__ . items ( ) ) if key [ 0 ] != '_' } method_signature = SciUnit . dict_hash ( { 'attrs' : model_dict , 'args' : method_args } ) # Hash key. elif by == 'instance' : method_signature = SciUnit . dict_hash ( { 'id' : id ( model ) , 'args' : method_args } ) # Hash key. else : raise ValueError ( "Cache type must be 'value' or 'instance'" ) if method_signature not in cache : print ( "Method with this signature not found in the cache. Running..." ) f = getattr ( model , method ) f ( * * method_args ) cache [ method_signature ] = ( datetime . now ( ) , model . __dict__ . copy ( ) ) else : print ( "Method with this signature found in the cache. Restoring..." ) _ , attrs = cache [ method_signature ] model . __dict__ . update ( attrs ) return func ( * args , * * kwargs ) return decorate return decorate_
A decorator used on any model method which calls the model s method method if that latter method has not been called using the current arguments or simply sets model attributes to match the run results if it has .
444
41
25,297
def convert_path ( cls , file ) : if isinstance ( file , str ) : return file elif isinstance ( file , list ) and all ( [ isinstance ( x , str ) for x in file ] ) : return "/" . join ( file ) else : print ( "Incorrect path specified" ) return - 1
Check to see if an extended path is given and convert appropriately
71
12
25,298
def get_path ( self , file ) : class_path = inspect . getfile ( self . __class__ ) parent_path = os . path . dirname ( class_path ) path = os . path . join ( parent_path , self . path , file ) return os . path . realpath ( path )
Get the full path of the notebook found in the directory specified by self . path .
68
17
25,299
def fix_display ( self ) : try : tkinter . Tk ( ) except ( tkinter . TclError , NameError ) : # If there is no display. try : import matplotlib as mpl except ImportError : pass else : print ( "Setting matplotlib backend to Agg" ) mpl . use ( 'Agg' )
If this is being run on a headless system the Matplotlib backend must be changed to one that doesn t need a display .
76
27