idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
231,500
def _value_function ( self , x_input , y_true , y_pred ) : if len ( y_true . shape ) == 1 : return y_pred . argmax ( 1 ) . eq ( y_true ) . double ( ) . mean ( ) . item ( ) else : raise NotImplementedError
Return classification accuracy of input
70
5
231,501
def on_epoch_end ( self , epoch_info ) : metrics_df = pd . DataFrame ( [ epoch_info . result ] ) . set_index ( 'epoch_idx' ) visdom_append_metrics ( self . vis , metrics_df , first_epoch = epoch_info . global_epoch_idx == 1 )
Update data in visdom on push
81
7
231,502
def on_batch_end ( self , batch_info ) : if self . settings . stream_lr : iteration_idx = ( float ( batch_info . epoch_number ) + float ( batch_info . batch_number ) / batch_info . batches_per_epoch ) lr = batch_info . optimizer . param_groups [ - 1 ] [ 'lr' ] metrics_df = pd . DataFrame ( [ lr ] , index = [ iteration_idx ] , columns = [ 'lr' ] ) visdom_append_metrics ( self . vis , metrics_df , first_epoch = ( batch_info . epoch_number == 1 ) and ( batch_info . batch_number == 0 ) )
Stream LR to visdom
161
5
231,503
def main ( ) : parser = argparse . ArgumentParser ( description = 'Paperboy deep learning launcher' ) parser . add_argument ( 'config' , metavar = 'FILENAME' , help = 'Configuration file for the run' ) parser . add_argument ( 'command' , metavar = 'COMMAND' , help = 'A command to run' ) parser . add_argument ( 'varargs' , nargs = '*' , metavar = 'VARARGS' , help = 'Extra options to the command' ) parser . add_argument ( '-r' , '--run_number' , type = int , default = 0 , help = "A run number" ) parser . add_argument ( '-d' , '--device' , default = 'cuda' , help = "A device to run the model on" ) parser . add_argument ( '-s' , '--seed' , type = int , default = None , help = "Random seed for the project" ) parser . add_argument ( '-p' , '--param' , type = str , metavar = 'NAME=VALUE' , action = 'append' , default = [ ] , help = "Configuration parameters" ) parser . add_argument ( '--continue' , action = 'store_true' , default = False , help = "Continue previously started learning process" ) parser . add_argument ( '--profile' , type = str , default = None , help = "Profiler output" ) args = parser . parse_args ( ) model_config = ModelConfig . from_file ( args . config , args . run_number , continue_training = getattr ( args , 'continue' ) , device = args . device , seed = args . seed , params = { k : v for ( k , v ) in ( Parser . parse_equality ( eq ) for eq in args . param ) } ) if model_config . project_dir not in sys . path : sys . path . append ( model_config . project_dir ) multiprocessing_setting = model_config . provide_with_default ( 'multiprocessing' , default = None ) if multiprocessing_setting : # This needs to be called before any of PyTorch module is imported multiprocessing . set_start_method ( multiprocessing_setting ) # Set seed already in the launcher from vel . util . random import set_seed set_seed ( model_config . seed ) model_config . banner ( args . command ) if args . profile : print ( "[PROFILER] Running Vel in profiling mode, output filename={}" . format ( args . profile ) ) import cProfile import pstats profiler = cProfile . Profile ( ) profiler . enable ( ) model_config . run_command ( args . command , args . varargs ) profiler . disable ( ) profiler . dump_stats ( args . profile ) profiler . print_stats ( sort = 'tottime' ) print ( "======================================================================" ) pstats . Stats ( profiler ) . strip_dirs ( ) . sort_stats ( 'tottime' ) . print_stats ( 30 ) print ( "======================================================================" ) pstats . Stats ( profiler ) . strip_dirs ( ) . sort_stats ( 'cumtime' ) . print_stats ( 30 ) else : model_config . run_command ( args . command , args . varargs ) model_config . quit_banner ( )
Paperboy entry point - parse the arguments and run a command
764
12
231,504
def set_seed ( seed : int ) : random . seed ( seed ) np . random . seed ( seed ) torch . random . manual_seed ( seed )
Set random seed for python numpy and pytorch RNGs
34
14
231,505
def better ( old_value , new_value , mode ) : if ( old_value is None or np . isnan ( old_value ) ) and ( new_value is not None and not np . isnan ( new_value ) ) : return True if mode == 'min' : return new_value < old_value elif mode == 'max' : return new_value > old_value else : raise RuntimeError ( f"Mode '{mode}' value is not supported" )
Check if new value is better than the old value
106
10
231,506
def reset_weights ( self ) : init . uniform_ ( self . linear . weight , - 3e-3 , 3e-3 ) init . zeros_ ( self . linear . bias )
Initialize weights to sane defaults
42
6
231,507
def discount_bootstrap ( rewards_buffer , dones_buffer , final_values , discount_factor , number_of_steps ) : true_value_buffer = torch . zeros_like ( rewards_buffer ) # discount/bootstrap off value fn current_value = final_values for i in reversed ( range ( number_of_steps ) ) : current_value = rewards_buffer [ i ] + discount_factor * current_value * ( 1.0 - dones_buffer [ i ] ) true_value_buffer [ i ] = current_value return true_value_buffer
Calculate state values bootstrapping off the following state values
127
13
231,508
def find_project_directory ( start_path ) -> str : start_path = os . path . realpath ( start_path ) possible_name = os . path . join ( start_path , ModelConfig . PROJECT_FILE_NAME ) if os . path . exists ( possible_name ) : return start_path else : up_path = os . path . realpath ( os . path . join ( start_path , '..' ) ) if os . path . realpath ( start_path ) == up_path : raise RuntimeError ( f"Couldn't find project file starting from {start_path}" ) else : return ModelConfig . find_project_directory ( up_path )
Locate top - level project directory
150
7
231,509
def from_file ( cls , filename : str , run_number : int , continue_training : bool = False , seed : int = None , device : str = 'cuda' , params = None ) : with open ( filename , 'r' ) as fp : model_config_contents = Parser . parse ( fp ) project_config_path = ModelConfig . find_project_directory ( os . path . dirname ( os . path . abspath ( filename ) ) ) with open ( os . path . join ( project_config_path , cls . PROJECT_FILE_NAME ) , 'r' ) as fp : project_config_contents = Parser . parse ( fp ) aggregate_dictionary = { * * project_config_contents , * * model_config_contents } return ModelConfig ( filename = filename , configuration = aggregate_dictionary , run_number = run_number , project_dir = project_config_path , continue_training = continue_training , seed = seed , device = device , parameters = params )
Create model config from file
233
5
231,510
def from_memory ( cls , model_data : dict , run_number : int , project_dir : str , continue_training = False , seed : int = None , device : str = 'cuda' , params = None ) : return ModelConfig ( filename = "[memory]" , configuration = model_data , run_number = run_number , project_dir = project_dir , continue_training = continue_training , seed = seed , device = device , parameters = params )
Create model config from supplied data
104
6
231,511
def run_command ( self , command_name , varargs ) : command_descriptor = self . get_command ( command_name ) return command_descriptor . run ( * varargs )
Instantiate model class
44
4
231,512
def project_data_dir ( self , * args ) -> str : return os . path . normpath ( os . path . join ( self . project_dir , 'data' , * args ) )
Directory where to store data
43
5
231,513
def output_dir ( self , * args ) -> str : return os . path . join ( self . project_dir , 'output' , * args )
Directory where to store output
33
5
231,514
def project_top_dir ( self , * args ) -> str : return os . path . join ( self . project_dir , * args )
Project top - level directory
31
5
231,515
def provide_with_default ( self , name , default = None ) : return self . provider . instantiate_by_name_with_default ( name , default_value = default )
Return a dependency - injected instance
40
6
231,516
def benchmark_method ( f ) : @ wraps ( f ) def inner ( name , * args , * * kwargs ) : return Benchmark ( name , f , args , kwargs ) return inner
decorator to turn f into a factory of benchmarks
44
11
231,517
def bench ( participants = participants , benchmarks = benchmarks , bench_time = BENCH_TIME ) : mcs = [ p . factory ( ) for p in participants ] means = [ [ ] for p in participants ] stddevs = [ [ ] for p in participants ] # Have each lifter do one benchmark each last_fn = None for benchmark_name , fn , args , kwargs in benchmarks : logger . info ( '' ) logger . info ( '%s' , benchmark_name ) for i , ( participant , mc ) in enumerate ( zip ( participants , mcs ) ) : # FIXME: set before bench for get if 'get' in fn . __name__ : last_fn ( mc , * args , * * kwargs ) sw = Stopwatch ( ) while sw . total ( ) < bench_time : with sw . timing ( ) : fn ( mc , * args , * * kwargs ) means [ i ] . append ( sw . mean ( ) ) stddevs [ i ] . append ( sw . stddev ( ) ) logger . info ( u'%76s: %s' , participant . name , sw ) last_fn = fn return means , stddevs
Do you even lift?
262
5
231,518
def strip_datetime ( value ) : if isinstance ( value , basestring ) : try : return parse_datetime ( value ) except ValueError : return elif isinstance ( value , integer_types ) : try : return datetime . datetime . utcfromtimestamp ( value / 1e3 ) except ( ValueError , OverflowError , OSError ) : return
Converts value to datetime if string or int .
84
11
231,519
def set_session_token ( self , session_token ) : self . session_token = session_token self . _login_time = datetime . datetime . now ( )
Sets session token and new login time .
39
9
231,520
def get_password ( self ) : if self . password is None : if os . environ . get ( self . username + 'password' ) : self . password = os . environ . get ( self . username + 'password' ) else : raise PasswordError ( self . username )
If password is not provided will look in environment variables for username + password .
61
15
231,521
def get_app_key ( self ) : if self . app_key is None : if os . environ . get ( self . username ) : self . app_key = os . environ . get ( self . username ) else : raise AppKeyError ( self . username )
If app_key is not provided will look in environment variables for username .
60
15
231,522
def session_expired ( self ) : if not self . _login_time or ( datetime . datetime . now ( ) - self . _login_time ) . total_seconds ( ) > 12000 : return True
Returns True if login_time not set or seconds since login time is greater than 200 mins .
48
19
231,523
def check_status_code ( response , codes = None ) : codes = codes or [ 200 ] if response . status_code not in codes : raise StatusCodeError ( response . status_code )
Checks response . status_code is in codes .
42
11
231,524
def list_runner_book ( self , market_id , selection_id , handicap = None , price_projection = None , order_projection = None , match_projection = None , include_overall_position = None , partition_matched_by_strategy_ref = None , customer_strategy_refs = None , currency_code = None , matched_since = None , bet_ids = None , locale = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listRunnerBook' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . MarketBook , elapsed_time , lightweight )
Returns a list of dynamic data about a market and a specified runner . Dynamic data includes prices the status of the market the status of selections the traded volume and the status of any orders you have placed in the market
175
42
231,525
def list_current_orders ( self , bet_ids = None , market_ids = None , order_projection = None , customer_order_refs = None , customer_strategy_refs = None , date_range = time_range ( ) , order_by = None , sort_dir = None , from_record = None , record_count = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listCurrentOrders' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . CurrentOrders , elapsed_time , lightweight )
Returns a list of your current orders .
161
8
231,526
def list_cleared_orders ( self , bet_status = 'SETTLED' , event_type_ids = None , event_ids = None , market_ids = None , runner_ids = None , bet_ids = None , customer_order_refs = None , customer_strategy_refs = None , side = None , settled_date_range = time_range ( ) , group_by = None , include_item_description = None , locale = None , from_record = None , record_count = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listClearedOrders' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . ClearedOrders , elapsed_time , lightweight )
Returns a list of settled bets based on the bet status ordered by settled date .
199
16
231,527
def list_market_profit_and_loss ( self , market_ids , include_settled_bets = None , include_bsp_bets = None , net_of_commission = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listMarketProfitAndLoss' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . MarketProfitLoss , elapsed_time , lightweight )
Retrieve profit and loss for a given list of OPEN markets .
130
13
231,528
def place_orders ( self , market_id , instructions , customer_ref = None , market_version = None , customer_strategy_ref = None , async_ = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'placeOrders' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . PlaceOrders , elapsed_time , lightweight )
Place new orders into market .
118
6
231,529
def serialise ( self ) : return { 'marketId' : self . market_id , 'totalAvailable' : None , 'isMarketDataDelayed' : None , 'lastMatchTime' : None , 'betDelay' : self . market_definition . get ( 'betDelay' ) , 'version' : self . market_definition . get ( 'version' ) , 'complete' : self . market_definition . get ( 'complete' ) , 'runnersVoidable' : self . market_definition . get ( 'runnersVoidable' ) , 'totalMatched' : self . total_matched , 'status' : self . market_definition . get ( 'status' ) , 'bspReconciled' : self . market_definition . get ( 'bspReconciled' ) , 'crossMatching' : self . market_definition . get ( 'crossMatching' ) , 'inplay' : self . market_definition . get ( 'inPlay' ) , 'numberOfWinners' : self . market_definition . get ( 'numberOfWinners' ) , 'numberOfRunners' : len ( self . market_definition . get ( 'runners' ) ) , 'numberOfActiveRunners' : self . market_definition . get ( 'numberOfActiveRunners' ) , 'runners' : [ runner . serialise ( self . market_definition_runner_dict [ ( runner . selection_id , runner . handicap ) ] ) for runner in self . runners ] , 'publishTime' : self . publish_time , 'priceLadderDefinition' : self . market_definition . get ( 'priceLadderDefinition' ) , 'keyLineDescription' : self . market_definition . get ( 'keyLineDefinition' ) , 'marketDefinition' : self . market_definition , # used in lightweight }
Creates standard market book json response will error if EX_MARKET_DEF not incl .
402
20
231,530
def list_race_details ( self , meeting_ids = None , race_ids = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listRaceDetails' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . RaceDetails , elapsed_time , lightweight )
Search for races to get their details .
99
8
231,531
def list_available_events ( self , event_ids = None , event_type_ids = None , event_status = None , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listAvailableEvents' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . AvailableEvent , elapsed_time , lightweight )
Search for events that have live score data available .
107
10
231,532
def list_scores ( self , update_keys , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listScores' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . Score , elapsed_time , lightweight )
Returns a list of current scores for the given events .
89
11
231,533
def list_incidents ( self , update_keys , session = None , lightweight = None ) : params = clean_locals ( locals ( ) ) method = '%s%s' % ( self . URI , 'listIncidents' ) ( response , elapsed_time ) = self . request ( method , params , session ) return self . process_response ( response , resources . Incidents , elapsed_time , lightweight )
Returns a list of incidents for the given events .
90
10
231,534
def get_event_timeline ( self , event_id , session = None , lightweight = None ) : url = '%s%s' % ( self . url , 'eventTimeline' ) params = { 'eventId' : event_id , 'alt' : 'json' , 'regionCode' : 'UK' , 'locale' : 'en_GB' } ( response , elapsed_time ) = self . request ( params = params , session = session , url = url ) return self . process_response ( response , resources . EventTimeline , elapsed_time , lightweight )
Returns event timeline for event id provided .
128
8
231,535
def get_event_timelines ( self , event_ids , session = None , lightweight = None ) : url = '%s%s' % ( self . url , 'eventTimelines' ) params = { 'eventIds' : ',' . join ( str ( x ) for x in event_ids ) , 'alt' : 'json' , 'regionCode' : 'UK' , 'locale' : 'en_GB' } ( response , elapsed_time ) = self . request ( params = params , session = session , url = url ) return self . process_response ( response , resources . EventTimeline , elapsed_time , lightweight )
Returns a list of event timelines based on event id s supplied .
142
13
231,536
def get_scores ( self , event_ids , session = None , lightweight = None ) : url = '%s%s' % ( self . url , 'scores' ) params = { 'eventIds' : ',' . join ( str ( x ) for x in event_ids ) , 'alt' : 'json' , 'regionCode' : 'UK' , 'locale' : 'en_GB' } ( response , elapsed_time ) = self . request ( params = params , session = session , url = url ) return self . process_response ( response , resources . Scores , elapsed_time , lightweight )
Returns a list of scores based on event id s supplied .
137
12
231,537
def create_stream ( self , unique_id = 0 , listener = None , timeout = 11 , buffer_size = 1024 , description = 'BetfairSocket' , host = None ) : listener = listener if listener else BaseListener ( ) return BetfairStream ( unique_id , listener , app_key = self . client . app_key , session_token = self . client . session_token , timeout = timeout , buffer_size = buffer_size , description = description , host = host , )
Creates BetfairStream .
106
6
231,538
def get_my_data ( self , session = None ) : params = clean_locals ( locals ( ) ) method = 'GetMyData' ( response , elapsed_time ) = self . request ( method , params , session ) return response
Returns a list of data descriptions for data which has been purchased by the signed in user .
52
18
231,539
def get_data_size ( self , sport , plan , from_day , from_month , from_year , to_day , to_month , to_year , event_id = None , event_name = None , market_types_collection = None , countries_collection = None , file_type_collection = None , session = None ) : params = clean_locals ( locals ( ) ) method = 'GetAdvBasketDataSize' ( response , elapsed_time ) = self . request ( method , params , session ) return response
Returns a dictionary of file count and combines size files .
117
11
231,540
def login ( self , session = None ) : session = session or self . client . session try : response = session . get ( self . login_url ) except ConnectionError : raise APIError ( None , self . login_url , None , 'ConnectionError' ) except Exception as e : raise APIError ( None , self . login_url , None , e ) app_key = re . findall ( r'''"appKey":\s"(.*?)"''' , response . text ) if app_key : self . app_key = app_key [ 0 ] else : raise RaceCardError ( "Unable to find appKey" )
Parses app key from betfair exchange site .
138
11
231,541
def get_race_card ( self , market_ids , data_entries = None , session = None , lightweight = None ) : if not self . app_key : raise RaceCardError ( "You need to login before requesting a race_card\n" "APIClient.race_card.login()" ) params = self . create_race_card_req ( market_ids , data_entries ) ( response , elapsed_time ) = self . request ( params = params , session = session ) return self . process_response ( response , resources . RaceCard , elapsed_time , lightweight )
Returns a list of race cards based on market ids provided .
131
13
231,542
def on_data ( self , raw_data ) : try : data = json . loads ( raw_data ) except ValueError : logger . error ( 'value error: %s' % raw_data ) return unique_id = data . get ( 'id' ) if self . _error_handler ( data , unique_id ) : return False operation = data [ 'op' ] if operation == 'connection' : self . _on_connection ( data , unique_id ) elif operation == 'status' : self . _on_status ( data , unique_id ) elif operation in [ 'mcm' , 'ocm' ] : # historic data does not contain unique_id if self . stream_unique_id not in [ unique_id , 'HISTORICAL' ] : logger . warning ( 'Unwanted data received from uniqueId: %s, expecting: %s' % ( unique_id , self . stream_unique_id ) ) return self . _on_change_message ( data , unique_id )
Called when raw data is received from connection . Override this method if you wish to manually handle the stream data
224
23
231,543
def _on_connection ( self , data , unique_id ) : if unique_id is None : unique_id = self . stream_unique_id self . connection_id = data . get ( 'connectionId' ) logger . info ( '[Connect: %s]: connection_id: %s' % ( unique_id , self . connection_id ) )
Called on collection operation
78
5
231,544
def _on_status ( data , unique_id ) : status_code = data . get ( 'statusCode' ) logger . info ( '[Subscription: %s]: %s' % ( unique_id , status_code ) )
Called on status operation
51
5
231,545
def _error_handler ( data , unique_id ) : if data . get ( 'statusCode' ) == 'FAILURE' : logger . error ( '[Subscription: %s] %s: %s' % ( unique_id , data . get ( 'errorCode' ) , data . get ( 'errorMessage' ) ) ) if data . get ( 'connectionClosed' ) : return True if data . get ( 'status' ) : # Clients shouldn't disconnect if status 503 is returned; when the stream # recovers updates will be sent containing the latest data logger . warning ( '[Subscription: %s] status: %s' % ( unique_id , data [ 'status' ] ) )
Called when data first received
153
6
231,546
def stop ( self ) : self . _running = False if self . _socket is None : return try : self . _socket . shutdown ( socket . SHUT_RDWR ) self . _socket . close ( ) except socket . error : pass self . _socket = None
Stops read loop and closes socket if it has been created .
58
13
231,547
def authenticate ( self ) : unique_id = self . new_unique_id ( ) message = { 'op' : 'authentication' , 'id' : unique_id , 'appKey' : self . app_key , 'session' : self . session_token , } self . _send ( message ) return unique_id
Authentication request .
73
4
231,548
def heartbeat ( self ) : unique_id = self . new_unique_id ( ) message = { 'op' : 'heartbeat' , 'id' : unique_id , } self . _send ( message ) return unique_id
Heartbeat request to keep session alive .
51
8
231,549
def subscribe_to_markets ( self , market_filter , market_data_filter , initial_clk = None , clk = None , conflate_ms = None , heartbeat_ms = None , segmentation_enabled = True ) : unique_id = self . new_unique_id ( ) message = { 'op' : 'marketSubscription' , 'id' : unique_id , 'marketFilter' : market_filter , 'marketDataFilter' : market_data_filter , 'initialClk' : initial_clk , 'clk' : clk , 'conflateMs' : conflate_ms , 'heartbeatMs' : heartbeat_ms , 'segmentationEnabled' : segmentation_enabled , } if initial_clk and clk : # if resubscribe only update unique_id self . listener . stream_unique_id = unique_id else : self . listener . register_stream ( unique_id , 'marketSubscription' ) self . _send ( message ) return unique_id
Market subscription request .
224
4
231,550
def subscribe_to_orders ( self , order_filter = None , initial_clk = None , clk = None , conflate_ms = None , heartbeat_ms = None , segmentation_enabled = True ) : unique_id = self . new_unique_id ( ) message = { 'op' : 'orderSubscription' , 'id' : unique_id , 'orderFilter' : order_filter , 'initialClk' : initial_clk , 'clk' : clk , 'conflateMs' : conflate_ms , 'heartbeatMs' : heartbeat_ms , 'segmentationEnabled' : segmentation_enabled , } if initial_clk and clk : # if resubscribe only update unique_id self . listener . stream_unique_id = unique_id else : self . listener . register_stream ( unique_id , 'orderSubscription' ) self . _send ( message ) return unique_id
Order subscription request .
208
4
231,551
def _create_socket ( self ) : s = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) s = ssl . wrap_socket ( s ) s . connect ( ( self . host , self . __port ) ) s . settimeout ( self . timeout ) return s
Creates ssl socket connects to stream api and sets timeout .
67
13
231,552
def _read_loop ( self ) : while self . _running : received_data_raw = self . _receive_all ( ) if self . _running : self . receive_count += 1 self . datetime_last_received = datetime . datetime . utcnow ( ) received_data_split = received_data_raw . split ( self . __CRLF ) for received_data in received_data_split : if received_data : self . _data ( received_data )
Read loop splits by CRLF and pushes received data to _data .
109
15
231,553
def _receive_all ( self ) : ( data , part ) = ( '' , '' ) if is_py3 : crlf_bytes = bytes ( self . __CRLF , encoding = self . __encoding ) else : crlf_bytes = self . __CRLF while self . _running and part [ - 2 : ] != crlf_bytes : try : part = self . _socket . recv ( self . buffer_size ) except ( socket . timeout , socket . error ) as e : if self . _running : self . stop ( ) raise SocketError ( '[Connect: %s]: Socket %s' % ( self . _unique_id , e ) ) else : return # 133, prevents error if stop is called mid recv # an empty string indicates the server shutdown the socket if len ( part ) == 0 : self . stop ( ) raise SocketError ( 'Connection closed by server' ) data += part . decode ( self . __encoding ) return data
Whilst socket is running receives data from socket till CRLF is detected .
211
16
231,554
def _data ( self , received_data ) : if self . listener . on_data ( received_data ) is False : self . stop ( ) raise ListenerError ( self . listener . connection_id , received_data )
Sends data to listener if False is returned ; socket is closed .
49
14
231,555
def _send ( self , message ) : if not self . _running : self . _connect ( ) self . authenticate ( ) message_dumped = json . dumps ( message ) + self . __CRLF try : self . _socket . send ( message_dumped . encode ( ) ) except ( socket . timeout , socket . error ) as e : self . stop ( ) raise SocketError ( '[Connect: %s]: Socket %s' % ( self . _unique_id , e ) )
If not running connects socket and authenticates . Adds CRLF and sends message to Betfair .
108
20
231,556
def fit_transform ( self , X , y = None , * * fit_params ) : Xt , yt , fit_params = self . _fit ( X , y , * * fit_params ) if isinstance ( self . _final_estimator , XyTransformerMixin ) : Xt , yt , _ = self . _final_estimator . fit_transform ( Xt , yt ) else : if hasattr ( self . _final_estimator , 'fit_transform' ) : Xt = self . _final_estimator . fit_transform ( Xt , yt ) else : self . _final_estimator . fit ( Xt , yt ) Xt = self . _final_estimator . transform ( Xt ) self . N_fit = len ( yt ) return Xt , yt
Fit the model and transform with the final estimator Fits all the transforms one after the other and transforms the data then uses fit_transform on transformed data with the final estimator .
190
37
231,557
def predict ( self , X ) : Xt , _ , _ = self . _transform ( X ) return self . _final_estimator . predict ( Xt )
Apply transforms to the data and predict with the final estimator
37
12
231,558
def transform_predict ( self , X , y ) : Xt , yt , _ = self . _transform ( X , y ) yp = self . _final_estimator . predict ( Xt ) return yt , yp
Apply transforms to the data and predict with the final estimator . Unlike predict this also returns the transformed target
53
21
231,559
def score ( self , X , y = None , sample_weight = None ) : Xt , yt , swt = self . _transform ( X , y , sample_weight ) self . N_test = len ( yt ) score_params = { } if swt is not None : score_params [ 'sample_weight' ] = swt if self . scorer is None : return self . _final_estimator . score ( Xt , yt , * * score_params ) return self . scorer ( self . _final_estimator , Xt , yt , * * score_params )
Apply transforms and score with the final estimator
134
9
231,560
def predict_proba ( self , X ) : Xt , _ , _ = self . _transform ( X ) return self . _final_estimator . predict_proba ( Xt )
Apply transforms and predict_proba of the final estimator
43
12
231,561
def decision_function ( self , X ) : Xt , _ , _ = self . _transform ( X ) return self . _final_estimator . decision_function ( Xt )
Apply transforms and decision_function of the final estimator
41
11
231,562
def predict_log_proba ( self , X ) : Xt , _ , _ = self . _transform ( X ) return self . _final_estimator . predict_log_proba ( Xt )
Apply transforms and predict_log_proba of the final estimator
47
14
231,563
def base_features ( ) : features = { 'mean' : mean , 'median' : median , 'abs_energy' : abs_energy , 'std' : std , 'var' : var , 'min' : minimum , 'max' : maximum , 'skew' : skew , 'kurt' : kurt , 'mse' : mse , 'mnx' : mean_crossings } return features
Returns dictionary of some basic features that can be calculated for segmented time series data
93
16
231,564
def all_features ( ) : features = { 'mean' : mean , 'median' : median , 'gmean' : gmean , 'hmean' : hmean , 'vec_sum' : vec_sum , 'abs_sum' : abs_sum , 'abs_energy' : abs_energy , 'std' : std , 'var' : var , 'variation' : variation , 'min' : minimum , 'max' : maximum , 'skew' : skew , 'kurt' : kurt , 'mean_diff' : mean_diff , 'mean_abs_diff' : means_abs_diff , 'mse' : mse , 'mnx' : mean_crossings , 'hist4' : hist ( ) , 'corr' : corr2 , 'mean_abs_value' : mean_abs , 'zero_crossings' : zero_crossing ( ) , 'slope_sign_changes' : slope_sign_changes ( ) , 'waveform_length' : waveform_length , 'emg_var' : emg_var , 'root_mean_square' : root_mean_square , 'willison_amplitude' : willison_amplitude ( ) } return features
Returns dictionary of all features in the module
277
8
231,565
def emg_features ( threshold = 0 ) : return { 'mean_abs_value' : mean_abs , 'zero_crossings' : zero_crossing ( threshold ) , 'slope_sign_changes' : slope_sign_changes ( threshold ) , 'waveform_length' : waveform_length , 'integrated_emg' : abs_sum , 'emg_var' : emg_var , 'simple square integral' : abs_energy , 'root_mean_square' : root_mean_square , 'willison_amplitude' : willison_amplitude ( threshold ) , }
Return a dictionary of popular features used for EMG time series classification .
138
14
231,566
def means_abs_diff ( X ) : return np . mean ( np . abs ( np . diff ( X , axis = 1 ) ) , axis = 1 )
mean absolute temporal derivative
35
4
231,567
def mse ( X ) : return np . mean ( np . square ( np . abs ( np . fft . fft ( X , axis = 1 ) ) ) , axis = 1 )
computes mean spectral energy for each variable in a segmented time series
41
14
231,568
def mean_crossings ( X ) : X = np . atleast_3d ( X ) N = X . shape [ 0 ] D = X . shape [ 2 ] mnx = np . zeros ( ( N , D ) ) for i in range ( D ) : pos = X [ : , : , i ] > 0 npos = ~ pos c = ( pos [ : , : - 1 ] & npos [ : , 1 : ] ) | ( npos [ : , : - 1 ] & pos [ : , 1 : ] ) mnx [ : , i ] = np . count_nonzero ( c , axis = 1 ) return mnx
Computes number of mean crossings for each variable in a segmented time series
145
15
231,569
def corr2 ( X ) : X = np . atleast_3d ( X ) N = X . shape [ 0 ] D = X . shape [ 2 ] if D == 1 : return np . zeros ( N , dtype = np . float ) trii = np . triu_indices ( D , k = 1 ) DD = len ( trii [ 0 ] ) r = np . zeros ( ( N , DD ) ) for i in np . arange ( N ) : rmat = np . corrcoef ( X [ i ] ) # get the ith window from each signal, result will be DxD r [ i ] = rmat [ trii ] return r
computes correlations between all variable pairs in a segmented time series
152
13
231,570
def waveform_length ( X ) : return np . sum ( np . abs ( np . diff ( X , axis = 1 ) ) , axis = 1 )
cumulative length of the waveform over a segment for each variable in the segmented time series
34
19
231,571
def root_mean_square ( X ) : segment_width = X . shape [ 1 ] return np . sqrt ( np . sum ( X * X , axis = 1 ) / segment_width )
root mean square for each variable in the segmented time series
43
12
231,572
def split ( self , X , y ) : check_ts_data ( X , y ) Xt , Xc = get_ts_data_parts ( X ) Ns = len ( Xt ) Xt_new , y_new = self . _ts_slice ( Xt , y ) if Xc is not None : Xc_new = np . concatenate ( [ Xc ] * self . n_splits ) X_new = TS_Data ( Xt_new , Xc_new ) else : X_new = np . array ( Xt_new ) cv = self . _make_indices ( Ns ) return X_new , y_new , cv
Splits time series data and target arrays and generates splitting indices
153
12
231,573
def _ts_slice ( self , Xt , y ) : Ns = len ( Xt ) Xt_new = [ ] for i in range ( self . n_splits ) : for j in range ( Ns ) : Njs = int ( len ( Xt [ j ] ) / self . n_splits ) Xt_new . append ( Xt [ j ] [ ( Njs * i ) : ( Njs * ( i + 1 ) ) ] ) Xt_new = np . array ( Xt_new ) if len ( np . atleast_1d ( y [ 0 ] ) ) == len ( Xt [ 0 ] ) : # y is a time series y_new = [ ] for i in range ( self . n_splits ) : for j in range ( Ns ) : Njs = int ( len ( y [ j ] ) / self . n_splits ) y_new . append ( y [ j ] [ ( Njs * i ) : ( Njs * ( i + 1 ) ) ] ) y_new = np . array ( y_new ) else : # y is contextual to each series y_new = np . concatenate ( [ y for i in range ( self . n_splits ) ] ) return Xt_new , y_new
takes time series data and splits each series into temporal folds
286
12
231,574
def _make_indices ( self , Ns ) : N_new = int ( Ns * self . n_splits ) test = [ np . full ( N_new , False ) for i in range ( self . n_splits ) ] for i in range ( self . n_splits ) : test [ i ] [ np . arange ( Ns * i , Ns * ( i + 1 ) ) ] = True train = [ np . logical_not ( test [ i ] ) for i in range ( self . n_splits ) ] test = [ np . arange ( N_new ) [ test [ i ] ] for i in range ( self . n_splits ) ] train = [ np . arange ( N_new ) [ train [ i ] ] for i in range ( self . n_splits ) ] cv = list ( zip ( train , test ) ) return cv
makes indices for cross validation
199
5
231,575
def transform ( self , X , y , sample_weight = None ) : check_ts_data_with_ts_target ( X , y ) Xt , Xc = get_ts_data_parts ( X ) N = len ( Xt ) # number of time series # transformed data yt = [ ] Xtt = [ ] swt = sample_weight Nt = [ ] for i in range ( N ) : Xi , yi = self . _transform ( Xt [ i ] , y [ i ] ) yt += yi Xtt += Xi Nt . append ( len ( yi ) ) # number of contiguous class instances if Xc is not None : Xct = expand_variables_to_segments ( Xc , Nt ) Xtt = TS_Data ( Xtt , Xct ) if sample_weight is not None : swt = expand_variables_to_segments ( sample_weight , Nt ) return Xtt , yt , swt
Transforms the time series data with run length encoding of the target variable Note this transformation changes the number of samples in the data If sample_weight is provided it is transformed to align to the new target encoding
215
41
231,576
def _rle ( self , a ) : ia = np . asarray ( a ) n = len ( ia ) y = np . array ( ia [ 1 : ] != ia [ : - 1 ] ) # pairwise unequal (string safe) i = np . append ( np . where ( y ) , n - 1 ) # must include last element posi z = np . diff ( np . append ( - 1 , i ) ) # run lengths p = np . cumsum ( np . append ( 0 , z ) ) [ : - 1 ] # positions return ( z , p , ia [ i ] )
rle implementation credit to Thomas Browne from his SOF post Sept 2015
135
14
231,577
def _transform ( self , X , y ) : z , p , y_rle = self . _rle ( y ) p = np . append ( p , len ( y ) ) big_enough = p [ 1 : ] - p [ : - 1 ] >= self . min_length Xt = [ ] for i in range ( len ( y_rle ) ) : if ( big_enough [ i ] ) : Xt . append ( X [ p [ i ] : p [ i + 1 ] ] ) yt = y_rle [ big_enough ] . tolist ( ) return Xt , yt
Transforms single series
135
4
231,578
def get_ts_data_parts ( X ) : if not isinstance ( X , TS_Data ) : return X , None return X . ts_data , X . context_data
Separates time series data object into time series variables and contextual variables
40
14
231,579
def check_ts_data_with_ts_target ( X , y = None ) : if y is not None : Nx = len ( X ) Ny = len ( y ) if Nx != Ny : raise ValueError ( "Number of time series different in X (%d) and y (%d)" % ( Nx , Ny ) ) Xt , _ = get_ts_data_parts ( X ) Ntx = np . array ( [ len ( Xt [ i ] ) for i in np . arange ( Nx ) ] ) Nty = np . array ( [ len ( np . atleast_1d ( y [ i ] ) ) for i in np . arange ( Nx ) ] ) if np . count_nonzero ( Nty == Ntx ) == Nx : return else : raise ValueError ( "Invalid time series lengths.\n" "Ns: " , Nx , "Ntx: " , Ntx , "Nty: " , Nty )
Checks time series data with time series target is good . If not raises value error .
215
18
231,580
def ts_stats ( Xt , y , fs = 1.0 , class_labels = None ) : check_ts_data ( Xt ) Xt , Xs = get_ts_data_parts ( Xt ) if Xs is not None : S = len ( np . atleast_1d ( Xs [ 0 ] ) ) else : S = 0 C = np . max ( y ) + 1 # number of classes if class_labels is None : class_labels = np . arange ( C ) N = len ( Xt ) if Xt [ 0 ] . ndim > 1 : D = Xt [ 0 ] . shape [ 1 ] else : D = 1 Ti = np . array ( [ Xt [ i ] . shape [ 0 ] for i in range ( N ) ] , dtype = np . float64 ) / fs ic = np . array ( [ y == i for i in range ( C ) ] ) Tic = [ Ti [ ic [ i ] ] for i in range ( C ) ] T = np . sum ( Ti ) total = { "n_series" : N , "n_classes" : C , "n_TS_vars" : D , "n_context_vars" : S , "Total_Time" : T , "Series_Time_Mean" : np . mean ( Ti ) , "Series_Time_Std" : np . std ( Ti ) , "Series_Time_Range" : ( np . min ( Ti ) , np . max ( Ti ) ) } by_class = { "Class_labels" : class_labels , "n_series" : np . array ( [ len ( Tic [ i ] ) for i in range ( C ) ] ) , "Total_Time" : np . array ( [ np . sum ( Tic [ i ] ) for i in range ( C ) ] ) , "Series_Time_Mean" : np . array ( [ np . mean ( Tic [ i ] ) for i in range ( C ) ] ) , "Series_Time_Std" : np . array ( [ np . std ( Tic [ i ] ) for i in range ( C ) ] ) , "Series_Time_Min" : np . array ( [ np . min ( Tic [ i ] ) for i in range ( C ) ] ) , "Series_Time_Max" : np . array ( [ np . max ( Tic [ i ] ) for i in range ( C ) ] ) } results = { 'total' : total , 'by_class' : by_class } return results
Generates some helpful statistics about the data X
571
9
231,581
def load_watch ( ) : module_path = dirname ( __file__ ) data = np . load ( module_path + "/data/watch_dataset.npy" ) . item ( ) return data
Loads some of the 6 - axis inertial sensor data from my smartwatch project . The sensor data was recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a smartwatch . It is a multivariate time series .
47
48
231,582
def shuffle_data ( X , y = None , sample_weight = None ) : if len ( X ) > 1 : ind = np . arange ( len ( X ) , dtype = np . int ) np . random . shuffle ( ind ) Xt = X [ ind ] yt = y swt = sample_weight if yt is not None : yt = yt [ ind ] if swt is not None : swt = swt [ ind ] return Xt , yt , swt else : return X , y , sample_weight
Shuffles indices X y and sample_weight together
119
11
231,583
def expand_variables_to_segments ( v , Nt ) : N_v = len ( np . atleast_1d ( v [ 0 ] ) ) return np . concatenate ( [ np . full ( ( Nt [ i ] , N_v ) , v [ i ] ) for i in np . arange ( len ( v ) ) ] )
expands contextual variables v by repeating each instance as specified in Nt
82
14
231,584
def sliding_window ( time_series , width , step , order = 'F' ) : w = np . hstack ( [ time_series [ i : 1 + i - width or None : step ] for i in range ( 0 , width ) ] ) result = w . reshape ( ( int ( len ( w ) / width ) , width ) , order = 'F' ) if order == 'F' : return result else : return np . ascontiguousarray ( result )
Segments univariate time series with sliding window
103
9
231,585
def sliding_tensor ( mv_time_series , width , step , order = 'F' ) : D = mv_time_series . shape [ 1 ] data = [ sliding_window ( mv_time_series [ : , j ] , width , step , order ) for j in range ( D ) ] return np . stack ( data , axis = 2 )
segments multivariate time series with sliding window
81
9
231,586
def transform ( self , X , y = None , sample_weight = None ) : check_ts_data ( X , y ) Xt , Xc = get_ts_data_parts ( X ) yt = y N = len ( Xt ) # number of time series if Xt [ 0 ] . ndim > 1 : Xt = np . array ( [ sliding_tensor ( Xt [ i ] , self . width , self . _step , self . order ) for i in np . arange ( N ) ] ) else : Xt = np . array ( [ sliding_window ( Xt [ i ] , self . width , self . _step , self . order ) for i in np . arange ( N ) ] ) Nt = [ len ( Xt [ i ] ) for i in np . arange ( len ( Xt ) ) ] Xt = np . concatenate ( Xt ) if Xc is not None : Xc = expand_variables_to_segments ( Xc , Nt ) Xt = TS_Data ( Xt , Xc ) if yt is not None : yt = np . array ( [ sliding_window ( yt [ i ] , self . width , self . _step , self . order ) for i in np . arange ( N ) ] ) yt = np . concatenate ( yt ) yt = self . y_func ( yt ) if self . shuffle is True : check_random_state ( self . random_state ) Xt , yt , _ = shuffle_data ( Xt , yt ) return Xt , yt , None
Transforms the time series data into segments Note this transformation changes the number of samples in the data If y is provided it is segmented and transformed to align to the new samples as per y_func Currently sample weights always returned as None
358
47
231,587
def transform ( self , X , y = None , sample_weight = None ) : check_ts_data ( X , y ) Xt , Xc = get_ts_data_parts ( X ) yt = y swt = sample_weight Xt = self . _mv_resize ( Xt ) if Xc is not None : Xt = TS_Data ( Xt , Xc ) if yt is not None and len ( np . atleast_1d ( yt [ 0 ] ) ) > 1 : # y is a time series yt = self . _mv_resize ( yt ) swt = None elif yt is not None : # todo: is this needed? yt = np . array ( yt ) return Xt , yt , swt
Transforms the time series data into fixed length segments using padding and or truncation If y is a time series and passed it will be transformed as well
177
30
231,588
def _check_data ( self , X ) : if len ( X ) > 1 : sval = np . unique ( X [ 0 ] [ : , 1 ] ) if np . all ( [ np . all ( np . unique ( X [ i ] [ : , 1 ] ) == sval ) for i in range ( 1 , len ( X ) ) ] ) : pass else : raise ValueError ( "Unique identifier var_types not consistent between time series" )
Checks that unique identifiers vaf_types are consistent between time series .
99
15
231,589
def _check_features ( self , features , Xti ) : N = Xti . shape [ 0 ] N_fts = len ( features ) fshapes = np . zeros ( ( N_fts , 2 ) , dtype = np . int ) keys = [ key for key in features ] for i in np . arange ( N_fts ) : fshapes [ i ] = np . row_stack ( features [ keys [ i ] ] ( Xti ) ) . shape # make sure each feature returns an array shape [N, ] if not np . all ( fshapes [ : , 0 ] == N ) : raise ValueError ( "feature function returned array with invalid length, " , np . array ( features . keys ( ) ) [ fshapes [ : , 0 ] != N ] ) return { keys [ i ] : fshapes [ i , 1 ] for i in range ( N_fts ) }
tests output of each feature against a segmented time series X
198
12
231,590
def _generate_feature_labels ( self , X ) : Xt , Xc = get_ts_data_parts ( X ) ftr_sizes = self . _check_features ( self . features , Xt [ 0 : 3 ] ) f_labels = [ ] # calculated features for key in ftr_sizes : for i in range ( ftr_sizes [ key ] ) : f_labels += [ key + '_' + str ( i ) ] # contextual features if Xc is not None : Ns = len ( np . atleast_1d ( Xc [ 0 ] ) ) s_labels = [ "context_" + str ( i ) for i in range ( Ns ) ] f_labels += s_labels return f_labels
Generates string feature labels
176
5
231,591
def _retrieve_indices ( cols ) : if isinstance ( cols , int ) : return [ cols ] elif isinstance ( cols , slice ) : start = cols . start if cols . start else 0 stop = cols . stop step = cols . step if cols . step else 1 return list ( range ( start , stop , step ) ) elif isinstance ( cols , list ) and cols : if isinstance ( cols [ 0 ] , bool ) : return np . flatnonzero ( np . asarray ( cols ) ) elif isinstance ( cols [ 0 ] , int ) : return cols else : raise TypeError ( 'No valid column specifier. Only a scalar, list or slice of all' 'integers or a boolean mask are allowed.' )
Retrieve a list of indices corresponding to the provided column specification .
177
13
231,592
def _validate ( self ) : if self . f_labels is None : raise NotFittedError ( 'FeatureRepMix' ) if not self . transformers : return names , transformers , _ = zip ( * self . transformers ) # validate names self . _validate_names ( names ) # validate transformers for trans in transformers : if not isinstance ( trans , FeatureRep ) : raise TypeError ( "All transformers must be an instance of FeatureRep." " '%s' (type %s) doesn't." % ( trans , type ( trans ) ) )
Internal function to validate the transformer before applying all internal transformers .
126
13
231,593
def transform ( self , X ) : if self . func is None : return X else : Xt , Xc = get_ts_data_parts ( X ) n_samples = len ( Xt ) Xt = self . func ( Xt , * * self . func_kwargs ) if len ( Xt ) != n_samples : raise ValueError ( "FunctionTransformer changes sample number (not supported)." ) if Xc is not None : Xt = TS_Data ( Xt , Xc ) return Xt
Transforms the time series data based on the provided function . Note this transformation must not change the number of samples in the data .
115
26
231,594
def build_payload ( self , payload ) : remaining_size = self . MAX_SEGMENT_PAYLOAD_SIZE for part in self . parts : part_payload = part . pack ( remaining_size ) payload . write ( part_payload ) remaining_size -= len ( part_payload )
Build payload of all parts and write them into the payload buffer
68
12
231,595
def escape ( value ) : if isinstance ( value , ( tuple , list ) ) : return "(" + ", " . join ( [ escape ( arg ) for arg in value ] ) + ")" else : typ = by_python_type . get ( value . __class__ ) if typ is None : raise InterfaceError ( "Unsupported python input: %s (%s)" % ( value , value . __class__ ) ) return typ . to_sql ( value )
Escape a single value .
100
6
231,596
def escape_values ( values ) : if isinstance ( values , ( tuple , list ) ) : return tuple ( [ escape ( value ) for value in values ] ) elif isinstance ( values , dict ) : return dict ( [ ( key , escape ( value ) ) for ( key , value ) in values . items ( ) ] ) else : raise InterfaceError ( "escape_values expects list, tuple or dict" )
Escape multiple values from a list tuple or dict .
89
11
231,597
def prepare ( cls , value ) : pfield = struct . pack ( 'b' , cls . type_code ) if isinstance ( value , string_types ) : value = datetime . datetime . strptime ( value , "%Y-%m-%d" ) year = value . year | 0x8000 # for some unknown reasons year has to be bit-or'ed with 0x8000 month = value . month - 1 # for some unknown reasons HANA counts months starting from zero pfield += cls . _struct . pack ( year , month , value . day ) return pfield
Pack datetime value into proper binary format
130
8
231,598
def prepare ( cls , value ) : pfield = struct . pack ( 'b' , cls . type_code ) if isinstance ( value , string_types ) : if "." in value : value = datetime . datetime . strptime ( value , "%H:%M:%S.%f" ) else : value = datetime . datetime . strptime ( value , "%H:%M:%S" ) millisecond = value . second * 1000 + value . microsecond // 1000 hour = value . hour | 0x80 # for some unknown reasons hour has to be bit-or'ed with 0x80 pfield += cls . _struct . pack ( hour , value . minute , millisecond ) return pfield
Pack time value into proper binary format
162
7
231,599
def prepare ( cls , value , length = 0 , position = 0 , is_last_data = True ) : hstruct = WriteLobHeader . header_struct lob_option_dataincluded = WriteLobHeader . LOB_OPTION_DATAINCLUDED if length > 0 else 0 lob_option_lastdata = WriteLobHeader . LOB_OPTION_LASTDATA if is_last_data else 0 options = lob_option_dataincluded | lob_option_lastdata pfield = hstruct . pack ( cls . type_code , options , length , position ) return pfield
Prepare Lob header . Note that the actual lob data is NOT written here but appended after the parameter block for each row!
134
26