idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
30,400
def main ( ) : config . parse_args ( ) if cfg . CONF . kafka_metrics . enabled : prepare_processes ( cfg . CONF . kafka_metrics , cfg . CONF . repositories . metrics_driver ) if cfg . CONF . kafka_alarm_history . enabled : prepare_processes ( cfg . CONF . kafka_alarm_history , cfg . CONF . repositories . alarm_state_hi...
Start persister .
30,401
def _get_config_files ( ) : conf_files = cfg . find_config_files ( project = 'monasca' , prog = 'monasca-persister' ) if len ( conf_files ) == 0 : old_conf_files = cfg . find_config_files ( project = 'monasca' , prog = 'persister' ) if len ( old_conf_files ) > 0 : LOG . warning ( 'Found deprecated old location "{}" ' '...
Get the possible configuration files accepted by oslo . config
30,402
def load_conf_modules ( ) : for modname in _list_module_names ( ) : mod = importutils . import_module ( 'monasca_persister.conf.' + modname ) required_funcs = [ 'register_opts' , 'list_opts' ] for func in required_funcs : if hasattr ( mod , func ) : yield mod
Load all modules that contain configuration .
30,403
def list_opts ( ) : for mod in load_conf_modules ( ) : mod_opts = mod . list_opts ( ) if type ( mod_opts ) is list : for single_mod_opts in mod_opts : yield single_mod_opts [ 0 ] , single_mod_opts [ 1 ] else : yield mod_opts [ 0 ] , mod_opts [ 1 ]
List all conf modules opts .
30,404
def message ( title = "" , text = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : return _simple_dialog ( Gtk . MessageType . INFO , text , title , width , height , timeout )
Display a simple message
30,405
def error ( title = "" , text = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : return _simple_dialog ( Gtk . MessageType . ERROR , text , title , width , height , timeout )
Display a simple error
30,406
def warning ( title = "" , text = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : return _simple_dialog ( Gtk . MessageType . WARNING , text , title , width , height , timeout )
Display a simple warning
30,407
def entry ( text = "" , placeholder = "" , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZEntryMessage ( text , placeholder , title , width , height , timeout ) dialog . run ( ) return dialog . response
Display a text input
30,408
def password ( text = "" , placeholder = "" , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZEntryPassword ( text , placeholder , title , width , height , timeout ) dialog . run ( ) return dialog . response
Display a text input with hidden characters
30,409
def zlist ( columns , items , print_columns = None , text = "" , title = "" , width = DEFAULT_WIDTH , height = ZLIST_HEIGHT , timeout = None ) : dialog = ZList ( columns , items , print_columns , text , title , width , height , timeout ) dialog . run ( ) return dialog . response
Display a list of values
30,410
def file_selection ( multiple = False , directory = False , save = False , confirm_overwrite = False , filename = None , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZFileSelection ( multiple , directory , save , confirm_overwrite , filename , title , width , height , timeo...
Open a file selection window
30,411
def calendar ( text = "" , day = None , month = None , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZCalendar ( text , day , month , title , width , height , timeout ) dialog . run ( ) return dialog . response
Display a calendar
30,412
def color_selection ( show_palette = False , opacity_control = False , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZColorSelection ( show_palette , opacity_control , title , width , height , timeout ) dialog . run ( ) return dialog . response
Display a color selection dialog
30,413
def scale ( text = "" , value = 0 , min = 0 , max = 100 , step = 1 , draw_value = True , title = "" , width = DEFAULT_WIDTH , height = DEFAULT_HEIGHT , timeout = None ) : dialog = ZScale ( text , value , min , max , step , draw_value , title , width , height , timeout ) dialog . run ( ) return dialog . response
Select a number with a range widget
30,414
def raw_on ( serial ) : def flush_to_msg ( serial , msg ) : data = serial . read_until ( msg ) if not data . endswith ( msg ) : if COMMAND_LINE_FLAG : print ( data ) raise IOError ( 'Could not enter raw REPL.' ) def flush ( serial ) : n = serial . inWaiting ( ) while n > 0 : serial . read ( n ) n = serial . inWaiting (...
Puts the device into raw mode .
30,415
def clean_error ( err ) : if err : decoded = err . decode ( 'utf-8' ) try : return decoded . split ( '\r\n' ) [ - 2 ] except Exception : return decoded return 'There was an error.'
Take stderr bytes returned from MicroPython and attempt to create a non - verbose error message .
30,416
def version ( serial = None ) : try : out , err = execute ( [ 'import os' , 'print(os.uname())' , ] , serial ) if err : raise ValueError ( clean_error ( err ) ) except ValueError : raise except Exception : raise ValueError ( ) raw = out . decode ( 'utf-8' ) . strip ( ) raw = raw [ 1 : - 1 ] items = raw . split ( ', ' )...
Returns version information for MicroPython running on the connected device .
30,417
def main ( argv = None ) : if not argv : argv = sys . argv [ 1 : ] try : global COMMAND_LINE_FLAG COMMAND_LINE_FLAG = True parser = argparse . ArgumentParser ( description = _HELP_TEXT ) parser . add_argument ( 'command' , nargs = '?' , default = None , help = "One of 'ls', 'rm', 'put' or 'get'." ) parser . add_argumen...
Entry point for the command line tool ufs .
30,418
def http_context ( self , worker_ctx ) : http = { } if isinstance ( worker_ctx . entrypoint , HttpRequestHandler ) : try : request = worker_ctx . args [ 0 ] try : if request . mimetype == 'application/json' : data = request . data else : data = request . form except ClientDisconnected : data = { } urlparts = urlsplit (...
Attempt to extract HTTP context if an HTTP entrypoint was used .
30,419
def user_context ( self , worker_ctx , exc_info ) : user = { } for key in worker_ctx . context_data : for matcher in self . user_type_context_keys : if re . search ( matcher , key ) : user [ key ] = worker_ctx . context_data [ key ] break self . client . user_context ( user )
Merge any user context to include in the sentry payload .
30,420
def tags_context ( self , worker_ctx , exc_info ) : tags = { 'call_id' : worker_ctx . call_id , 'parent_call_id' : worker_ctx . immediate_parent_call_id , 'service_name' : worker_ctx . container . service_name , 'method_name' : worker_ctx . entrypoint . method_name } for key in worker_ctx . context_data : for matcher i...
Merge any tags to include in the sentry payload .
30,421
def extra_context ( self , worker_ctx , exc_info ) : extra = { } extra . update ( worker_ctx . context_data ) self . client . extra_context ( extra )
Merge any extra context to include in the sentry payload .
30,422
async def wait_until_serving ( self ) -> None : await asyncio . gather ( self . _receiving_loop_running . wait ( ) , self . _internal_loop_running . wait ( ) , loop = self . event_loop )
Await until the Endpoint is ready to receive events .
30,423
async def connect_to_endpoints ( self , * endpoints : ConnectionConfig ) -> None : self . _throw_if_already_connected ( * endpoints ) await asyncio . gather ( * ( self . _await_connect_to_endpoint ( endpoint ) for endpoint in endpoints ) , loop = self . event_loop )
Connect to the given endpoints and await until all connections are established .
30,424
def connect_to_endpoints_nowait ( self , * endpoints : ConnectionConfig ) -> None : self . _throw_if_already_connected ( * endpoints ) for endpoint in endpoints : asyncio . ensure_future ( self . _await_connect_to_endpoint ( endpoint ) )
Connect to the given endpoints as soon as they become available but do not block .
30,425
async def wait_for ( self , event_type : Type [ TWaitForEvent ] ) -> TWaitForEvent : async for event in self . stream ( event_type , num_events = 1 ) : return event
Wait for a single instance of an event that matches the specified event type .
30,426
def add_to_heap ( self , heap , descriptors = 'stale' , data = 'stale' ) : if descriptors not in [ 'stale' , 'all' , 'none' ] : raise ValueError ( "descriptors must be one of 'stale', 'all', 'none'" ) if data not in [ 'stale' , 'all' , 'none' ] : raise ValueError ( "data must be one of 'stale', 'all', 'none'" ) for ite...
Update a heap to contains all the new items and item descriptors since the last call .
30,427
def get ( self , loop = None ) : self . _clear_done_waiters ( ) if not self . _waiters : try : heap = self . get_nowait ( ) except spead2 . Empty : pass else : yield raise Return ( heap ) if loop is None : loop = self . _loop waiter = trollius . Future ( loop = loop ) self . _waiters . append ( waiter ) self . _start_l...
Coroutine that waits for a heap to become available and returns it .
30,428
def parse_range_list ( ranges ) : if not ranges : return [ ] parts = ranges . split ( ',' ) out = [ ] for part in parts : fields = part . split ( '-' , 1 ) if len ( fields ) == 2 : start = int ( fields [ 0 ] ) end = int ( fields [ 1 ] ) out . extend ( range ( start , end + 1 ) ) else : out . append ( int ( fields [ 0 ]...
Split a string like 2 3 - 5 8 9 - 11 into a list of integers . This is intended to ease adding command - line options for dealing with affinity .
30,429
def _parse_format ( cls , fmt ) : fields = [ ] if not fmt : raise ValueError ( 'empty format' ) for code , length in fmt : if length == 0 : raise ValueError ( 'zero-length field (bug_compat mismatch?)' ) if ( ( code in ( 'u' , 'i' ) and length in ( 8 , 16 , 32 , 64 ) ) or ( code == 'f' and length in ( 32 , 64 ) ) ) : f...
Attempt to convert a SPEAD format specification to a numpy dtype . Where necessary O is used .
30,430
def itemsize_bits ( self ) : if self . dtype is not None : return self . dtype . itemsize * 8 else : return sum ( x [ 1 ] for x in self . format )
Number of bits per element
30,431
def dynamic_shape ( self , max_elements ) : known = 1 unknown_pos = - 1 for i , x in enumerate ( self . shape ) : if x is not None : known *= x else : assert unknown_pos == - 1 , 'Shape has multiple unknown dimensions' unknown_pos = i if unknown_pos == - 1 : return self . shape else : shape = list ( self . shape ) if k...
Determine the dynamic shape given incoming data that is big enough to hold max_elements elements .
30,432
def update ( self , heap ) : for descriptor in heap . get_descriptors ( ) : item = Item . from_raw ( descriptor , flavour = heap . flavour ) self . _add_item ( item ) updated_items = { } for raw_item in heap . get_items ( ) : if raw_item . id <= STREAM_CTRL_ID : continue try : item = self . _by_id [ raw_item . id ] exc...
Update the item descriptors and items from an incoming heap .
30,433
def build_landmark_op ( self ) : tasklogger . log_start ( "landmark operator" ) is_sparse = sparse . issparse ( self . kernel ) tasklogger . log_start ( "SVD" ) _ , _ , VT = randomized_svd ( self . diff_aff , n_components = self . n_svd , random_state = self . random_state ) tasklogger . log_complete ( "SVD" ) tasklogg...
Build the landmark operator
30,434
def build_kernel ( self ) : tasklogger . log_start ( "subgraphs" ) self . subgraphs = [ ] from . api import Graph for i , idx in enumerate ( self . samples ) : tasklogger . log_debug ( "subgraph {}: sample {}, " "n = {}, knn = {}" . format ( i , idx , np . sum ( self . sample_idx == idx ) , self . knn ) ) data = self ....
Build the MNN kernel .
30,435
def from_igraph ( G , attribute = "weight" , ** kwargs ) : if 'precomputed' in kwargs : if kwargs [ 'precomputed' ] != 'adjacency' : warnings . warn ( "Cannot build graph from igraph with precomputed={}. " "Use 'adjacency' instead." . format ( kwargs [ 'precomputed' ] ) , UserWarning ) del kwargs [ 'precomputed' ] try ...
Convert an igraph . Graph to a graphtools . Graph
30,436
def _reduce_data ( self ) : if self . n_pca is not None and self . n_pca < self . data . shape [ 1 ] : tasklogger . log_start ( "PCA" ) if sparse . issparse ( self . data ) : if isinstance ( self . data , sparse . coo_matrix ) or isinstance ( self . data , sparse . lil_matrix ) or isinstance ( self . data , sparse . do...
Private method to reduce data dimension .
30,437
def transform ( self , Y ) : try : return self . data_pca . transform ( Y ) except AttributeError : try : if Y . shape [ 1 ] != self . data . shape [ 1 ] : raise ValueError return Y except IndexError : raise ValueError except ValueError : raise ValueError ( "data of shape {} cannot be transformed" " to graph built on d...
Transform input data Y to reduced data space defined by self . data
30,438
def inverse_transform ( self , Y , columns = None ) : try : if not hasattr ( self , "data_pca" ) : try : if Y . shape [ 1 ] != self . data_nu . shape [ 1 ] : raise ValueError except IndexError : raise ValueError if columns is None : return Y else : columns = np . array ( [ columns ] ) . flatten ( ) return Y [ : , colum...
Transform input data Y to ambient data space defined by self . data
30,439
def _build_kernel ( self ) : kernel = self . build_kernel ( ) kernel = self . symmetrize_kernel ( kernel ) kernel = self . apply_anisotropy ( kernel ) if ( kernel - kernel . T ) . max ( ) > 1e-5 : warnings . warn ( "K should be symmetric" , RuntimeWarning ) if np . any ( kernel . diagonal == 0 ) : warnings . warn ( "K ...
Private method to build kernel matrix
30,440
def diff_aff ( self ) : row_degrees = np . array ( self . kernel . sum ( axis = 1 ) ) . reshape ( - 1 , 1 ) col_degrees = np . array ( self . kernel . sum ( axis = 0 ) ) . reshape ( 1 , - 1 ) if sparse . issparse ( self . kernel ) : return self . kernel . multiply ( 1 / np . sqrt ( row_degrees ) ) . multiply ( 1 / np ....
Symmetric diffusion affinity matrix
30,441
def to_pygsp ( self , ** kwargs ) : from . import api if 'precomputed' in kwargs : if kwargs [ 'precomputed' ] != 'affinity' : warnings . warn ( "Cannot build PyGSPGraph with precomputed={}. " "Using 'affinity' instead." . format ( kwargs [ 'precomputed' ] ) , UserWarning ) del kwargs [ 'precomputed' ] if 'use_pygsp' i...
Convert to a PyGSP graph
30,442
def to_igraph ( self , attribute = "weight" , ** kwargs ) : try : import igraph as ig except ImportError : raise ImportError ( "Please install igraph with " "`pip install --user python-igraph`." ) try : W = self . W except AttributeError : W = self . K . copy ( ) W = utils . set_diagonal ( W , 0 ) return ig . Graph . W...
Convert to an igraph Graph
30,443
def to_pickle ( self , path ) : if int ( sys . version . split ( "." ) [ 1 ] ) < 7 and isinstance ( self , pygsp . graphs . Graph ) : logger = self . logger self . logger = logger . name with open ( path , 'wb' ) as f : pickle . dump ( self , f ) if int ( sys . version . split ( "." ) [ 1 ] ) < 7 and isinstance ( self ...
Save the current Graph to a pickle .
30,444
def _build_weight_from_kernel ( self , kernel ) : weight = kernel . copy ( ) self . _diagonal = weight . diagonal ( ) . copy ( ) weight = utils . set_diagonal ( weight , 0 ) return weight
Private method to build an adjacency matrix from a kernel matrix
30,445
def _check_extension_shape ( self , Y ) : if len ( Y . shape ) != 2 : raise ValueError ( "Expected a 2D matrix. Y has shape {}" . format ( Y . shape ) ) if not Y . shape [ 1 ] == self . data_nu . shape [ 1 ] : if Y . shape [ 1 ] == self . data . shape [ 1 ] : Y = self . transform ( Y ) else : if self . data . shape [ 1...
Private method to check if new data matches self . data
30,446
def get_stop_times ( feed : "Feed" , date : Optional [ str ] = None ) -> DataFrame : f = feed . stop_times . copy ( ) if date is None : return f g = feed . get_trips ( date ) return f [ f [ "trip_id" ] . isin ( g [ "trip_id" ] ) ]
Return a subset of feed . stop_times .
30,447
def valid_str ( x : str ) -> bool : if isinstance ( x , str ) and x . strip ( ) : return True else : return False
Return True if x is a non - blank string ; otherwise return False .
30,448
def valid_date ( x : str ) -> bool : try : if x != dt . datetime . strptime ( x , DATE_FORMAT ) . strftime ( DATE_FORMAT ) : raise ValueError return True except ValueError : return False
Retrun True if x is a valid YYYYMMDD date ; otherwise return False .
30,449
def valid_url ( x : str ) -> bool : if isinstance ( x , str ) and re . match ( URL_PATTERN , x ) : return True else : return False
Return True if x is a valid URL ; otherwise return False .
30,450
def valid_email ( x : str ) -> bool : if isinstance ( x , str ) and re . match ( EMAIL_PATTERN , x ) : return True else : return False
Return True if x is a valid email address ; otherwise return False .
30,451
def valid_color ( x : str ) -> bool : if isinstance ( x , str ) and re . match ( COLOR_PATTERN , x ) : return True else : return False
Return True if x a valid hexadecimal color string without the leading hash ; otherwise return False .
30,452
def check_for_required_columns ( problems : List , table : str , df : DataFrame ) -> List : r = cs . GTFS_REF req_columns = r . loc [ ( r [ "table" ] == table ) & r [ "column_required" ] , "column" ] . values for col in req_columns : if col not in df . columns : problems . append ( [ "error" , f"Missing column {col}" ,...
Check that the given GTFS table has the required columns .
30,453
def check_for_invalid_columns ( problems : List , table : str , df : DataFrame ) -> List : r = cs . GTFS_REF valid_columns = r . loc [ r [ "table" ] == table , "column" ] . values for col in df . columns : if col not in valid_columns : problems . append ( [ "warning" , f"Unrecognized column {col}" , table , [ ] ] ) ret...
Check for invalid columns in the given GTFS DataFrame .
30,454
def check_table ( problems : List , table : str , df : DataFrame , condition , message : str , type_ : str = "error" , ) -> List : indices = df . loc [ condition ] . index . tolist ( ) if indices : problems . append ( [ type_ , message , table , indices ] ) return problems
Check the given GTFS table for the given problem condition .
30,455
def check_column ( problems : List , table : str , df : DataFrame , column : str , checker , type_ : str = "error" , * , column_required : bool = True , ) -> List : f = df . copy ( ) if not column_required : if column not in f . columns : f [ column ] = np . nan f = f . dropna ( subset = [ column ] ) cond = ~ f [ colum...
Check the given column of the given GTFS with the given problem checker .
30,456
def format_problems ( problems : List , * , as_df : bool = False ) -> Union [ List , DataFrame ] : if as_df : problems = pd . DataFrame ( problems , columns = [ "type" , "message" , "table" , "rows" ] ) . sort_values ( [ "type" , "table" ] ) return problems
Format the given problems list as a DataFrame .
30,457
def validate ( feed : "Feed" , * , as_df : bool = True , include_warnings : bool = True ) -> Union [ List , DataFrame ] : problems = [ ] checkers = [ "check_agency" , "check_calendar" , "check_calendar_dates" , "check_fare_attributes" , "check_fare_rules" , "check_feed_info" , "check_frequencies" , "check_routes" , "ch...
Check whether the given feed satisfies the GTFS .
30,458
def summarize ( feed : "Feed" , table : str = None ) -> DataFrame : gtfs_tables = cs . GTFS_REF . table . unique ( ) if table is not None : if table not in gtfs_tables : raise ValueError ( f"{table} is not a GTFS table" ) else : tables = [ table ] else : tables = gtfs_tables frames = [ ] for table in tables : f = getat...
Return a DataFrame summarizing all GTFS tables in the given feed or in the given table if specified .
30,459
def compute_feed_stats ( feed : "Feed" , trip_stats : DataFrame , dates : List [ str ] ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ( ) ts = trip_stats . copy ( ) activity = feed . compute_trip_activity ( dates ) stop_times = feed . stop_times . copy ( ) ts [ [ "start_ti...
Compute some feed stats for the given dates and trip stats .
30,460
def compute_feed_time_series ( feed : "Feed" , trip_stats : DataFrame , dates : List [ str ] , freq : str = "5Min" ) -> DataFrame : rts = feed . compute_route_time_series ( trip_stats , dates , freq = freq ) if rts . empty : return pd . DataFrame ( ) cols = [ "num_trip_starts" , "num_trip_ends" , "num_trips" , "service...
Compute some feed stats in time series form for the given dates and trip stats .
30,461
def create_shapes ( feed : "Feed" , * , all_trips : bool = False ) -> "Feed" : feed = feed . copy ( ) if all_trips : trip_ids = feed . trips [ "trip_id" ] else : trip_ids = feed . trips [ feed . trips [ "shape_id" ] . isnull ( ) ] [ "trip_id" ] f = feed . stop_times [ feed . stop_times [ "trip_id" ] . isin ( trip_ids )...
Given a feed create a shape for every trip that is missing a shape ID . Do this by connecting the stops on the trip with straight lines . Return the resulting feed which has updated shapes and trips tables .
30,462
def compute_convex_hull ( feed : "Feed" ) -> Polygon : m = sg . MultiPoint ( feed . stops [ [ "stop_lon" , "stop_lat" ] ] . values ) return m . convex_hull
Return a Shapely Polygon representing the convex hull formed by the stops of the given Feed .
30,463
def restrict_to_routes ( feed : "Feed" , route_ids : List [ str ] ) -> "Feed" : feed = feed . copy ( ) feed . routes = feed . routes [ feed . routes [ "route_id" ] . isin ( route_ids ) ] . copy ( ) feed . trips = feed . trips [ feed . trips [ "route_id" ] . isin ( route_ids ) ] . copy ( ) trip_ids = feed . trips [ "tri...
Build a new feed by restricting this one to only the stops trips shapes etc . used by the routes with the given list of route IDs . Return the resulting feed .
30,464
def restrict_to_polygon ( feed : "Feed" , polygon : Polygon ) -> "Feed" : feed = feed . copy ( ) stop_ids = feed . get_stops_in_polygon ( polygon ) [ "stop_id" ] st = feed . stop_times . copy ( ) trip_ids = st [ st [ "stop_id" ] . isin ( stop_ids ) ] [ "trip_id" ] feed . trips = feed . trips [ feed . trips [ "trip_id" ...
Build a new feed by restricting this one to only the trips that have at least one stop intersecting the given Shapely polygon then restricting stops routes stop times etc . to those associated with that subset of trips . Return the resulting feed .
30,465
def is_active_trip ( feed : "Feed" , trip_id : str , date : str ) -> bool : service = feed . _trips_i . at [ trip_id , "service_id" ] caldg = feed . _calendar_dates_g if caldg is not None : if ( service , date ) in caldg . groups : et = caldg . get_group ( ( service , date ) ) [ "exception_type" ] . iat [ 0 ] if et == ...
Return True if the feed . calendar or feed . calendar_dates says that the trip runs on the given date ; return False otherwise .
30,466
def get_trips ( feed : "Feed" , date : Optional [ str ] = None , time : Optional [ str ] = None ) -> DataFrame : if feed . trips is None or date is None : return feed . trips f = feed . trips . copy ( ) f [ "is_active" ] = f [ "trip_id" ] . map ( lambda trip_id : feed . is_active_trip ( trip_id , date ) ) f = f [ f [ "...
Return a subset of feed . trips .
30,467
def compute_busiest_date ( feed : "Feed" , dates : List [ str ] ) -> str : f = feed . compute_trip_activity ( dates ) s = [ ( f [ c ] . sum ( ) , c ) for c in f . columns if c != "trip_id" ] return max ( s ) [ 1 ]
Given a list of dates return the first date that has the maximum number of active trips .
30,468
def locate_trips ( feed : "Feed" , date : str , times : List [ str ] ) -> DataFrame : if not hp . is_not_null ( feed . stop_times , "shape_dist_traveled" ) : raise ValueError ( "feed.stop_times needs to have a non-null shape_dist_traveled " "column. You can create it, possibly with some inaccuracies, " "via feed2 = fee...
Return the positions of all trips active on the given date and times
30,469
def trip_to_geojson ( feed : "Feed" , trip_id : str , * , include_stops : bool = False ) -> Dict : t = feed . trips . copy ( ) t = t [ t [ "trip_id" ] == trip_id ] . copy ( ) shid = t [ "shape_id" ] . iat [ 0 ] geometry_by_shape = feed . build_geometry_by_shape ( use_utm = False , shape_ids = [ shid ] ) if not geometry...
Return a GeoJSON representation of the given trip optionally with its stops .
30,470
def clean_column_names ( df : DataFrame ) -> DataFrame : f = df . copy ( ) f . columns = [ col . strip ( ) for col in f . columns ] return f
Strip the whitespace from all column names in the given DataFrame and return the result .
30,471
def drop_zombies ( feed : "Feed" ) -> "Feed" : feed = feed . copy ( ) ids = feed . stop_times [ "stop_id" ] . unique ( ) f = feed . stops cond = f [ "stop_id" ] . isin ( ids ) if "location_type" in f . columns : cond |= f [ "location_type" ] != 0 feed . stops = f [ cond ] . copy ( ) ids = feed . stop_times [ "trip_id" ...
In the given Feed drop stops with no stop times trips with no stop times shapes with no trips routes with no trips and services with no trips in that order . Return the resulting Feed .
30,472
def clean_ids ( feed : "Feed" ) -> "Feed" : feed = feed . copy ( ) for table in cs . GTFS_REF [ "table" ] . unique ( ) : f = getattr ( feed , table ) if f is None : continue for column in cs . GTFS_REF . loc [ cs . GTFS_REF [ "table" ] == table , "column" ] : if column in f . columns and column . endswith ( "_id" ) : t...
In the given Feed strip whitespace from all string IDs and then replace every remaining whitespace chunk with an underscore . Return the resulting Feed .
30,473
def aggregate_routes ( feed : "Feed" , by : str = "route_short_name" , route_id_prefix : str = "route_" ) -> "Feed" : if by not in feed . routes . columns : raise ValueError ( f"Column {by} not in feed.routes" ) feed = feed . copy ( ) routes = feed . routes n = routes . groupby ( by ) . ngroups k = int ( math . log10 (...
Aggregate routes by route short name say and assign new route IDs .
30,474
def drop_invalid_columns ( feed : "Feed" ) -> "Feed" : feed = feed . copy ( ) for table , group in cs . GTFS_REF . groupby ( "table" ) : f = getattr ( feed , table ) if f is None : continue valid_columns = group [ "column" ] . values for col in f . columns : if col not in valid_columns : print ( f"{table}: dropping inv...
Drop all DataFrame columns of the given Feed that are not listed in the GTFS . Return the resulting new Feed .
30,475
def get_routes ( feed : "Feed" , date : Optional [ str ] = None , time : Optional [ str ] = None ) -> DataFrame : if date is None : return feed . routes . copy ( ) trips = feed . get_trips ( date , time ) R = trips [ "route_id" ] . unique ( ) return feed . routes [ feed . routes [ "route_id" ] . isin ( R ) ]
Return a subset of feed . routes
30,476
def compute_route_stats ( feed : "Feed" , trip_stats_subset : DataFrame , dates : List [ str ] , headway_start_time : str = "07:00:00" , headway_end_time : str = "19:00:00" , * , split_directions : bool = False , ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ( ) ts = trip...
Compute route stats for all the trips that lie in the given subset of trip stats and that start on the given dates .
30,477
def compute_route_time_series ( feed : "Feed" , trip_stats_subset : DataFrame , dates : List [ str ] , freq : str = "5Min" , * , split_directions : bool = False , ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ( ) activity = feed . compute_trip_activity ( dates ) ts = trip...
Compute route stats in time series form for the trips that lie in the trip stats subset and that start on the given dates .
30,478
def build_route_timetable ( feed : "Feed" , route_id : str , dates : List [ str ] ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ( ) t = pd . merge ( feed . trips , feed . stop_times ) t = t [ t [ "route_id" ] == route_id ] . copy ( ) a = feed . compute_trip_activity ( dat...
Return a timetable for the given route and dates .
30,479
def route_to_geojson ( feed : "Feed" , route_id : str , date : Optional [ str ] = None , * , include_stops : bool = False , ) -> Dict : shapes = ( feed . get_trips ( date = date ) . loc [ lambda x : x [ "route_id" ] == route_id , "shape_id" ] . unique ( ) ) if not shapes . size : return { "type" : "FeatureCollection" ,...
Return a GeoJSON rendering of the route and optionally its stops .
30,480
def build_geometry_by_shape ( feed : "Feed" , shape_ids : Optional [ List [ str ] ] = None , * , use_utm : bool = False , ) -> Dict : if feed . shapes is None : return { } d = { } shapes = feed . shapes . copy ( ) if shape_ids is not None : shapes = shapes [ shapes [ "shape_id" ] . isin ( shape_ids ) ] if use_utm : for...
Return a dictionary with structure shape_id - > Shapely LineString of shape .
30,481
def append_dist_to_shapes ( feed : "Feed" ) -> "Feed" : if feed . shapes is None : raise ValueError ( "This function requires the feed to have a shapes.txt file" ) feed = feed . copy ( ) f = feed . shapes m_to_dist = hp . get_convert_dist ( "m" , feed . dist_units ) def compute_dist ( group ) : group = group . sort_val...
Calculate and append the optional shape_dist_traveled field in feed . shapes in terms of the distance units feed . dist_units . Return the resulting Feed .
30,482
def geometrize_shapes ( shapes : DataFrame , * , use_utm : bool = False ) -> DataFrame : import geopandas as gpd f = shapes . copy ( ) . sort_values ( [ "shape_id" , "shape_pt_sequence" ] ) def my_agg ( group ) : d = { } d [ "geometry" ] = sg . LineString ( group [ [ "shape_pt_lon" , "shape_pt_lat" ] ] . values ) retur...
Given a GTFS shapes DataFrame convert it to a GeoPandas GeoDataFrame and return the result . The result has a geometry column of WGS84 LineStrings instead of the columns shape_pt_sequence shape_pt_lon shape_pt_lat and shape_dist_traveled . If use_utm then use local UTM coordinates for the geometries .
30,483
def get_segment_length ( linestring : LineString , p : Point , q : Optional [ Point ] = None ) -> float : d_p = linestring . project ( p ) if q is not None : d_q = linestring . project ( q ) d = abs ( d_p - d_q ) else : d = d_p return d
Given a Shapely linestring and two Shapely points project the points onto the linestring and return the distance along the linestring between the two points . If q is None then return the distance from the start of the linestring to the projection of p . The distance is measured in the native coordinates of the linestr...
30,484
def get_convert_dist ( dist_units_in : str , dist_units_out : str ) -> Callable [ [ float ] , float ] : di , do = dist_units_in , dist_units_out DU = cs . DIST_UNITS if not ( di in DU and do in DU ) : raise ValueError ( f"Distance units must lie in {DU}" ) d = { "ft" : { "ft" : 1 , "m" : 0.3048 , "mi" : 1 / 5280 , "km"...
Return a function of the form
30,485
def almost_equal ( f : DataFrame , g : DataFrame ) -> bool : if f . empty or g . empty : return f . equals ( g ) else : F = ( f . sort_index ( axis = 1 ) . sort_values ( list ( f . columns ) ) . reset_index ( drop = True ) ) G = ( g . sort_index ( axis = 1 ) . sort_values ( list ( g . columns ) ) . reset_index ( drop =...
Return True if and only if the given DataFrames are equal after sorting their columns names sorting their values and reseting their indices .
30,486
def linestring_to_utm ( linestring : LineString ) -> LineString : proj = lambda x , y : utm . from_latlon ( y , x ) [ : 2 ] return transform ( proj , linestring )
Given a Shapely LineString in WGS84 coordinates convert it to the appropriate UTM coordinates . If inverse then do the inverse .
30,487
def get_active_trips_df(trip_times: DataFrame) -> DataFrame:
    """
    Count the number of trips in ``trip_times`` that are active at any
    given time.

    ``trip_times`` must have columns ``start_time`` and ``end_time``.
    Returns a Series indexed by time whose value at each time is the
    number of trips that have started but not yet ended.
    """
    # +1 at each trip start, -1 at each trip end...
    starts = pd.Series(1, index=trip_times.start_time)
    ends = pd.Series(-1, index=trip_times.end_time)
    deltas = pd.concat([starts, ends])
    # ...then collapse duplicate timestamps and accumulate to get the
    # running count of active trips.
    active = deltas.groupby(level=0, sort=True).sum().cumsum().ffill()
    return active
Count the number of trips in trip_times that are active at any given time .
30,488
def combine_time_series ( time_series_dict : Dict , kind : str , * , split_directions : bool = False ) -> DataFrame : if kind not in [ "stop" , "route" ] : raise ValueError ( "kind must be 'stop' or 'route'" ) names = [ "indicator" ] if kind == "stop" : names . append ( "stop_id" ) else : names . append ( "route_id" ) ...
Combine the many time series DataFrames in the given dictionary into one time series DataFrame with hierarchical columns .
30,489
def compute_stop_stats_base ( stop_times_subset : DataFrame , trip_subset : DataFrame , headway_start_time : str = "07:00:00" , headway_end_time : str = "19:00:00" , * , split_directions : bool = False , ) -> DataFrame : if trip_subset . empty : return pd . DataFrame ( ) f = pd . merge ( stop_times_subset , trip_subset...
Given a subset of a stop times DataFrame and a subset of a trips DataFrame return a DataFrame that provides summary stats about the stops in the inner join of the two DataFrames .
30,490
def compute_stop_time_series_base ( stop_times_subset : DataFrame , trip_subset : DataFrame , freq : str = "5Min" , date_label : str = "20010101" , * , split_directions : bool = False , ) -> DataFrame : if trip_subset . empty : return pd . DataFrame ( ) f = pd . merge ( stop_times_subset , trip_subset ) if split_direct...
Given a subset of a stop times DataFrame and a subset of a trips DataFrame return a DataFrame that provides a summary time series about the stops in the inner join of the two DataFrames .
30,491
def get_stops ( feed : "Feed" , date : Optional [ str ] = None , trip_id : Optional [ str ] = None , route_id : Optional [ str ] = None , * , in_stations : bool = False , ) -> DataFrame : s = feed . stops . copy ( ) if date is not None : A = feed . get_stop_times ( date ) [ "stop_id" ] s = s [ s [ "stop_id" ] . isin ( ...
Return a section of feed . stops .
30,492
def build_geometry_by_stop ( feed : "Feed" , stop_ids : Optional [ List [ str ] ] = None , * , use_utm : bool = False , ) -> Dict : d = { } stops = feed . stops . copy ( ) if stop_ids is not None : stops = stops [ stops [ "stop_id" ] . isin ( stop_ids ) ] stops = stops [ stops . stop_lat . notna ( ) & stops . stop_lon ...
Return a dictionary with the structure stop_id - > Shapely Point with coordinates of the stop .
30,493
def compute_stop_stats ( feed : "Feed" , dates : List [ str ] , stop_ids : Optional [ List [ str ] ] = None , headway_start_time : str = "07:00:00" , headway_end_time : str = "19:00:00" , * , split_directions : bool = False , ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ...
Compute stats for all stops for the given dates . Optionally restrict to the stop IDs given .
30,494
def build_stop_timetable ( feed : "Feed" , stop_id : str , dates : List [ str ] ) -> DataFrame : dates = feed . restrict_dates ( dates ) if not dates : return pd . DataFrame ( ) t = pd . merge ( feed . trips , feed . stop_times ) t = t [ t [ "stop_id" ] == stop_id ] . copy ( ) a = feed . compute_trip_activity ( dates )...
Return a DataFrame containing the timetable for the given stop ID and dates .
30,495
def get_stops_in_polygon ( feed : "Feed" , polygon : Polygon , geo_stops = None ) -> DataFrame : if geo_stops is not None : f = geo_stops . copy ( ) else : f = geometrize_stops ( feed . stops ) cols = f . columns f [ "hit" ] = f [ "geometry" ] . within ( polygon ) f = f [ f [ "hit" ] ] [ cols ] return ungeometrize_stop...
Return the slice of feed . stops that contains all stops that lie within the given Shapely Polygon object that is specified in WGS84 coordinates .
30,496
def geometrize_stops ( stops : List [ str ] , * , use_utm : bool = False ) -> DataFrame : import geopandas as gpd g = ( stops . assign ( geometry = lambda x : [ sg . Point ( p ) for p in x [ [ "stop_lon" , "stop_lat" ] ] . values ] ) . drop ( [ "stop_lon" , "stop_lat" ] , axis = 1 ) . pipe ( lambda x : gpd . GeoDataFra...
Given a stops DataFrame convert it to a GeoPandas GeoDataFrame and return the result .
30,497
def map_stops ( feed : "Feed" , stop_ids : List [ str ] , stop_style : Dict = STOP_STYLE ) : import folium as fl my_map = fl . Map ( tiles = "cartodbpositron" ) group = fl . FeatureGroup ( name = "Stops" ) stops = feed . stops . loc [ lambda x : x . stop_id . isin ( stop_ids ) ] . fillna ( "n/a" ) for prop in stops . t...
Return a Folium map showing the given stops .
30,498
def get_dates ( feed : "Feed" , * , as_date_obj : bool = False ) -> List [ str ] : dates = [ ] if feed . calendar is not None and not feed . calendar . empty : if "start_date" in feed . calendar . columns : dates . append ( feed . calendar [ "start_date" ] . min ( ) ) if "end_date" in feed . calendar . columns : dates ...
Return a list of dates for which the given Feed is valid which could be the empty list if the Feed has no calendar information .
30,499
def write_gtfs ( feed : "Feed" , path : Path , ndigits : int = 6 ) -> None : path = Path ( path ) if path . suffix == ".zip" : zipped = True tmp_dir = tempfile . TemporaryDirectory ( ) new_path = Path ( tmp_dir . name ) else : zipped = False if not path . exists ( ) : path . mkdir ( ) new_path = path for table in cs . ...
Export the given feed to the given path . If the path end in . zip then write the feed as a zip archive . Otherwise assume the path is a directory and write the feed as a collection of CSV files to that directory creating the directory if it does not exist . Round all decimals to ndigits decimal places . All distances ...