idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,700
def extract_labels(self) -> np.ndarray:
    """Extract condition labels: the condition index of each epoch.

    Nonzero entries of self (condition x epoch x ...) mark which
    condition each epoch belongs to; the first occurrence per epoch wins.
    """
    cond_ix, epoch_ix, _ = np.where(self)
    _, first_seen = np.unique(epoch_ix, return_index=True)
    return cond_ix[first_seen]
Extract condition labels .
15,701
def fit(self, X, y=None):
    """Compute the probabilistic Shared Response Model.

    Parameters
    ----------
    X : list of 2D arrays
        Per-subject data (voxels x samples); entries may be None for
        subjects held on other MPI ranks.
    y : ignored
        Present for scikit-learn API compatibility.

    Raises
    ------
    ValueError
        If there are too few subjects, ranks disagree on the subject
        count, there are too few samples, or sample counts differ
        between subjects.
    """
    logger.info('Starting Probabilistic SRM')
    if len(X) <= 1:
        raise ValueError("There are not enough subjects "
                         "({0:d}) to train the model.".format(len(X)))
    number_subjects = len(X)
    # Every MPI rank must see the same number of subjects.
    number_subjects_vec = self.comm.allgather(number_subjects)
    for rank in range(self.comm.Get_size()):
        if number_subjects_vec[rank] != number_subjects:
            raise ValueError("Not all ranks have same number of subjects")
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    shape0 = np.zeros((number_subjects,), dtype=int)
    shape1 = np.zeros((number_subjects,), dtype=int)
    for subject in range(number_subjects):
        if X[subject] is not None:
            assert_all_finite(X[subject])
            shape0[subject] = X[subject].shape[0]
            shape1[subject] = X[subject].shape[1]
    # Gather shapes across ranks (each rank holds only some subjects).
    shape0 = self.comm.allreduce(shape0, op=MPI.SUM)
    shape1 = self.comm.allreduce(shape1, op=MPI.SUM)
    number_trs = np.min(shape1)
    for subject in range(number_subjects):
        if shape1[subject] < self.features:
            raise ValueError(
                "There are not enough samples to train the model with "
                "{0:d} features.".format(self.features))
        if shape1[subject] != number_trs:
            raise ValueError("Different number of samples between subjects"
                             ".")
    # Run the probabilistic SRM EM algorithm.
    self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
    return self
Compute the probabilistic Shared Response Model
15,702
def transform(self, X, y=None):
    """Project each subject's data into the Shared Response space.

    Entries of X that are None stay None in the result.
    """
    if not hasattr(self, 'w_'):
        raise NotFittedError("The model fit has not been run yet.")
    if len(X) != len(self.w_):
        raise ValueError("The number of subjects does not match the one"
                         " in the model.")
    shared = [None] * len(X)
    for idx, data in enumerate(X):
        if data is not None:
            shared[idx] = self.w_[idx].T.dot(data)
    return shared
Use the model to transform matrix to Shared Response space
15,703
def transform_subject(self, X):
    """Fit a mapping for a new subject under the existing shared response.

    The new subject is assumed to have received the same stimulation
    (same number of TRs) as the training subjects.
    """
    if not hasattr(self, 'w_'):
        raise NotFittedError("The model fit has not been run yet.")
    if X.shape[1] != self.s_.shape[1]:
        raise ValueError("The number of timepoints(TRs) does not match the"
                         "one in the model.")
    return self._update_transform_subject(X, self.s_)
Transform a new subject using the existing model. The subject is assumed to have received equivalent stimulation.
15,704
def fit(self, X, y=None):
    """Learn an event segmentation on training data.

    X may be a single (time x voxel) array or a list of them; each is
    transposed to (voxel x time) and z-scored before EM iterations.
    """
    X = copy.deepcopy(X)
    if type(X) is not list:
        X = check_array(X)
        X = [X]
    n_train = len(X)
    # Work with voxel x time matrices internally.
    for i in range(n_train):
        X[i] = X[i].T
    self.classes_ = np.arange(self.n_events)
    n_dim = X[0].shape[0]
    for i in range(n_train):
        assert X[i].shape[0] == n_dim
    for i in range(n_train):
        X[i] = stats.zscore(X[i], axis=1, ddof=1)
    log_gamma = []
    for i in range(n_train):
        log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
    step = 1
    best_ll = float("-inf")
    self.ll_ = np.empty((0, n_train))
    while step <= self.n_iter:
        iteration_var = self.step_var(step)
        # Per-timepoint soft assignments for each training set.
        seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
                    for lg in log_gamma]
        mean_pat = np.empty((n_train, n_dim, self.n_events))
        for i in range(n_train):
            mean_pat[i, :, :] = X[i].dot(seg_prob[i])
        mean_pat = np.mean(mean_pat, axis=0)
        self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
        for i in range(n_train):
            logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
            log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
        # Stop when the mean log-likelihood stops improving.
        if np.mean(self.ll_[-1, :]) < best_ll:
            self.ll_ = self.ll_[:-1, :]
            break
        self.segments_ = [np.exp(lg) for lg in log_gamma]
        self.event_var_ = iteration_var
        self.event_pat_ = mean_pat
        best_ll = np.mean(self.ll_[-1, :])
        logger.debug("Fitting step %d, LL=%f", step, best_ll)
        step += 1
    return self
Learn a segmentation on training data
15,705
def _logprob_obs ( self , data , mean_pat , var ) : n_vox = data . shape [ 0 ] t = data . shape [ 1 ] data_z = stats . zscore ( data , axis = 0 , ddof = 1 ) mean_pat_z = stats . zscore ( mean_pat , axis = 0 , ddof = 1 ) logprob = np . empty ( ( t , self . n_events ) ) if type ( var ) is not np . ndarray : var = var * np . ones ( self . n_events ) for k in range ( self . n_events ) : logprob [ : , k ] = - 0.5 * n_vox * np . log ( 2 * np . pi * var [ k ] ) - 0.5 * np . sum ( ( data_z . T - mean_pat_z [ : , k ] ) . T ** 2 , axis = 0 ) / var [ k ] logprob /= n_vox return logprob
Log probability of observing each timepoint under each event model
15,706
def _log(self, x):
    """Elementwise log with values <= 0 mapped to -inf (via masked_log)."""
    original_shape = x.shape
    logged = utils.masked_log(x.flatten())
    return logged.reshape(original_shape)
Modified version of np.log that manually sets values <= 0 to -inf.
15,707
def set_event_patterns(self, event_pat):
    """Manually set the HMM's per-event mean patterns (copied, not aliased)."""
    if event_pat.shape[1] != self.n_events:
        raise ValueError(("Number of columns of event_pat must match "
                          "number of events"))
    self.event_pat_ = event_pat.copy()
Set HMM event patterns manually
15,708
def calc_weighted_event_var(self, D, weights, event_pat):
    """Normalized weighted variance of D around each event pattern.

    D is (time x voxels), weights is (time x events), event_pat is
    (voxels x events). Timepoints with negligible weight are excluded.
    """
    Dz = stats.zscore(D, axis=1, ddof=1)
    n_events = event_pat.shape[1]
    ev_var = np.empty(n_events)
    for e in range(n_events):
        # Ignore timepoints whose weight is < max/1000.
        nz = weights[:, e] > np.max(weights[:, e]) / 1000
        w = weights[nz, e]
        sq_dev = np.sum(np.square(Dz[nz, :] - event_pat[:, e]), axis=1)
        sumsq = np.dot(w, sq_dev)
        denom = np.sum(w) - np.sum(np.square(w)) / np.sum(w)
        ev_var[e] = sumsq / denom
    return ev_var / D.shape[1]
Computes normalized weighted variance around event pattern
15,709
def model_prior(self, t):
    """Prior segmentation of t timepoints (forward-backward on zero evidence)."""
    lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
    return np.exp(lg), test_ll
Returns the prior probability of the HMM
15,710
def chain_getattr(obj, attr, value=None):
    """Resolve a dotted attribute chain on obj; return value if any link is missing."""
    try:
        return _resolve_value(safe_chain_getattr(obj, attr))
    except AttributeError:
        return value
Get chain attribute for an object .
15,711
def iter_festival_countdown(countdown: Optional[int] = None,
                            date_obj: MDate = None,
                            lang: str = 'zh-Hans') -> FestivalCountdownIterable:
    """Return countdowns of festivals for the given language and date."""
    factory = FestivalFactory(lang=lang)
    return factory.iter_festival_countdown(countdown, date_obj)
Return countdown of festivals .
15,712
def parse_year_days(year_info):
    """Total number of days in the lunar year encoded by year_info."""
    _, leap_days = _parse_leap(year_info)
    total = leap_days
    # Bit (16 - month) selects a 30-day (1) vs 29-day (0) month.
    for month in range(1, 13):
        total += (year_info >> (16 - month)) % 2 + 29
    return total
Parse year days from a year info .
15,713
def _iter_year_month(year_info):
    """Yield (month, days, is_leap) for each month of a lunar year."""
    leap_month, leap_days = _parse_leap(year_info)
    months = [(m, 0) for m in range(1, 13)]
    if leap_month > 0:
        # The leap month repeats its month number with the leap flag set.
        months.insert(leap_month, (leap_month, 1))
    for month, is_leap in months:
        if is_leap:
            days = leap_days
        else:
            days = (year_info >> (16 - month)) % 2 + 29
        yield month, days, is_leap
Iter the month days in a lunar year .
15,714
def model_typedefs(vk, model):
    """Fill the model with typedefs from the Vulkan registry.

    Covers bitmask and basetype aliases, handle types, and a few
    hard-coded platform types.
    """
    model['typedefs'] = {}
    all_types = vk['registry']['types']['type']
    bitmasks = [x for x in all_types if x.get('@category') == 'bitmask']
    basetypes = [x for x in all_types if x.get('@category') == 'basetype']
    for typedef in bitmasks + basetypes:
        if not typedef.get('type'):
            continue
        model['typedefs'][typedef['name']] = typedef['type']
    handles = [x for x in all_types if x.get('@category') == 'handle']
    for handle in handles:
        if 'name' not in handle or 'type' not in handle:
            continue
        n = handle['name']
        t = handle['type']
        if t == 'VK_DEFINE_HANDLE':
            # Dispatchable handles are pointers to opaque structs.
            model['typedefs']['struct %s_T' % n] = '*%s' % n
        elif t == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
            # BUG FIX: the original tested VK_DEFINE_HANDLE twice, so
            # non-dispatchable handles never got their uint64_t typedef
            # and dispatchable handles got a conflicting one.
            model['typedefs'][n] = 'uint64_t'
    for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface',
                 'MirConnection', 'MirSurface', 'ANativeWindow',
                 'SECURITY_ATTRIBUTES']:
        model['typedefs'][name] = 'struct %s' % name
    model['typedefs'].update({
        'Window': 'uint32_t',
        'VisualID': 'uint32_t',
        'xcb_window_t': 'uint32_t',
        'xcb_visualid_t': 'uint32_t'})
Fill the model with typedefs
15,715
def model_macros(vk, model):
    """Fill the model with API constants (macros) and extension enums."""
    model['macros'] = {}
    macros = [x for x in vk['registry']['enums']
              if x.get('@type') not in ('bitmask', 'enum')]
    # Registry spells some constants as C expressions; map to values.
    special_values = {'1000.0f': '1000.0',
                      '(~0U)': 0xffffffff,
                      '(~0ULL)': -1,
                      '(~0U-1)': 0xfffffffe,
                      '(~0U-2)': 0xfffffffd}
    for macro in macros[0]['enum']:
        if '@name' not in macro or '@value' not in macro:
            continue
        value = macro['@value']
        model['macros'][macro['@name']] = special_values.get(value, value)
    for ext in get_extensions_filtered(vk):
        model['macros'][ext['@name']] = 1
        for req in ext['require']:
            for enum in req['enum']:
                ename = enum['@name']
                evalue = parse_constant(enum, int(ext['@number']))
                if enum.get('@extends') == 'VkResult':
                    model['enums']['VkResult'][ename] = evalue
                else:
                    model['macros'][ename] = evalue
Fill the model with macros
15,716
def model_funcpointers(vk, model):
    """Map each function-pointer type to the struct that uses it."""
    model['funcpointers'] = {}
    all_types = vk['registry']['types']['type']
    funcs = [x for x in all_types if x.get('@category') == 'funcpointer']
    structs = [x for x in all_types if x.get('@category') == 'struct']
    for func in funcs:
        pfn_name = func['name']
        for struct in structs:
            if 'member' not in struct:
                continue
            for member in struct['member']:
                if member['type'] == pfn_name:
                    model['funcpointers'][pfn_name] = struct['@name']
Fill the model with function pointer
15,717
def model_exceptions(vk, model):
    """Split VkResult codes into exception (success) and error classes."""
    model['exceptions'] = {}
    model['errors'] = {}
    all_codes = model['enums']['VkResult']
    success_names = set()
    error_names = set()
    for command in vk['registry']['commands']['command']:
        success_names.update(command.get('@successcodes', '').split(','))
        error_names.update(command.get('@errorcodes', '').split(','))
    for key, value in all_codes.items():
        # VK_SUCCESS and the range markers don't map to classes.
        if key.startswith('VK_RESULT') or key == 'VK_SUCCESS':
            continue
        name = inflection.camelize(key.lower())
        if key in success_names:
            model['exceptions'][value] = name
        elif key in error_names:
            model['errors'][value] = name
        else:
            print('Warning: return code %s unused' % key)
Fill the model with exceptions and errors
15,718
def model_constructors(vk, model):
    """Fill the model with struct/union constructor descriptions."""
    model['constructors'] = []
    structs = [x for x in vk['registry']['types']['type']
               if x.get('@category') in {'struct', 'union'}]

    def parse_len(member):
        # Keep only a plain leading length-member name; drop latex
        # expressions and null-terminated markers.
        mlen = member.get('@len')
        if not mlen:
            return None
        if ',' in mlen:
            mlen = mlen.split(',')[0]
        if 'latex' in mlen or 'null-terminated' in mlen:
            return None
        return mlen

    for struct in structs:
        if 'member' not in struct:
            continue
        members = [{'name': m['name'],
                    'type': m['type'],
                    'default': m.get('@values'),
                    'len': parse_len(m)}
                   for m in struct['member']]
        model['constructors'].append({'name': struct['@name'],
                                      'members': members})
Fill the model with constructors
15,719
def model_ext_functions(vk, model):
    """Fill the model with per-extension function names (de-aliased)."""
    model['ext_functions'] = {'instance': {}, 'device': {}}
    # model['alias'] maps old -> new; invert to map new -> old.
    reverse_alias = {v: k for k, v in model['alias'].items()}
    for extension in get_extensions_filtered(vk):
        for req in extension['require']:
            if not req.get('command'):
                continue
            ext_type = extension['@type']
            for command in req['command']:
                name = command['@name']
                model['ext_functions'][ext_type][name] = \
                    reverse_alias.get(name, name)
Fill the model with extensions functions
15,720
def model_alias(vk, model):
    """Fill the model with handle and command alias mappings."""
    model['alias'] = {}
    registry = vk['registry']
    for type_node in registry['types']['type']:
        if (type_node.get('@category', None) == 'handle'
                and type_node.get('@alias')):
            model['alias'][type_node['@alias']] = type_node['@name']
    for command in registry['commands']['command']:
        if command.get('@alias'):
            model['alias'][command['@alias']] = command['@name']
Fill the model with alias since V1
15,721
def format_vk(vk):
    """Normalize vk so every extension's 'require' entry is a list."""
    for ext in get_extensions_filtered(vk):
        requirement = ext['require']
        if not isinstance(requirement, list):
            ext['require'] = [requirement]
Format vk before using it
15,722
def generate_py():
    """Generate the python output file from the Vulkan registry model."""
    model = {}
    vk = init()
    format_vk(vk)
    # Order matters: later steps read model entries added by earlier ones.
    for build_step in (model_alias, model_typedefs, model_enums,
                       model_macros, model_funcpointers, model_exceptions,
                       model_constructors, model_functions,
                       model_ext_functions):
        build_step(vk, model)
    env = jinja2.Environment(autoescape=False,
                             trim_blocks=True,
                             lstrip_blocks=True,
                             loader=jinja2.FileSystemLoader(HERE))
    out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
    template = env.get_template('vulkan.template.py')
    with open(out_file, 'w') as out:
        out.write(template.render(model=model))
Generate the python output file
15,723
def generate_cdef():
    """Generate the preprocessed cdef header by running cpp."""
    include_libc_path = path.join(HERE, 'fake_libc_include')
    include_vulkan_path = path.join(HERE, 'vulkan_include')
    out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
    header = path.join(include_vulkan_path, 'vulkan.h')
    platform_defines = ['-DVK_USE_PLATFORM_XCB_KHR',
                        '-DVK_USE_PLATFORM_WAYLAND_KHR',
                        '-DVK_USE_PLATFORM_ANDROID_KHR',
                        '-DVK_USE_PLATFORM_WIN32_KHR',
                        '-DVK_USE_PLATFORM_XLIB_KHR']
    command = (['cpp', '-std=c99', '-P', '-nostdinc',
                '-I' + include_libc_path,
                '-I' + include_vulkan_path,
                '-o' + out_file]
               + platform_defines + [header])
    subprocess.run(command, check=True)
Generate the cdef output file
15,724
def mock_django_connection(disabled_features=None):
    """Overwrite the Django database configuration with a mocked version.

    Any attempt to run SQL through the mocked connection raises
    NotSupportedError, so unit tests cannot hit a real database.
    """
    db = connections.databases['default']
    db['PASSWORD'] = '****'
    db['USER'] = '**Database disabled for unit tests**'
    ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
    mock_connection = ConnectionHandler.__getitem__.return_value
    if disabled_features:
        for feature in disabled_features:
            setattr(mock_connection.features, feature, False)
    mock_ops = mock_connection.ops

    def compiler(queryset, connection, using, **kwargs):
        # Every compiled query fails loudly instead of touching a DB.
        result = MagicMock(name='mock_connection.ops.compiler()')
        result.execute_sql.side_effect = NotSupportedError(
            "Mock database tried to execute SQL for {} model.".format(
                queryset.model._meta.object_name))
        result.has_results.side_effect = result.execute_sql.side_effect
        return result

    mock_ops.compiler.return_value.side_effect = compiler
    mock_ops.integer_field_range.return_value = (-sys.maxsize - 1,
                                                 sys.maxsize)
    mock_ops.max_name_length.return_value = sys.maxsize
    Model.refresh_from_db = Mock()
Overwrite the Django database configuration with a mocked version .
15,725
def find_all_models ( models ) : for model in models : yield model for parent in model . _meta . parents . keys ( ) : for parent_model in find_all_models ( ( parent , ) ) : yield parent_model
Yield all models and their parents .
15,726
def mocked_relations(*models):
    """Mock save/objects/related managers on models for pure unit tests."""
    patchers = []
    for model in find_all_models(models):
        if isinstance(model.save, Mock):
            # Already patched (e.g. by a parent), so skip it.
            continue
        model_name = model._meta.object_name
        patchers.append(_patch_save(model, model_name))
        if hasattr(model, 'objects'):
            patchers.append(_patch_objects(model, model_name))
        for related_object in chain(model._meta.related_objects,
                                    model._meta.many_to_many):
            name = related_object.name
            if name not in model.__dict__ and related_object.one_to_many:
                name += '_set'
            if name in model.__dict__:
                # Only mock managers that are defined on this model.
                if getattr(model, name, None):
                    patchers.append(
                        _patch_relation(model, name, related_object))
    return PatcherChain(patchers, pass_mocks=False)
Mock all related field managers to make pure unit tests possible .
15,727
def decorate_callable(self, target):
    """Called as a decorator: apply all patchers, absorbing mock args if needed."""
    def absorb_mocks(test_case, *args):
        return target(test_case)

    keep_target = self.pass_mocks or isinstance(target, type)
    result = target if keep_target else absorb_mocks
    for patcher in self.patchers:
        result = patcher(result)
    return result
Called as a decorator .
15,728
def _zero_on_type_error ( column_fn ) : if not column_fn : return column_fn if not callable ( column_fn ) : raise TypeError ( 'column functions must be callable' ) @ functools . wraps ( column_fn ) def wrapped ( column ) : try : return column_fn ( column ) except TypeError : if isinstance ( column , np . ndarray ) : return column . dtype . type ( ) else : raise return wrapped
Wrap a function on an np . ndarray to return 0 on a type error .
15,729
def _varargs_labels_as_list(label_list):
    """Normalize varargs labels into a single flat list of labels."""
    if not label_list:
        return []
    first = label_list[0]
    if not _is_non_string_iterable(first):
        # Labels were passed as separate arguments.
        return label_list
    if len(label_list) == 1:
        # A single list of labels was passed.
        return first
    raise ValueError("Labels {} contain more than list.".format(label_list),
                     "Pass just one list of labels.")
Return a flat list of labels, whether given as separate label arguments or as a single list of labels.
15,730
def _assert_same ( values ) : assert len ( values ) > 0 first , rest = values [ 0 ] , values [ 1 : ] for v in rest : assert v == first return first
Assert that all values are identical and return the unique value .
15,731
def _collected_label ( collect , label ) : if not collect . __name__ . startswith ( '<' ) : return label + ' ' + collect . __name__ else : return label
Label of a collected column .
15,732
def _is_non_string_iterable ( value ) : if isinstance ( value , str ) : return False if hasattr ( value , '__iter__' ) : return True if isinstance ( value , collections . abc . Sequence ) : return True return False
Whether a value is an iterable other than a string.
15,733
def _vertical_x ( axis , ticks = None , max_width = 5 ) : if ticks is None : ticks = axis . get_xticks ( ) if ( np . array ( ticks ) == np . rint ( ticks ) ) . all ( ) : ticks = np . rint ( ticks ) . astype ( np . int ) if max ( [ len ( str ( tick ) ) for tick in ticks ] ) > max_width : axis . set_xticklabels ( ticks , rotation = 'vertical' )
Switch labels to vertical if they are long .
15,734
def read_table(cls, filepath_or_buffer, *args, **vargs):
    """Read a table from a file path, URL, or buffer via pandas.

    Defaults the separator to ',' for .csv paths.
    """
    try:
        path = urllib.parse.urlparse(filepath_or_buffer).path
        if 'data8.berkeley.edu' in filepath_or_buffer:
            raise ValueError('data8.berkeley.edu requires authentication, '
                             'which is not supported.')
    except AttributeError:
        # Not a string (e.g. an open buffer): use it directly.
        path = filepath_or_buffer
    try:
        if 'sep' not in vargs and path.endswith('.csv'):
            vargs['sep'] = ','
    except AttributeError:
        pass
    df = pandas.read_table(filepath_or_buffer, *args, **vargs)
    return cls.from_df(df)
Read a table from a file or web address .
15,735
def _with_columns(self, columns):
    """Build a new table pairing self's labels with the given columns."""
    fresh = type(self)()
    for label, column in zip(self.labels, columns):
        self._add_column_and_format(fresh, label, column)
    return fresh
Create a table from a sequence of columns copying column labels .
15,736
def _add_column_and_format(self, table, label, column):
    """Add a column to table, copying self's formatter for it if any."""
    label = self._as_label(label)
    table[label] = column
    if label in self._formats:
        table._formats[label] = self._formats[label]
Add a column to table copying the formatter from self .
15,737
def from_df(cls, df):
    """Convert a Pandas DataFrame into a Table.

    Parameters
    ----------
    df : pandas.DataFrame
        Source frame; each column becomes a table column.
    """
    # (The original bound df.columns to an unused local; removed.)
    t = cls()
    for label in df.columns:
        t.append_column(label, df[label])
    return t
Convert a Pandas DataFrame into a Table .
15,738
def from_array(cls, arr):
    """Convert a structured NumPy array into a Table (one column per field)."""
    columns = [(field, arr[field]) for field in arr.dtype.names]
    return cls().with_columns(columns)
Convert a structured NumPy array into a Table .
15,739
def column(self, index_or_label):
    """Return the values of a column (by label or index) as an array."""
    if (isinstance(index_or_label, str)
            and index_or_label not in self.labels):
        raise ValueError(
            'The column "{}" is not in the table. The table contains '
            'these columns: {}'.format(
                index_or_label, ', '.join(self.labels)))
    if (isinstance(index_or_label, int)
            and not 0 <= index_or_label < len(self.labels)):
        raise ValueError(
            'The index {} is not in the table. Only indices between '
            '0 and {} are valid'.format(
                index_or_label, len(self.labels) - 1))
    return self._columns[self._as_label(index_or_label)]
Return the values of a column as an array .
15,740
def values(self):
    """Table data as a (rows x columns) array; object dtype if columns mix dtypes."""
    distinct_dtypes = {col.dtype for col in self.columns}
    dtype = object if len(distinct_dtypes) > 1 else None
    return np.array(self.columns, dtype=dtype).T
Return data in self as a numpy array .
15,741
def apply(self, fn, *column_or_columns):
    """Apply fn over the given columns, or over whole rows when none given."""
    if not column_or_columns:
        return np.array([fn(row) for row in self.rows])
    if (len(column_or_columns) == 1
            and _is_non_string_iterable(column_or_columns[0])):
        warnings.warn("column lists are deprecated; pass each as an argument",
                      FutureWarning)
        column_or_columns = column_or_columns[0]
    rows = zip(*self.select(*column_or_columns).columns)
    return np.array([fn(*row) for row in rows])
Apply fn to each element or elements of column_or_columns . If no column_or_columns provided fn is applied to each row .
15,742
def set_format(self, column_or_columns, formatter):
    """Set the formatter for one or more columns; returns self."""
    if inspect.isclass(formatter):
        formatter = formatter()
    if callable(formatter) and not hasattr(formatter, 'format_column'):
        formatter = _formats.FunctionFormatter(formatter)
    if not hasattr(formatter, 'format_column'):
        raise Exception('Expected Formatter or function: ' + str(formatter))
    for label in self._as_labels(column_or_columns):
        if formatter.converts_values:
            self[label] = formatter.convert_column(self[label])
        self._formats[label] = formatter
    return self
Set the format of a column .
15,743
def move_to_start(self, column_label):
    """Move a column to be first in order; returns self."""
    self._columns.move_to_end(column_label, last=False)
    return self
Move a column to the first in order .
15,744
def append(self, row_or_table):
    """Append one row, or every row of another table, in place.

    An appended table must have all columns of self; returns self
    (or None for falsy input).
    """
    if not row_or_table:
        return
    if isinstance(row_or_table, Table):
        incoming = row_or_table
        columns = list(incoming.select(self.labels)._columns.values())
        n = incoming.num_rows
    else:
        if len(list(row_or_table)) != self.num_columns:
            raise Exception('Row should have ' + str(self.num_columns)
                            + " columns")
        columns = [[value] for value in row_or_table]
        n = 1
    for i, label in enumerate(self._columns):
        if self.num_rows:
            self._columns[label] = np.append(self[label], columns[i])
        else:
            self._columns[label] = np.array(columns[i])
    self._num_rows += n
    return self
Append a row or all rows of a table . An appended table must have all columns of self .
15,745
def append_column(self, label, values):
    """Append or replace a column; a scalar is broadcast to every row."""
    if not isinstance(label, str):
        raise ValueError('The column label must be a string, but a '
                         '{} was given'.format(label.__class__.__name__))
    if not isinstance(values, np.ndarray):
        if not _is_non_string_iterable(values):
            # Broadcast scalars; an empty table still gets one row.
            values = [values] * max(self.num_rows, 1)
        values = np.array(tuple(values))
    if self.num_rows != 0 and len(values) != self.num_rows:
        raise ValueError('Column length mismatch. New column does not have '
                         'the same number of rows as table.')
    else:
        self._num_rows = len(values)
    self._columns[label] = values
Appends a column to the table or replaces a column .
15,746
def remove(self, row_or_row_indices):
    """Remove a row (by index) or multiple rows of a table in place.

    Returns self, or None when given None or an empty collection.

    BUG FIX: the original guard `if not row_or_row_indices` treated row
    index 0 as "nothing to remove", so `remove(0)` silently did nothing.
    """
    if row_or_row_indices is None:
        return
    if isinstance(row_or_row_indices, int):
        rows_remove = [row_or_row_indices]
    else:
        rows_remove = list(row_or_row_indices)
        if not rows_remove:
            # Preserve the original no-op (returns None) for empty input.
            return
    for col in self._columns:
        self._columns[col] = [elem for i, elem in enumerate(self[col])
                              if i not in rows_remove]
    return self
Removes a row or multiple rows of a table in place .
15,747
def copy(self, *, shallow=False):
    """Return a copy of the table; columns are deep-copied unless shallow."""
    duplicate = type(self)()
    for label in self.labels:
        column = self[label] if shallow else np.copy(self[label])
        self._add_column_and_format(duplicate, label, column)
    return duplicate
Return a copy of a table .
15,748
def select(self, *column_or_columns):
    """Return a new table containing only the requested columns (copied)."""
    chosen = self._varargs_as_labels(column_or_columns)
    result = type(self)()
    for label in chosen:
        self._add_column_and_format(result, label, np.copy(self[label]))
    return result
Return a table with only the columns in column_or_columns .
15,749
def drop(self, *column_or_columns):
    """Return a table without the given columns (matched by label or index)."""
    exclude = _varargs_labels_as_list(column_or_columns)
    keep = [c for (i, c) in enumerate(self.labels)
            if i not in exclude and c not in exclude]
    return self.select(keep)
Return a Table with only columns other than selected label or labels .
15,750
def where(self, column_or_label, value_or_predicate=None, other=None):
    """Keep rows where the predicate (or equality test) holds for the column.

    With `other`, the predicate is built from the paired value in the
    other column for each row.
    """
    column = self._get_column(column_or_label)
    if other is not None:
        assert callable(value_or_predicate), \
            "Predicate required for 3-arg where"
        predicate = value_or_predicate
        other = self._get_column(other)
        column = [predicate(y)(x) for x, y in zip(column, other)]
    elif value_or_predicate is not None:
        if callable(value_or_predicate):
            predicate = value_or_predicate
        else:
            predicate = _predicates.are.equal_to(value_or_predicate)
        column = [predicate(x) for x in column]
    return self.take(np.nonzero(column)[0])
Return a new Table containing rows where value_or_predicate returns True for values in column_or_label .
15,751
def sort(self, column_or_label, descending=False, distinct=False):
    """Return a table of rows sorted by a column's values.

    With distinct, keeps only the first row per unique value.
    """
    column = self._get_column(column_or_label)
    if distinct:
        _, order = np.unique(column, return_index=True)
    else:
        # mergesort keeps the sort stable.
        order = np.argsort(column, axis=0, kind='mergesort')
    assert (order < self.num_rows).all(), order
    if descending:
        order = np.array(order[::-1])
    return self.take(order)
Return a Table of rows sorted according to the values in a column .
15,752
def group(self, column_or_label, collect=None):
    """Group rows by unique values in a column; count or aggregate others."""
    # A list of labels (not a full-length column) means a multi-column group.
    if (_is_non_string_iterable(column_or_label)
            and len(column_or_label) != self._num_rows):
        return self.groups(column_or_label, collect)
    self = self.copy(shallow=True)
    collect = _zero_on_type_error(collect)
    column = self._get_column(column_or_label)
    if isinstance(column_or_label, (str, numbers.Integral)):
        column_label = self._as_label(column_or_label)
        del self[column_label]
    else:
        column_label = self._unused_label('group')
    groups = self.index_by(column)
    keys = sorted(groups.keys())
    if collect is None:
        count_label = ('count' if column_label != 'count'
                       else self._unused_label('count'))
        labels = [column_label, count_label]
        columns = [keys, [len(groups[k]) for k in keys]]
    else:
        columns, labels = [], []
        for i, label in enumerate(self.labels):
            labels.append(_collected_label(collect, label))
            columns.append([collect(np.array([row[i] for row in groups[k]]))
                           for k in keys])
    grouped = type(self)().with_columns(zip(labels, columns))
    assert column_label == self._unused_label(column_label)
    grouped[column_label] = keys
    grouped.move_to_start(column_label)
    return grouped
Group rows by unique values in a column ; count or aggregate others .
15,753
def groups(self, labels, collect=None):
    """Group rows by multiple columns; count or aggregate the rest."""
    if not _is_non_string_iterable(labels):
        return self.group(labels, collect=collect)
    collect = _zero_on_type_error(collect)
    labels = self._as_labels(labels)
    columns = []
    for label in labels:
        if label not in self.labels:
            raise ValueError("All labels must exist in the table")
        columns.append(self._get_column(label))
    # Group on the tuple of key values, keeping raw value lists.
    grouped = self.group(list(zip(*columns)), lambda s: s)
    grouped._columns.popitem(last=False)
    counts = [len(v) for v in grouped[0]]
    # Re-expand the key tuple back into one column per label.
    for label in labels[::-1]:
        grouped[label] = grouped.apply(_assert_same, label)
        grouped.move_to_start(label)
    if collect is None:
        count = ('count' if 'count' not in labels
                 else self._unused_label('count'))
        return grouped.select(labels).with_column(count, counts)
    for label in grouped.labels:
        if label in labels:
            continue
        column = [collect(v) for v in grouped[label]]
        del grouped[label]
        grouped[_collected_label(collect, label)] = column
    return grouped
Group rows by multiple columns; count or aggregate others.
15,754
def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs):
    """Bin value_column values separately for each unique pivot-column tuple.

    All groups share the bin edges computed over the whole value column.
    """
    pivot_columns = _as_labels(pivot_columns)
    selected = self.select(pivot_columns + [value_column])
    grouped = selected.groups(pivot_columns, collect=lambda x: x)
    if bins is not None:
        vargs['bins'] = bins
    _, shared_bins = np.histogram(self[value_column], **vargs)
    vargs['bins'] = shared_bins
    binned = type(self)().with_column('bin', shared_bins)
    for group in grouped.rows:
        column_label = "-".join(map(str, group[0:-1]))
        counts, _ = np.histogram(group[-1], **vargs)
        # Pad with 0 so the counts column matches the bin-edge column.
        binned[column_label] = np.append(counts, 0)
    return binned
Form a table with columns formed by the unique tuples in pivot_columns containing counts per bin of the values associated with each tuple in the value_column .
15,755
def stack(self, key, labels=None):
    """Pivot columns into long form: (key, column name, value) triples.

    Parameters
    ----------
    key : str
        Row attribute kept as the identifier column.
    labels : sequence, optional
        Columns to include; all of self's labels by default.

    (The original built `rows` via a list comprehension executed purely
    for its side effects; rewritten as a plain loop.)
    """
    labels = labels or self.labels
    rows = []
    for row in self.rows:
        key_value = getattr(row, key)
        for column_label, value in row.asdict().items():
            if column_label != key and column_label in labels:
                rows.append((key_value, column_label, value))
    return type(self)([key, 'column', 'value']).with_rows(rows)
Takes k original columns and returns two columns with col . 1 of all column names and col . 2 of all associated data .
15,756
def join(self, column_label, other, other_label=None):
    """Inner-join self with other on matching values of a column.

    Returns None when either table is empty or nothing matches.
    """
    if self.num_rows == 0 or other.num_rows == 0:
        return None
    if not other_label:
        other_label = column_label
    self_rows = self.index_by(column_label)
    other_rows = other.index_by(other_label)
    joined_rows = []
    for value, rows in self_rows.items():
        if value in other_rows:
            joined_rows += [row + o
                            for row in rows
                            for o in other_rows[value]]
    if not joined_rows:
        return None
    self_labels = list(self.labels)
    # Rename other's labels so they don't collide with self's.
    other_labels = [self._unused_label(s) for s in other.labels]
    other_labels_map = dict(zip(other.labels, other_labels))
    joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
    joined._formats.update(self._formats)
    for label in other._formats:
        joined._formats[other_labels_map[label]] = other._formats[label]
    # The join column appears twice; drop other's copy.
    del joined[other_labels_map[other_label]]
    if column_label not in self._formats and other_label in other._formats:
        joined._formats[column_label] = other._formats[other_label]
    return joined.move_to_start(column_label).sort(column_label)
Creates a new table with the columns of self and other containing rows for all values of a column that appear in both tables .
15,757
def stats(self, ops=(min, max, np.median, sum)):
    """Compute statistics for each column and place them in a table.

    Args:
        ops: a sequence of functions, each applied to every column.

    Returns:
        A table with one row per op, led by a 'statistic' column naming
        the op applied in that row.
    """
    names = [op.__name__ for op in ops]
    # Wrap each op so non-numeric columns yield 0 instead of raising.
    ops = [_zero_on_type_error(op) for op in ops]
    columns = [[op(column) for op in ops] for column in self.columns]
    table = type(self)().with_columns(zip(self.labels, columns))
    # Avoid clashing with an existing 'statistic' column label.
    stats = table._unused_label('statistic')
    table[stats] = names
    table.move_to_start(stats)
    return table
Compute statistics for each column and place them in a table .
15,758
def _as_label(self, index_or_label):
    """Return a column label, resolving an integer index via self.labels.

    Args:
        index_or_label: a label string or an integer column index.

    Raises:
        ValueError: if the argument is neither a string nor an integer.
    """
    if isinstance(index_or_label, str):
        return index_or_label
    elif isinstance(index_or_label, numbers.Integral):
        return self.labels[index_or_label]
    raise ValueError(str(index_or_label) + ' is not a label or index')
Convert index to label .
15,759
def _unused_label(self, label):
    """Return ``label``, suffixed with '_<n>' if needed so that it does
    not collide with any existing column label."""
    existing = self.labels
    candidate = label
    suffix = 2
    while candidate in existing:
        candidate = '{}_{}'.format(label, suffix)
        suffix += 1
    return candidate
Generate an unused label .
15,760
def _get_column(self, column_or_label):
    """Convert a label or index to a column; pass an array-like through
    after checking its length matches the number of rows.

    Args:
        column_or_label: a label, an integer index, or a sequence of
            values of length ``num_rows``.

    Raises:
        ValueError: if a string label is not among the table's labels.
    """
    c = column_or_label
    # collections.Hashable was removed in Python 3.10; the abc module
    # (already used elsewhere in this file) is the supported home.
    if isinstance(c, collections.abc.Hashable) and c in self.labels:
        return self[c]
    elif isinstance(c, numbers.Integral):
        return self[c]
    elif isinstance(c, str):
        raise ValueError('label "{}" not in labels {}'.format(c, self.labels))
    else:
        assert len(c) == self.num_rows, 'column length mismatch'
        return c
Convert label to column and check column length .
15,761
def percentile(self, p):
    """Return a new one-row table holding the pth percentile of every
    column of this table."""
    single_row = []
    for column in self.columns:
        single_row.append([_util.percentile(p, column)])
    return self._with_columns(single_row)
Return a new table with one row containing the pth percentile for each column .
15,762
def sample(self, k=None, with_replacement=True, weights=None):
    """Return a new table of k rows sampled at random from this table.

    Args:
        k: number of rows to draw; defaults to the full row count.
        with_replacement: if True, rows may repeat in the sample.
        weights: optional per-row probabilities for the draw.
    """
    row_count = self.num_rows
    if k is None:
        k = row_count
    chosen = np.random.choice(row_count, k,
                              replace=with_replacement, p=weights)
    sampled_columns = [[column[i] for i in chosen]
                       for column in self.columns]
    return self._with_columns(sampled_columns)
Return a new table where k rows are randomly sampled from the original table .
15,763
def split(self, k):
    """Return a tuple of two tables: the first with k rows randomly
    sampled without replacement, the second with the remaining rows.

    Args:
        k: number of rows in the first table; must satisfy
            1 <= k <= num_rows - 1.

    Raises:
        ValueError: if k is out of range.
    """
    if not 1 <= k <= self.num_rows - 1:
        # Fixed message: the previous adjacent string literals
        # concatenated without a space ("...and thenumber of rows - 1").
        raise ValueError("Invalid value of k. k must be between 1 and "
                         "the number of rows - 1")
    # Shuffle row indices, then partition into the first k and the rest.
    rows = np.random.permutation(self.num_rows)
    first = self.take(rows[:k])
    rest = self.take(rows[k:])
    # Preserve column formatters on both halves.
    for column_label in self._formats:
        first._formats[column_label] = self._formats[column_label]
        rest._formats[column_label] = self._formats[column_label]
    return first, rest
Return a tuple of two tables where the first table contains k rows randomly sampled and the second contains the remaining rows .
15,764
def with_row(self, row):
    """Return a copy of this table with ``row`` appended."""
    extended = self.copy()
    extended.append(row)
    return extended
Return a table with an additional row .
15,765
def with_rows(self, rows):
    """Return a copy of this table with every row in ``rows`` appended."""
    extended = self.copy()
    # Transpose the rows into columns before appending.
    extended.append(self._with_columns(zip(*rows)))
    return extended
Return a table with additional rows .
15,766
def with_column(self, label, values, *rest):
    """Return a new table with column ``label`` added or replaced.

    Extra label/values pairs in ``rest`` are forwarded to with_columns.
    """
    if rest:
        return self.with_columns(label, values, *rest)
    updated = self.copy()
    updated.append_column(label, values)
    return updated
Return a new table with an additional or replaced column .
15,767
def with_columns(self, *labels_and_values):
    """Return a table with additional or replaced columns.

    Accepts alternating label/values arguments, a single sequence of
    (label, values) pairs, or a single mapping from labels to values.
    """
    # A single argument may be a mapping or a sequence of pairs.
    if len(labels_and_values) == 1:
        labels_and_values = labels_and_values[0]
    if isinstance(labels_and_values, collections.abc.Mapping):
        labels_and_values = list(labels_and_values.items())
    if not isinstance(labels_and_values, collections.abc.Sequence):
        labels_and_values = list(labels_and_values)
    if not labels_and_values:
        return self
    # If given pairs, flatten them into an alternating label/values list.
    first = labels_and_values[0]
    if not isinstance(first, str) and hasattr(first, '__iter__'):
        for pair in labels_and_values:
            assert len(pair) == 2, 'incorrect columns format'
        labels_and_values = [x for pair in labels_and_values for x in pair]
    assert len(labels_and_values) % 2 == 0, 'Even length sequence required'
    # Add each (label, values) pair in turn; with_column is non-mutating.
    for i in range(0, len(labels_and_values), 2):
        label, values = labels_and_values[i], labels_and_values[i + 1]
        self = self.with_column(label, values)
    return self
Return a table with additional or replaced columns .
15,768
def bin(self, *columns, **vargs):
    """Group values by bin and compute counts per bin by column.

    Args:
        columns: optional labels restricting which columns are binned.
        vargs: keyword arguments for np.histogram ('normed' is mapped to
            'density' for newer numpy versions).

    Returns:
        A table with a 'bin' column of edges plus one count/density
        column per original column.
    """
    if columns:
        self = self.select(*columns)
    # numpy renamed 'normed' to 'density'; honor the old spelling.
    if 'normed' in vargs:
        vargs.setdefault('density', vargs.pop('normed'))
    density = vargs.get('density', False)
    tag = 'density' if density else 'count'
    # Common bin edges are computed across all columns together.
    cols = list(self._columns.values())
    _, bins = np.histogram(cols, **vargs)
    binned = type(self)().with_column('bin', bins)
    for label in self.labels:
        counts, _ = np.histogram(self[label], bins=bins, density=density)
        # Trailing 0 aligns counts with the edges column length.
        binned[label + ' ' + tag] = np.append(counts, 0)
    return binned
Group values by bin and compute counts per bin by column .
15,769
def _use_html_if_available(format_fn):
    """Wrap ``format_fn`` so that values exposing an ``as_html`` method
    render as HTML; header labels always use ``format_fn``."""
    def format_using_as_html(v, label=False):
        prefers_html = not label and hasattr(v, 'as_html')
        if prefers_html:
            return v.as_html()
        return format_fn(v, label)
    return format_using_as_html
Use the value's HTML rendering if available, overriding format_fn.
15,770
def _get_column_formatters(self, max_rows, as_html):
    """Return one value-formatting function per column.

    Args:
        max_rows: number of leading rows used to size each column format.
        as_html: if True, prefer each value's own HTML rendering.
    """
    # Fall back to the table's default formatter for unformatted columns.
    formats = {s: self._formats.get(s, self.formatter) for s in self.labels}
    cols = self._columns.items()
    fmts = [formats[k].format_column(k, v[:max_rows]) for k, v in cols]
    if as_html:
        fmts = list(map(type(self)._use_html_if_available, fmts))
    return fmts
Return one value formatting function per column .
15,771
def as_text(self, max_rows=0, sep=" | "):
    """Format the table as text.

    Args:
        max_rows: maximum rows to show (0 or too-large means all rows).
        sep: column separator string.
    """
    if not max_rows or max_rows > self.num_rows:
        max_rows = self.num_rows
    omitted = max(0, self.num_rows - max_rows)
    labels = self._columns.keys()
    fmts = self._get_column_formatters(max_rows, False)
    # Header row first, then one formatted row per displayed table row.
    rows = [[fmt(label, label=True) for fmt, label in zip(fmts, labels)]]
    for row in itertools.islice(self.rows, max_rows):
        rows.append([f(v, label=False) for v, f in zip(row, fmts)])
    lines = [sep.join(row) for row in rows]
    if omitted:
        lines.append('... ({} rows omitted)'.format(omitted))
    return '\n'.join([line.rstrip() for line in lines])
Format table as text .
15,772
def as_html(self, max_rows=0):
    """Format the table as an HTML table.

    Args:
        max_rows: maximum rows to show (0 or too-large means all rows).
    """
    if not max_rows or max_rows > self.num_rows:
        max_rows = self.num_rows
    omitted = max(0, self.num_rows - max_rows)
    labels = self.labels
    # Lines are (indent level, text) pairs, joined with indentation below.
    lines = [
        (0, '<table border="1" class="dataframe">'),
        (1, '<thead>'),
        (2, '<tr>'),
        (3, ' '.join('<th>' + label + '</th>' for label in labels)),
        (2, '</tr>'),
        (1, '</thead>'),
        (1, '<tbody>'),
    ]
    fmts = self._get_column_formatters(max_rows, True)
    for row in itertools.islice(self.rows, max_rows):
        lines += [
            (2, '<tr>'),
            (3, ' '.join('<td>' + fmt(v, label=False) + '</td>'
                         for v, fmt in zip(row, fmts))),
            (2, '</tr>'),
        ]
    lines.append((1, '</tbody>'))
    lines.append((0, '</table>'))
    if omitted:
        lines.append((0, '<p>... ({} rows omitted)</p>'.format(omitted)))
    return '\n'.join(4 * indent * ' ' + text for indent, text in lines)
Format table as HTML .
15,773
def index_by(self, column_or_label):
    """Return a dict keyed by the values in a column, mapping each value
    to the list of rows that carry it."""
    keys = self._get_column(column_or_label)
    grouped = {}
    for key, row in zip(keys, self.rows):
        if key not in grouped:
            grouped[key] = []
        grouped[key].append(row)
    return grouped
Return a dict keyed by values in a column that contains lists of rows corresponding to each value .
15,774
def to_array(self):
    """Convert the table to a structured NumPy array.

    The array's fields are the table's labels, each with the
    corresponding column's dtype.
    """
    dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
    # Sized like one column; the structured dtype holds all fields per row.
    arr = np.empty_like(self.columns[0], dt)
    for label in self.labels:
        arr[label] = self[label]
    return arr
Convert the table to a structured NumPy array .
15,775
def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
    """Plot line charts for the table, one line per numeric column.

    Args:
        column_for_xticks: optional label/index of the column providing
            the x-axis values.
        select: optional label(s) restricting which columns are plotted.
        overlay: if True, draw all lines on a single axis.
        width, height: figure size per chart, in inches.
        vargs: extra keyword arguments for matplotlib's plot.
    """
    options = self.default_options.copy()
    options.update(vargs)
    if column_for_xticks is not None:
        x_data, y_labels = self._split_column_and_labels(column_for_xticks)
        x_label = self._as_label(column_for_xticks)
    else:
        x_data, y_labels = None, self.labels
        x_label = None
    if select is not None:
        y_labels = self._as_labels(select)
    if x_data is not None:
        # Sort rows by x so each line is drawn left-to-right.
        self = self.sort(x_data)
        x_data = np.sort(x_data)

    def draw(axis, label, color):
        if x_data is None:
            axis.plot(self[label], color=color, **options)
        else:
            axis.plot(x_data, self[label], color=color, **options)

    self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x,
                    width=width, height=height)
Plot line charts for the table .
15,776
def bar(self, column_for_categories=None, select=None, overlay=True, width=6, height=4, **vargs):
    """Plot bar charts for the table.

    Args:
        column_for_categories: optional label of the column whose values
            label the x-axis ticks.
        select: optional label(s) restricting which columns are plotted.
        overlay: if True, draw all bars on a single axis.
        width, height: figure size per chart, in inches.
        vargs: extra keyword arguments for matplotlib's bar.
    """
    options = self.default_options.copy()
    # Align bar edges (not centers) with tick positions.
    vargs['align'] = 'edge'
    options.update(vargs)
    xticks, labels = self._split_column_and_labels(column_for_categories)
    if select is not None:
        labels = self._as_labels(select)
    index = np.arange(self.num_rows)

    def draw(axis, label, color):
        # Shift by -0.5 so unit-width bars center on integer positions.
        axis.bar(index - 0.5, self[label], 1.0, color=color, **options)

    def annotate(axis, ticks):
        if (ticks is not None):
            # Replace out-of-range tick positions with empty labels.
            tick_labels = [ticks[int(l)] if 0 <= l < len(ticks) else ''
                           for l in axis.get_xticks()]
            axis.set_xticklabels(tick_labels, stretch='ultra-condensed')

    self._visualize(column_for_categories, labels, xticks, overlay,
                    draw, annotate, width=width, height=height)
Plot bar charts for the table .
15,777
def group_bar(self, column_label, **vargs):
    """Group rows by ``column_label`` and draw a bar chart of the counts."""
    counts = self.group(column_label)
    counts.bar(column_label, **vargs)
Plot a bar chart for the table .
15,778
def barh(self, column_for_categories=None, select=None, overlay=True, width=6, **vargs):
    """Plot horizontal bar charts for the table.

    Args:
        column_for_categories: optional label of the column whose values
            label the y-axis ticks.
        select: optional label(s) restricting which columns are plotted.
        overlay: if True, draw grouped bars on a single axis.
        width: figure width in inches; height defaults from the row count.
        vargs: extra keyword arguments for matplotlib's barh.
    """
    options = self.default_options.copy()
    # Align bar edges (not centers) with tick positions.
    vargs['align'] = 'edge'
    options.update(vargs)
    yticks, labels = self._split_column_and_labels(column_for_categories)
    if select is not None:
        labels = self._as_labels(select)
    n = len(labels)
    index = np.arange(self.num_rows)
    margin = 0.1
    bwidth = 1 - 2 * margin
    if overlay:
        # Share each category slot among all overlaid series.
        bwidth /= len(labels)
    if 'height' in options:
        height = options.pop('height')
    else:
        # Grow the figure with the number of categories.
        height = max(4, len(index) / 2)

    def draw(axis, label, color):
        if overlay:
            # Stack the series' bars within each category slot.
            ypos = index + margin + (1 - 2 * margin) * (n - 1 - labels.index(label)) / n
        else:
            ypos = index
        # Values reversed so the first row appears at the top of the chart.
        axis.barh(ypos, self[label][::-1], bwidth, color=color, **options)

    ylabel = self._as_label(column_for_categories)

    def annotate(axis, ticks):
        axis.set_yticks(index + 0.5)
        axis.set_yticklabels(ticks[::-1], stretch='ultra-condensed')
        # Swap axis labels since the chart is rotated 90 degrees.
        axis.set_xlabel(axis.get_ylabel())
        axis.set_ylabel(ylabel)

    self._visualize('', labels, yticks, overlay, draw, annotate,
                    width=width, height=height)
Plot horizontal bar charts for the table .
15,779
def group_barh(self, column_label, **vargs):
    """Group rows by ``column_label`` and draw a horizontal bar chart of
    the counts."""
    counts = self.group(column_label)
    counts.barh(column_label, **vargs)
Plot a horizontal bar chart for the table .
15,780
def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):
    """Generic visualization that overlays or separates the draw function.

    Args:
        x_label: label for the horizontal axis (or None).
        y_labels: labels of the columns to draw; all must be numerical.
        ticks: tick values passed to ``annotate`` (or None).
        overlay: if True, draw all columns on one axis; otherwise one
            axis per column.
        draw: callable (axis, label, color) that draws one column.
        annotate: callable (axis, ticks) that decorates an axis.
        width, height: figure size per chart, in inches.

    Raises:
        ValueError: if any selected column contains non-numerical values.
    """
    for label in y_labels:
        if not all(isinstance(x, numbers.Real) for x in self[label]):
            raise ValueError("The column '{0}' contains non-numerical "
                "values. A plot cannot be drawn for this column."
                .format(label))
    n = len(y_labels)
    colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
    if overlay and n > 1:
        _, axis = plt.subplots(figsize=(width, height))
        if x_label is not None:
            axis.set_xlabel(x_label)
        for label, color in zip(y_labels, colors):
            draw(axis, label, color)
        if ticks is not None:
            annotate(axis, ticks)
        # Place the legend outside the plot area.
        axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))
        type(self).plots.append(axis)
    else:
        fig, axes = plt.subplots(n, 1, figsize=(width, height * n))
        # plt.subplots returns a bare axis (not a sequence) when n == 1.
        # collections.Iterable was removed in Python 3.10; the abc module
        # (already used elsewhere in this file) is the supported home.
        if not isinstance(axes, collections.abc.Iterable):
            axes = [axes]
        for axis, y_label, color in zip(axes, y_labels, colors):
            draw(axis, y_label, color)
            axis.set_ylabel(y_label, fontsize=16)
            if x_label is not None:
                axis.set_xlabel(x_label, fontsize=16)
            if ticks is not None:
                annotate(axis, ticks)
            type(self).plots.append(axis)
Generic visualization that overlays or separates the draw function .
15,781
def _split_column_and_labels(self, column_or_label):
    """Return the specified column (or None) and the labels of all other
    columns, skipping the one matched by index or by label."""
    if column_or_label is None:
        column = None
    else:
        column = self._get_column(column_or_label)
    remaining = []
    for position, label in enumerate(self.labels):
        if column_or_label != position and column_or_label != label:
            remaining.append(label)
    return column, remaining
Return the specified column and labels of other columns .
15,782
def pivot_hist(self, pivot_column_label, value_column_label, overlay=True, width=6, height=4, **vargs):
    """Draw histograms of the value column, one per category in the pivot
    column. Deprecated in favor of hist(..., group=...).

    Args:
        pivot_column_label: label of the column of categories.
        value_column_label: label of the column whose values are binned.
        overlay: if True, draw all histograms on a single figure.
        width, height: figure size per chart, in inches.
        vargs: extra keyword arguments for matplotlib's hist.
    """
    warnings.warn("pivot_hist is deprecated; use "
                  "hist(value_column_label, group=pivot_column_label), or "
                  "with side_by_side=True if you really want side-by-side "
                  "bars.")
    pvt_labels = np.unique(self[pivot_column_label])
    # One array of values per pivot category.
    pvt_columns = [self[value_column_label][np.where(self[pivot_column_label] == pivot)]
                   for pivot in pvt_labels]
    n = len(pvt_labels)
    colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))
    if overlay:
        plt.figure(figsize=(width, height))
        vals, bins, patches = plt.hist(pvt_columns, color=colors, **vargs)
        plt.legend(pvt_labels)
    else:
        _, axes = plt.subplots(n, 1, figsize=(width, height * n))
        vals = []
        bins = None
        for axis, label, column, color in zip(axes, pvt_labels, pvt_columns, colors):
            if isinstance(bins, np.ndarray):
                # Reuse the first subplot's bins so axes are comparable.
                avals, abins, patches = axis.hist(column, color=color, bins=bins, **vargs)
            else:
                avals, abins, patches = axis.hist(column, color=color, **vargs)
            axis.set_xlabel(label, fontsize=16)
            vals.append(avals)
            if not isinstance(bins, np.ndarray):
                bins = abins
            else:
                assert bins.all() == abins.all(), "Inconsistent bins in hist"
    # NOTE(review): the summary table below is built but never returned
    # or stored — presumably a leftover; confirm before relying on it.
    t = type(self)()
    t['start'] = bins[0:-1]
    t['end'] = bins[1:]
    for label, column in zip(pvt_labels, vals):
        t[label] = column
Draw histograms of each category in a column .
15,783
def hist_of_counts(self, *columns, overlay=True, bins=None, bin_column=None,
                   group=None, side_by_side=False, width=6, height=4, **vargs):
    """Plot one count-based histogram per column in ``columns``. Bar
    heights represent raw counts, and all bins must be of equal size.

    Args:
        columns: labels of the columns to draw; values must be integers.
        overlay: if True, draw all histograms on a single axis.
        bins: explicit bin edges; must be equally spaced.
        bin_column: optional column whose unique values define the bins.
        group: optional label to group by (forwarded to hist).
        side_by_side: if True, draw grouped bars side by side.
        width, height: figure size per chart, in inches.
        vargs: extra keyword arguments for hist; 'normed'/'density' are
            rejected here.

    Raises:
        ValueError: for non-integer columns, density arguments, too few
            bins, or unequal bin sizes.
    """
    if bin_column is not None and bins is None:
        bins = np.unique(self.column(bin_column))
    # Counts only make sense for integer-valued columns.
    for column in columns:
        if not _is_array_integer(self.column(column)):
            raise ValueError('The column {0} contains non-integer values '
                'When using hist_of_counts with bin_columns, '
                'all columns should contain counts.'
                .format(column))
    if vargs.get('normed', False) or vargs.get('density', False):
        raise ValueError("hist_of_counts is for displaying counts only, "
            "and should not be used with the normed or "
            "density keyword arguments")
    vargs['density'] = False
    if bins is not None:
        if len(bins) < 2:
            raise ValueError("bins must have at least two items")
        # Reject bins that are not (numerically) evenly spaced.
        diffs = np.diff(sorted(bins))
        normalized_diff_deviances = np.abs((diffs - diffs[0]) / diffs[0])
        if np.any(normalized_diff_deviances > 1e-11):
            raise ValueError("Bins of unequal size should not be used "
                "with hist_of_counts. Please use hist() and "
                "make sure to set normed=True")
    return self.hist(*columns, overlay=overlay, bins=bins,
                     bin_column=bin_column, group=group,
                     side_by_side=side_by_side, width=width,
                     height=height, **vargs)
Plots one count - based histogram for each column in columns . The heights of each bar will represent the counts and all the bins must be of equal size .
15,784
def boxplot(self, **vargs):
    """Plot a boxplot for every column of the table.

    Args:
        vargs: keyword arguments forwarded to plt.boxplot.

    Raises:
        ValueError: if any column contains non-numerical values.
    """
    # np.flexible covers string/bytes dtypes, which cannot be plotted.
    for col in self:
        if any(isinstance(cell, np.flexible) for cell in self[col]):
            raise ValueError("The column '{0}' contains non-numerical "
                "values. A histogram cannot be drawn for this table."
                .format(col))
    columns = self._columns.copy()
    vargs['labels'] = columns.keys()
    values = list(columns.values())
    plt.boxplot(values, **vargs)
Plots a boxplot for the table .
15,785
def plot_normal_cdf(rbound=None, lbound=None, mean=0, sd=1):
    """Plot a normal curve with the given parameters, shading the area
    under the curve between lbound and rbound.

    Args:
        rbound: right shading bound; defaults to +infinity.
        lbound: left shading bound; defaults to -infinity.
        mean: mean of the normal distribution.
        sd: standard deviation of the normal distribution.

    LaTeX labels use raw strings: "\\infty" etc. previously relied on
    invalid escape sequences, which emit DeprecationWarnings.
    """
    shade = rbound is not None or lbound is not None
    # Shade the left tail a second color only when both bounds are given.
    shade_left = rbound is not None and lbound is not None
    inf = 3.5 * sd
    step = 0.1
    rlabel = rbound
    llabel = lbound
    if rbound is None:
        rbound = inf + mean
        rlabel = r"$\infty$"
    if lbound is None:
        lbound = -inf + mean
        llabel = r"-$\infty$"
    pdf_range = np.arange(-inf + mean, inf + mean, step)
    plt.plot(pdf_range, stats.norm.pdf(pdf_range, loc=mean, scale=sd),
             color='k', lw=1)
    cdf_range = np.arange(lbound, rbound + step, step)
    if shade:
        plt.fill_between(cdf_range,
                         stats.norm.pdf(cdf_range, loc=mean, scale=sd),
                         color='gold')
    if shade_left:
        cdf_range = np.arange(-inf + mean, lbound + step, step)
        plt.fill_between(cdf_range,
                         stats.norm.pdf(cdf_range, loc=mean, scale=sd),
                         color='darkblue')
    # Leave headroom above the curve's peak.
    plt.ylim(0, stats.norm.pdf(0, loc=0, scale=sd) * 1.25)
    plt.xlabel('z')
    plt.ylabel(r'$\phi$(z)', rotation=90)
    plt.title(r"Normal Curve ~ ($\mu$ = {0}, $\sigma$ = {1}) "
              "{2} < z < {3}".format(mean, sd, llabel, rlabel),
              fontsize=16)
    plt.show()
Plots a normal curve with specified parameters and area below curve shaded between lbound and rbound .
15,786
def proportions_from_distribution(table, label, sample_size, column_name='Random Sample'):
    """Add a column named ``column_name`` containing the proportions of a
    random draw of ``sample_size`` items using the distribution in
    column ``label``.

    Args:
        table: the input table.
        label: label of the column holding the distribution.
        sample_size: number of items to draw.
        column_name: label for the new proportions column.

    Returns:
        A new table with the proportions column added.
    """
    proportions = sample_proportions(sample_size, table.column(label))
    # Bug fix: the hard-coded 'Random Sample' label previously ignored
    # the column_name parameter.
    return table.with_column(column_name, proportions)
Adds a column named column_name containing the proportions of a random draw using the distribution in label .
15,787
def table_apply(table, func, subset=None):
    """Apply a function to each column of a Table and return a new Table.

    Args:
        table: the input Table.
        func: function applied to each column (via pandas).
        subset: optional column label(s) restricting which columns func
            is applied to.

    Raises:
        ValueError: if any label in subset is not a column of the table.
    """
    from . import Table
    df = table.to_df()
    if subset is not None:
        # Validate that every requested label exists before applying.
        subset = np.atleast_1d(subset)
        if any([i not in df.columns for i in subset]):
            err = np.where([i not in df.columns for i in subset])[0]
            err = "Column mismatch: {0}".format([subset[i] for i in err])
            raise ValueError(err)
        for col in subset:
            df[col] = df[col].apply(func)
    else:
        df = df.apply(func)
    # A Series result (e.g. from an aggregating func) becomes a 1-row frame.
    if isinstance(df, pd.Series):
        df = pd.DataFrame(df).T
    tab = Table.from_df(df)
    return tab
Applies a function to each column and returns a Table .
15,788
def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):
    """Minimize a function f of one or more arguments.

    Args:
        f: the objective function.
        start: starting value(s); defaults to zeros, one per positional
            argument of f.
        smooth: if True, use scipy's default (derivative-based) method;
            otherwise Powell's method, which needs no derivatives.
        log: optional callable invoked with the full optimizer result.
        array: if True, f takes a single array argument instead of
            separate scalar arguments.
        vargs: extra keyword arguments for scipy.optimize.minimize.

    Returns:
        The minimizing argument as a scalar (one variable) or an array.
    """
    if start is None:
        assert not array, "Please pass starting values explicitly when array=True"
        # Infer the number of variables from f's positional signature.
        arg_count = f.__code__.co_argcount
        assert arg_count > 0, "Please pass starting values explicitly for variadic functions"
        start = [0] * arg_count
    if not hasattr(start, '__len__'):
        start = [start]

    if array:
        objective = f
    else:
        # Adapt a multi-argument f to the array-of-args convention.
        @functools.wraps(f)
        def objective(args):
            return f(*args)

    if not smooth and 'method' not in vargs:
        vargs['method'] = 'Powell'
    result = optimize.minimize(objective, start, **vargs)
    if log is not None:
        log(result)
    if len(start) == 1:
        return result.x.item(0)
    else:
        return result.x
Minimize a function f of one or more arguments .
15,789
def _lat_lons_from_geojson(s):
    """Return a flat list of (lat, lon) pairs from nested GeoJSON
    coordinates.

    GeoJSON stores positions as [lon, lat]; this flips the order.

    Bug fix: the base case now checks the second element (s[1]) — it
    previously tested s[0] twice.
    """
    if len(s) >= 2 and isinstance(s[0], _number) and isinstance(s[1], _number):
        lat, lon = s[1], s[0]
        return [(lat, lon)]
    else:
        # Not a bare position: recurse into nested coordinate lists.
        return [lat_lon for sub in s for lat_lon in _lat_lons_from_geojson(sub)]
Return latitude-longitude pairs from nested GeoJSON coordinates.
15,790
def as_html(self):
    """Generate HTML to display the map, drawing it first if necessary."""
    if not self._folium_map:
        self.draw()
    folium_map = self._folium_map
    return self._inline_map(folium_map, self._width, self._height)
Generate HTML to display map .
15,791
def show(self):
    """Publish the map's HTML into the IPython display."""
    html = IPython.display.HTML(self.as_html())
    IPython.display.display(html)
Publish HTML .
15,792
def copy(self):
    """Return a new Map sharing this one's features, size, attributes,
    and underlying folium map."""
    duplicate = Map(features=self._features,
                    width=self._width,
                    height=self._height,
                    **self._attrs)
    duplicate._folium_map = self._folium_map
    return duplicate
Copies the current Map into a new one and returns it .
15,793
def _autozoom(self):
    """Calculate a zoom level and center location covering all features.

    Returns:
        A dict with 'location' (lat/lon midpoint) and 'zoom_start' keys,
        suitable for constructing a folium map.

    Raises:
        Exception: if the feature bounds are not valid lat-lon pairs.
    """
    bounds = self._autobounds()
    attrs = {}

    # Center the map at the midpoint of the feature bounds.
    midpoint = lambda a, b: (a + b) / 2
    attrs['location'] = (
        midpoint(bounds['min_lat'], bounds['max_lat']),
        midpoint(bounds['min_lon'], bounds['max_lon'])
    )

    import math
    try:
        lat_diff = bounds['max_lat'] - bounds['min_lat']
        lon_diff = bounds['max_lon'] - bounds['min_lon']
        area, max_area = lat_diff * lon_diff, 180 * 360
        if area:
            # Heuristic: zoom scales with the log of the covered area,
            # adjusted for map width. TODO confirm tuning constants.
            factor = 1 + max(0, 1 - self._width / 1000) / 2 + max(0, 1 - area ** 0.5) / 2
            zoom = math.log(area / max_area) / -factor
        else:
            # Zero area (single point): fall back to the default zoom.
            zoom = self._default_zoom
        # Clamp to a valid tile zoom range.
        zoom = max(1, min(18, round(zoom)))
        attrs['zoom_start'] = zoom
    except ValueError as e:
        raise Exception('Check that your locations are lat-lon pairs', e)

    return attrs
Calculate zoom and location .
15,794
def _autobounds(self):
    """Compute min/max latitude and longitude over all feature positions.

    Returns:
        A dict with 'min_lat', 'max_lat', 'min_lon', and 'max_lon' keys,
        each clamped to the valid geographic range.
    """
    bounds = {}

    def check(prop, compare, extreme, val):
        # Update bounds[prop] toward val, but never beyond extreme.
        opp = min if compare is max else max
        bounds.setdefault(prop, val)
        bounds[prop] = opp(compare(bounds[prop], val), extreme)

    def bound_check(lat_lon):
        lat, lon = lat_lon
        check('max_lat', max, 90, lat)
        check('min_lat', min, -90, lat)
        check('max_lon', max, 180, lon)
        check('min_lon', min, -180, lon)

    lat_lons = [lat_lon for feature in self._features.values()
                for lat_lon in feature.lat_lons]
    if not lat_lons:
        # No features: fall back to the map's default center.
        lat_lons.append(self._default_lat_lon)
    for lat_lon in lat_lons:
        bound_check(lat_lon)

    return bounds
Simple calculation for bounds .
15,795
def geojson(self):
    """Render the map's features as a GeoJSON FeatureCollection dict."""
    rendered = [feature.geojson(key)
                for key, feature in self._features.items()]
    return {
        "type": "FeatureCollection",
        "features": rendered,
    }
Render features as a FeatureCollection .
15,796
def color(self, values, ids=(), key_on='feature.id', palette='YlOrBr', **kwargs):
    """Color map features by binning values (a choropleth).

    Args:
        values: a sequence of values, a two-column table, or a mapping
            from feature IDs to values.
        ids: feature IDs matching values (unused when values is a
            mapping or table).
        key_on: geoJSON property folium uses to match features to IDs.
        palette: ColorBrewer palette name.
        kwargs: extra keyword arguments for folium's geo_json.

    Returns:
        A new colored Map.
    """
    id_name, value_name = 'IDs', 'values'
    if isinstance(values, collections.abc.Mapping):
        assert not ids, 'IDs and a map cannot both be used together'
        if hasattr(values, 'columns') and len(values.columns) == 2:
            # A two-column table supplies both IDs and values.
            table = values
            ids, values = table.columns
            id_name, value_name = table.labels
        else:
            dictionary = values
            ids, values = list(dictionary.keys()), list(dictionary.values())
    if len(ids) != len(values):
        # Without explicit IDs, fall back to positional indices.
        assert len(ids) == 0
        ids = list(range(len(values)))

    m = self._create_map()
    data = pandas.DataFrame({id_name: ids, value_name: values})
    attrs = {
        'geo_str': json.dumps(self.geojson()),
        'data': data,
        'columns': [id_name, value_name],
        'key_on': key_on,
        'fill_color': palette,
    }
    kwargs.update(attrs)
    m.geo_json(**kwargs)
    colored = self.format()
    colored._folium_map = m
    return colored
Color map features by binning values .
15,797
def overlay(self, feature, color='Blue', opacity=0.6):
    """Overlay ``feature`` onto a copy of this map and return the new Map.

    Args:
        feature: a Table of features, a list/array of features, another
            Map, or a single Region.
        color: fill color applied to the overlaid feature(s).
        opacity: fill opacity applied to the overlaid feature(s).

    Returns:
        A new Map with the feature(s) drawn on top.
    """
    result = self.copy()
    if type(feature) == Table:
        # A table either carries ready-made features in a 'feature'
        # column or point rows converted to Circle markers.
        if 'feature' in feature:
            feature = feature['feature']
        else:
            feature = Circle.map_table(feature)
    if type(feature) in [list, np.ndarray]:
        for f in feature:
            f._attrs['fill_color'] = color
            f._attrs['fill_opacity'] = opacity
            f.draw_on(result._folium_map)
    elif type(feature) == Map:
        for i in range(len(feature._features)):
            f = feature._features[i]
            f._attrs['fill_color'] = color
            f._attrs['fill_opacity'] = opacity
            f.draw_on(result._folium_map)
    elif type(feature) == Region:
        feature._attrs['fill_color'] = color
        feature._attrs['fill_opacity'] = opacity
        feature.draw_on(result._folium_map)
    return result
Overlays feature on the map . Returns a new Map .
15,798
def read_geojson(cls, path_or_json_or_string):
    """Read a GeoJSON object, JSON string, or path to an (optionally
    gzipped) GeoJSON file.

    Args:
        path_or_json_or_string: a parsed geoJSON dict/list, a geoJSON
            string, or a file path.

    Returns:
        cls constructed from the features keyed by ID.

    Fixes: a dict/list input was previously re-fed to json.loads/open,
    raising an uncaught TypeError; files were also left unclosed.
    """
    assert path_or_json_or_string
    data = None
    if isinstance(path_or_json_or_string, (dict, list)):
        # Already-parsed geoJSON: use it directly.
        data = path_or_json_or_string
    if data is None:
        try:
            data = json.loads(path_or_json_or_string)
        except ValueError:
            pass
    if data is None:
        try:
            path = path_or_json_or_string
            if path.endswith('.gz') or path.endswith('.gzip'):
                import gzip
                with gzip.open(path, 'r') as f:
                    contents = f.read().decode('utf-8')
            else:
                with open(path, 'r') as f:
                    contents = f.read()
            data = json.loads(contents)
        except FileNotFoundError:
            pass
    assert data, 'MapData accepts a valid geoJSON object, geoJSON string, or path to a geoJSON file'
    return cls(cls._read_geojson_features(data))
Read a geoJSON string object or file . Return a dict of features keyed by ID .
15,799
def _read_geojson_features(data, features=None, prefix=""):
    """Return an ordered dict of map features keyed by feature ID.

    Args:
        data: a parsed geoJSON object with a 'features' list.
        features: accumulator dict used by recursive calls.
        prefix: key prefix disambiguating nested collections.
    """
    if features is None:
        features = collections.OrderedDict()
    for i, feature in enumerate(data['features']):
        # Positional keys are used when a feature has no explicit ID.
        key = feature.get('id', prefix + str(i))
        feature_type = feature['geometry']['type']
        if feature_type == 'FeatureCollection':
            # Recurse into nested collections, extending the key prefix.
            # NOTE(review): this branch does not assign `value`, so the
            # assignment below uses a stale or undefined value — confirm.
            _read_geojson_features(feature, features, prefix + '.' + key)
        elif feature_type == 'Point':
            value = Circle._convert_point(feature)
        elif feature_type in ['Polygon', 'MultiPolygon']:
            value = Region(feature)
        else:
            # Unrecognized geometry types are kept with a None value.
            value = None
        features[key] = value
    return features
Return a dict of features keyed by ID .