idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
22,800
def register_multi_flags_validator ( flag_names , multi_flags_checker , message = 'Flags validation failed' , flag_values = FLAGS ) : v = gflags_validators . MultiFlagsValidator ( flag_names , multi_flags_checker , message ) _add_validator ( flag_values , v )
Adds a constraint to multiple flags .
76
7
22,801
def multi_flags_validator ( flag_names , message = 'Flag validation failed' , flag_values = FLAGS ) : def decorate ( function ) : register_multi_flags_validator ( flag_names , function , message = message , flag_values = flag_values ) return function return decorate
A function decorator for defining a multi - flag validator .
68
13
22,802
def mark_flag_as_required ( flag_name , flag_values = FLAGS ) : if flag_values [ flag_name ] . default is not None : # TODO(vrusinov): Turn this warning into an exception. warnings . warn ( 'Flag %s has a non-None default value; therefore, ' 'mark_flag_as_required will pass even if flag is not specified in the ' 'command line!' % flag_name ) register_validator ( flag_name , lambda value : value is not None , message = 'Flag --%s must be specified.' % flag_name , flag_values = flag_values )
Ensures that flag is not None during program execution .
141
12
22,803
def mark_flags_as_mutual_exclusive ( flag_names , required = False , flag_values = FLAGS ) : def validate_mutual_exclusion ( flags_dict ) : flag_count = sum ( 1 for val in flags_dict . values ( ) if val is not None ) if flag_count == 1 or ( not required and flag_count == 0 ) : return True message = ( '%s one of (%s) must be specified.' % ( 'Exactly' if required else 'At most' , ', ' . join ( flag_names ) ) ) raise ValidationError ( message ) register_multi_flags_validator ( flag_names , validate_mutual_exclusion , flag_values = flag_values )
Ensures that only one flag among flag_names is set .
161
14
22,804
def DEFINE_enum ( # pylint: disable=g-bad-name,redefined-builtin name , default , enum_values , help , flag_values = FLAGS , module_name = None , * * args ) : DEFINE_flag ( EnumFlag ( name , default , help , enum_values , * * args ) , flag_values , module_name )
Registers a flag whose value can be any string from enum_values .
85
15
22,805
def DEFINE_list ( # pylint: disable=g-bad-name,redefined-builtin name , default , help , flag_values = FLAGS , * * args ) : parser = ListParser ( ) serializer = CsvListSerializer ( ',' ) DEFINE ( parser , name , default , help , flag_values , serializer , * * args )
Registers a flag whose value is a comma - separated list of strings .
83
15
22,806
def DEFINE_multi ( # pylint: disable=g-bad-name,redefined-builtin parser , serializer , name , default , help , flag_values = FLAGS , module_name = None , * * args ) : DEFINE_flag ( MultiFlag ( parser , serializer , name , default , help , * * args ) , flag_values , module_name )
Registers a generic MultiFlag that parses its args with a given parser .
86
16
22,807
def DEFINE_multi_float ( # pylint: disable=g-bad-name,redefined-builtin name , default , help , lower_bound = None , upper_bound = None , flag_values = FLAGS , * * args ) : parser = FloatParser ( lower_bound , upper_bound ) serializer = ArgumentSerializer ( ) DEFINE_multi ( parser , serializer , name , default , help , flag_values , * * args )
Registers a flag whose value can be a list of arbitrary floats .
102
14
22,808
def DEFINE_alias ( name , original_name , flag_values = FLAGS , module_name = None ) : # pylint: disable=g-bad-name if original_name not in flag_values : raise UnrecognizedFlagError ( original_name ) flag = flag_values [ original_name ] class _Parser ( ArgumentParser ) : """The parser for the alias flag calls the original flag parser.""" def parse ( self , argument ) : flag . parse ( argument ) return flag . value class _FlagAlias ( Flag ) : """Overrides Flag class so alias value is copy of original flag value.""" @ property def value ( self ) : return flag . value @ value . setter def value ( self , value ) : flag . value = value help_msg = 'Alias for --%s.' % flag . name # If alias_name has been used, gflags.DuplicatedFlag will be raised. DEFINE_flag ( _FlagAlias ( _Parser ( ) , flag . serializer , name , flag . default , help_msg , boolean = flag . boolean ) , flag_values , module_name )
Defines an alias flag for an existing one .
244
10
22,809
def from_flag ( cls , flagname , flag_values , other_flag_values = None ) : first_module = flag_values . FindModuleDefiningFlag ( flagname , default = '<unknown>' ) if other_flag_values is None : second_module = _helpers . GetCallingModule ( ) else : second_module = other_flag_values . FindModuleDefiningFlag ( flagname , default = '<unknown>' ) flag_summary = flag_values [ flagname ] . help msg = ( "The flag '%s' is defined twice. First from %s, Second from %s. " "Description from first occurrence: %s" ) % ( flagname , first_module , second_module , flag_summary ) return cls ( msg )
Create a DuplicateFlagError by providing flag name and values .
171
13
22,810
def convert ( self , argument ) : if isinstance ( argument , str ) : if argument . lower ( ) in [ 'true' , 't' , '1' ] : return True elif argument . lower ( ) in [ 'false' , 'f' , '0' ] : return False bool_argument = bool ( argument ) if argument == bool_argument : # The argument is a valid boolean (True, False, 0, or 1), and not just # something that always converts to bool (list, string, int, etc.). return bool_argument raise ValueError ( 'Non-boolean argument to boolean flag' , argument )
Converts the argument to a boolean ; raise ValueError on errors .
136
14
22,811
def parse ( self , argument ) : if not self . enum_values : return argument elif self . case_sensitive : if argument not in self . enum_values : raise ValueError ( 'value should be one of <%s>' % '|' . join ( self . enum_values ) ) else : return argument else : if argument . upper ( ) not in [ value . upper ( ) for value in self . enum_values ] : raise ValueError ( 'value should be one of <%s>' % '|' . join ( self . enum_values ) ) else : return [ value for value in self . enum_values if value . upper ( ) == argument . upper ( ) ] [ 0 ]
Determine validity of argument and return the correct element of enum .
153
14
22,812
def serialize ( self , value ) : if six . PY2 : # In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8. output = io . BytesIO ( ) csv . writer ( output ) . writerow ( [ unicode ( x ) . encode ( 'utf-8' ) for x in value ] ) serialized_value = output . getvalue ( ) . decode ( 'utf-8' ) . strip ( ) else : # In Python3 csv.writer expects a text stream. output = io . StringIO ( ) csv . writer ( output ) . writerow ( [ str ( x ) for x in value ] ) serialized_value = output . getvalue ( ) . strip ( ) # We need the returned value to be pure ascii or Unicodes so that # when the xml help is generated they are usefully encodable. return _helpers . StrOrUnicode ( serialized_value )
Serialize a list as a string if possible or as a unicode string .
211
16
22,813
def record ( self ) : while True : frames = [ ] self . stream . start_stream ( ) for i in range ( self . num_frames ) : data = self . stream . read ( self . config . FRAMES_PER_BUFFER ) frames . append ( data ) self . output . seek ( 0 ) w = wave . open ( self . output , 'wb' ) w . setnchannels ( self . config . CHANNELS ) w . setsampwidth ( self . audio . get_sample_size ( self . config . FORMAT ) ) w . setframerate ( self . config . RATE ) w . writeframes ( b'' . join ( frames ) ) w . close ( ) yield
Record PyAudio stream into StringIO output
153
8
22,814
def stop ( self ) : self . prestop ( ) if not self . _graceful : self . _graceful = True self . stream . stop_stream ( ) self . audio . terminate ( ) msg = 'Stopped' self . verbose_info ( msg , log = False ) # Log 'Stopped' anyway if self . log : self . logging . info ( msg ) if self . collect : if self . _data : print ( 'Collected result:' ) print ( ' min: %10d' % self . _data [ 'min' ] ) print ( ' max: %10d' % self . _data [ 'max' ] ) print ( ' avg: %10d' % int ( self . _data [ 'avg' ] ) ) self . poststop ( )
Stop the stream and terminate PyAudio
173
7
22,815
def get_threshold ( self ) : if self . threshold . startswith ( '+' ) : if self . threshold [ 1 : ] . isdigit ( ) : self . _threshold = int ( self . threshold [ 1 : ] ) self . _upper = True elif self . threshold . startswith ( '-' ) : if self . threshold [ 1 : ] . isdigit ( ) : self . _threshold = int ( self . threshold [ 1 : ] ) self . _upper = False else : if self . threshold . isdigit ( ) : self . _threshold = int ( self . threshold ) self . _upper = True if not hasattr ( self , '_threshold' ) : raise ValueError ( 'Invalid threshold' )
Get and validate raw RMS value from threshold
162
9
22,816
def collect_rms ( self , rms ) : if self . _data : self . _data [ 'min' ] = min ( rms , self . _data [ 'min' ] ) self . _data [ 'max' ] = max ( rms , self . _data [ 'max' ] ) self . _data [ 'avg' ] = float ( rms + self . _data [ 'avg' ] ) / 2 else : self . _data [ 'min' ] = rms self . _data [ 'max' ] = rms self . _data [ 'avg' ] = rms
Collect and calculate min max and average RMS values
136
10
22,817
def from_timedelta ( cls , timedelta ) : from math import ceil units = ceil ( timedelta . total_seconds ( ) / cls . time_unit ) return cls . create ( units )
expects a datetime . timedelta object
48
9
22,818
def b58decode_check ( v : str ) -> bytes : result = b58decode ( v ) result , check = result [ : - 4 ] , result [ - 4 : ] digest = sha256 ( sha256 ( result ) . digest ( ) ) . digest ( ) if check != digest [ : 4 ] : raise ValueError ( "Invalid checksum" ) return result
Decode and verify the checksum of a Base58 encoded string
83
13
22,819
def bech32_decode ( bech ) : if ( ( any ( ord ( x ) < 33 or ord ( x ) > 126 for x in bech ) ) or ( bech . lower ( ) != bech and bech . upper ( ) != bech ) ) : return None , None bech = bech . lower ( ) pos = bech . rfind ( '1' ) if pos < 1 or pos + 7 > len ( bech ) or len ( bech ) > 90 : return None , None if not all ( x in CHARSET for x in bech [ pos + 1 : ] ) : return None , None hrp = bech [ : pos ] data = [ CHARSET . find ( x ) for x in bech [ pos + 1 : ] ] if not bech32_verify_checksum ( hrp , data ) : return None , None return hrp , data [ : - 6 ]
Validate a Bech32 string and determine HRP and data .
203
14
22,820
def process_request ( self , request ) : if django . VERSION < ( 1 , 10 ) : is_authenticated = request . user . is_authenticated ( ) else : is_authenticated = request . user . is_authenticated if not is_authenticated : return now = datetime . now ( ) if '_session_security' not in request . session : set_last_activity ( request . session , now ) return delta = now - get_last_activity ( request . session ) expire_seconds = self . get_expire_seconds ( request ) if delta >= timedelta ( seconds = expire_seconds ) : logout ( request ) elif ( request . path == reverse ( 'session_security_ping' ) and 'idleFor' in request . GET ) : self . update_last_activity ( request , now ) elif not self . is_passive_request ( request ) : set_last_activity ( request . session , now )
Update last activity time or logout .
210
8
22,821
def get_last_activity ( session ) : try : return datetime . strptime ( session [ '_session_security' ] , '%Y-%m-%dT%H:%M:%S.%f' ) except AttributeError : ################################################################# # * this is an odd bug in python # bug report: http://bugs.python.org/issue7980 # bug explained here: # http://code-trick.com/python-bug-attribute-error-_strptime/ # * sometimes, in multithreaded enviroments, we get AttributeError # in this case, we just return datetime.now(), # so that we are not logged out # "./session_security/middleware.py", in update_last_activity # last_activity = get_last_activity(request.session) # "./session_security/utils.py", in get_last_activity # '%Y-%m-%dT%H:%M:%S.%f') # AttributeError: _strptime # ################################################################# return datetime . now ( ) except TypeError : return datetime . now ( )
Get the last activity datetime string from the session and return the python datetime object .
257
18
22,822
def name ( self ) : return self . inquire ( name = True , lifetime = False , usage = False , mechs = False ) . name
Get the name associated with these credentials
30
7
22,823
def acquire ( cls , name = None , lifetime = None , mechs = None , usage = 'both' , store = None ) : if store is None : res = rcreds . acquire_cred ( name , lifetime , mechs , usage ) else : if rcred_cred_store is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not have support for manipulating " "credential stores" ) store = _encode_dict ( store ) res = rcred_cred_store . acquire_cred_from ( store , name , lifetime , mechs , usage ) return tuples . AcquireCredResult ( cls ( base = res . creds ) , res . mechs , res . lifetime )
Acquire GSSAPI credentials
165
6
22,824
def store ( self , store = None , usage = 'both' , mech = None , overwrite = False , set_default = False ) : if store is None : if rcred_rfc5588 is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not have support for RFC 5588" ) return rcred_rfc5588 . store_cred ( self , usage , mech , overwrite , set_default ) else : if rcred_cred_store is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not have support for manipulating " "credential stores directly" ) store = _encode_dict ( store ) return rcred_cred_store . store_cred_into ( store , self , usage , mech , overwrite , set_default )
Store these credentials into the given store
180
7
22,825
def impersonate ( self , name = None , lifetime = None , mechs = None , usage = 'initiate' ) : if rcred_s4u is None : raise NotImplementedError ( "Your GSSAPI implementation does not " "have support for S4U" ) res = rcred_s4u . acquire_cred_impersonate_name ( self , name , lifetime , mechs , usage ) return type ( self ) ( base = res . creds )
Impersonate a name using the current credentials
107
9
22,826
def inquire ( self , name = True , lifetime = True , usage = True , mechs = True ) : res = rcreds . inquire_cred ( self , name , lifetime , usage , mechs ) if res . name is not None : res_name = names . Name ( res . name ) else : res_name = None return tuples . InquireCredResult ( res_name , res . lifetime , res . usage , res . mechs )
Inspect these credentials for information
99
6
22,827
def inquire_by_mech ( self , mech , name = True , init_lifetime = True , accept_lifetime = True , usage = True ) : res = rcreds . inquire_cred_by_mech ( self , mech , name , init_lifetime , accept_lifetime , usage ) if res . name is not None : res_name = names . Name ( res . name ) else : res_name = None return tuples . InquireCredByMechResult ( res_name , res . init_lifetime , res . accept_lifetime , res . usage )
Inspect these credentials for per - mechanism information
130
9
22,828
def add ( self , name , mech , usage = 'both' , init_lifetime = None , accept_lifetime = None , impersonator = None , store = None ) : if store is not None and impersonator is not None : raise ValueError ( 'You cannot use both the `impersonator` and ' '`store` arguments at the same time' ) if store is not None : if rcred_cred_store is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not have support for manipulating " "credential stores" ) store = _encode_dict ( store ) res = rcred_cred_store . add_cred_from ( store , self , name , mech , usage , init_lifetime , accept_lifetime ) elif impersonator is not None : if rcred_s4u is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not have support for S4U" ) res = rcred_s4u . add_cred_impersonate_name ( self , impersonator , name , mech , usage , init_lifetime , accept_lifetime ) else : res = rcreds . add_cred ( self , name , mech , usage , init_lifetime , accept_lifetime ) return Credentials ( res . creds )
Acquire more credentials to add to the current set
297
10
22,829
def display_as ( self , name_type ) : if rname_rfc6680 is None : raise NotImplementedError ( "Your GSSAPI implementation does not " "support RFC 6680 (the GSSAPI naming " "extensions)" ) return rname_rfc6680 . display_name_ext ( self , name_type ) . decode ( _utils . _get_encoding ( ) )
Display this name as the given name type .
91
9
22,830
def export ( self , composite = False ) : if composite : if rname_rfc6680 is None : raise NotImplementedError ( "Your GSSAPI implementation does " "not support RFC 6680 (the GSSAPI " "naming extensions)" ) return rname_rfc6680 . export_name_composite ( self ) else : return rname . export_name ( self )
Export this name as a token .
88
7
22,831
def _inquire ( self , * * kwargs ) : if rname_rfc6680 is None : raise NotImplementedError ( "Your GSSAPI implementation does not " "support RFC 6680 (the GSSAPI naming " "extensions)" ) if not kwargs : default_val = True else : default_val = False attrs = kwargs . get ( 'attrs' , default_val ) mech_name = kwargs . get ( 'mech_name' , default_val ) return rname_rfc6680 . inquire_name ( self , mech_name = mech_name , attrs = attrs )
Inspect this name for information .
144
7
22,832
def from_sasl_name ( cls , name = None ) : if rfc5801 is None : raise NotImplementedError ( "Your GSSAPI implementation does not " "have support for RFC 5801" ) if isinstance ( name , six . text_type ) : name = name . encode ( _utils . _get_encoding ( ) ) m = rfc5801 . inquire_mech_for_saslname ( name ) return cls ( m )
Create a Mechanism from its SASL name
107
9
22,833
def import_gssapi_extension ( name ) : try : path = 'gssapi.raw.ext_{0}' . format ( name ) __import__ ( path ) return sys . modules [ path ] except ImportError : return None
Import a GSSAPI extension module
53
7
22,834
def inquire_property ( name , doc = None ) : def inquire_property ( self ) : if not self . _started : msg = ( "Cannot read {0} from a security context whose " "establishment has not yet been started." ) raise AttributeError ( msg ) return getattr ( self . _inquire ( * * { name : True } ) , name ) return property ( inquire_property , doc = doc )
Creates a property based on an inquire result
91
9
22,835
def _encode_dict ( d ) : def enc ( x ) : if isinstance ( x , six . text_type ) : return x . encode ( _ENCODING ) else : return x return dict ( ( enc ( k ) , enc ( v ) ) for k , v in six . iteritems ( d ) )
Encodes any relevant strings in a dict
70
8
22,836
def catch_and_return_token ( func , self , * args , * * kwargs ) : try : return func ( self , * args , * * kwargs ) except GSSError as e : if e . token is not None and self . __DEFER_STEP_ERRORS__ : self . _last_err = e # skip the "return func" line above in the traceback if six . PY2 : self . _last_tb = sys . exc_info ( ) [ 2 ] . tb_next . tb_next else : self . _last_err . __traceback__ = e . __traceback__ . tb_next return e . token else : raise
Optionally defer exceptions and return a token instead
155
9
22,837
def check_last_err ( func , self , * args , * * kwargs ) : if self . _last_err is not None : try : if six . PY2 : six . reraise ( type ( self . _last_err ) , self . _last_err , self . _last_tb ) else : # NB(directxman12): not using six.reraise in Python 3 leads # to cleaner tracebacks, and raise x is valid # syntax in Python 3 (unlike raise x, y, z) raise self . _last_err finally : if six . PY2 : del self . _last_tb # in case of cycles, break glass self . _last_err = None else : return func ( self , * args , * * kwargs ) @ deco . decorator def check_last_err ( func , self , * args , * * kwargs ) : if self . _last_err is not None : try : raise self . _last_err finally : self . _last_err = None else : return func ( self , * args , * * kwargs )
Check and raise deferred errors before running the function
246
9
22,838
def velocity_graph ( adata , basis = None , vkey = 'velocity' , which_graph = 'velocity' , n_neighbors = 10 , alpha = .8 , perc = 90 , edge_width = .2 , edge_color = 'grey' , color = None , use_raw = None , layer = None , color_map = None , colorbar = True , palette = None , size = None , sort_order = True , groups = None , components = None , projection = '2d' , legend_loc = 'on data' , legend_fontsize = None , legend_fontweight = None , right_margin = None , left_margin = None , xlabel = None , ylabel = None , title = None , fontsize = None , figsize = None , dpi = None , frameon = None , show = True , save = None , ax = None ) : basis = default_basis ( adata ) if basis is None else basis title = which_graph + ' graph' if title is None else title scatter_kwargs = { "basis" : basis , "perc" : perc , "use_raw" : use_raw , "sort_order" : sort_order , "alpha" : alpha , "components" : components , "projection" : projection , "legend_loc" : legend_loc , "groups" : groups , "legend_fontsize" : legend_fontsize , "legend_fontweight" : legend_fontweight , "palette" : palette , "color_map" : color_map , "frameon" : frameon , "title" : title , "xlabel" : xlabel , "ylabel" : ylabel , "right_margin" : right_margin , "left_margin" : left_margin , "colorbar" : colorbar , "dpi" : dpi , "fontsize" : fontsize , "show" : False , "save" : None , "figsize" : figsize , } ax = scatter ( adata , layer = layer , color = color , size = size , ax = ax , zorder = 0 , * * scatter_kwargs ) from networkx import Graph , draw_networkx_edges if which_graph == 'neighbors' : T = adata . uns [ 'neighbors' ] [ 'connectivities' ] if perc is not None : threshold = np . percentile ( T . data , perc ) T . data [ T . data < threshold ] = 0 T . eliminate_zeros ( ) else : T = transition_matrix ( adata , vkey = vkey , weight_indirect_neighbors = 0 , n_neighbors = n_neighbors , perc = perc ) with warnings . catch_warnings ( ) : warnings . 
simplefilter ( "ignore" ) edges = draw_networkx_edges ( Graph ( T ) , adata . obsm [ 'X_' + basis ] , width = edge_width , edge_color = edge_color , ax = ax ) edges . set_zorder ( - 2 ) edges . set_rasterized ( settings . _vector_friendly ) savefig_or_show ( '' if basis is None else basis , dpi = dpi , save = save , show = show ) if not show : return ax
\ Plot of the velocity graph .
736
7
22,839
def cleanup ( data , clean = 'layers' , keep = None , copy = False ) : adata = data . copy ( ) if copy else data keep = list ( [ keep ] ) if isinstance ( keep , str ) else list ( ) if keep is None else list ( keep ) keep . extend ( [ 'spliced' , 'unspliced' , 'Ms' , 'Mu' , 'clusters' , 'neighbors' ] ) ann_dict = { 'obs' : adata . obs_keys ( ) , 'var' : adata . var_keys ( ) , 'uns' : adata . uns_keys ( ) , 'layers' : list ( adata . layers . keys ( ) ) } if 'all' not in clean : ann_dict = { ann : values for ( ann , values ) in ann_dict . items ( ) if ann in clean } for ( ann , values ) in ann_dict . items ( ) : for value in values : if value not in keep : del ( getattr ( adata , ann ) [ value ] ) return adata if copy else None
Deletes attributes not needed .
241
6
22,840
def filter_genes_dispersion ( data , flavor = 'seurat' , min_disp = None , max_disp = None , min_mean = None , max_mean = None , n_bins = 20 , n_top_genes = None , log = True , copy = False ) : adata = data . copy ( ) if copy else data set_initial_size ( adata ) if n_top_genes is not None and adata . n_vars < n_top_genes : logg . info ( 'Skip filtering by dispersion since number of variables are less than `n_top_genes`' ) else : if flavor is 'svr' : mu = adata . X . mean ( 0 ) . A1 if issparse ( adata . X ) else adata . X . mean ( 0 ) sigma = np . sqrt ( adata . X . multiply ( adata . X ) . mean ( 0 ) . A1 - mu ** 2 ) if issparse ( adata . X ) else adata . X . std ( 0 ) log_mu = np . log2 ( mu ) log_cv = np . log2 ( sigma / mu ) from sklearn . svm import SVR clf = SVR ( gamma = 150. / len ( mu ) ) clf . fit ( log_mu [ : , None ] , log_cv ) score = log_cv - clf . predict ( log_mu [ : , None ] ) nth_score = np . sort ( score ) [ : : - 1 ] [ n_top_genes ] adata . _inplace_subset_var ( score >= nth_score ) else : from scanpy . api . pp import filter_genes_dispersion filter_genes_dispersion ( adata , flavor = flavor , min_disp = min_disp , max_disp = max_disp , min_mean = min_mean , max_mean = max_mean , n_bins = n_bins , n_top_genes = n_top_genes , log = log ) return adata if copy else None
Extract highly variable genes . The normalized dispersion is obtained by scaling with the mean and standard deviation of the dispersions for genes falling into a given bin for mean expression of genes . This means that for each bin of mean expression highly variable genes are selected .
477
52
22,841
def normalize_per_cell ( data , counts_per_cell_after = None , counts_per_cell = None , key_n_counts = None , max_proportion_per_cell = None , use_initial_size = True , layers = [ 'spliced' , 'unspliced' ] , enforce = False , copy = False ) : adata = data . copy ( ) if copy else data layers = adata . layers . keys ( ) if layers is 'all' else [ layers ] if isinstance ( layers , str ) else [ layer for layer in layers if layer in adata . layers . keys ( ) ] layers = [ 'X' ] + layers modified_layers = [ ] for layer in layers : X = adata . X if layer is 'X' else adata . layers [ layer ] if not_yet_normalized ( X ) or enforce : counts = counts_per_cell if counts_per_cell is not None else get_initial_size ( adata , layer ) if use_initial_size else get_size ( adata , layer ) if max_proportion_per_cell is not None and ( 0 < max_proportion_per_cell < 1 ) : counts = counts_per_cell_quantile ( X , max_proportion_per_cell , counts ) # equivalent to scanpy.pp.normalize_per_cell(X, counts_per_cell_after, counts) counts_after = np . median ( counts ) if counts_per_cell_after is None else counts_per_cell_after counts /= counts_after + ( counts_after == 0 ) counts += counts == 0 # to avoid division by zero if issparse ( X ) : sparsefuncs . inplace_row_scale ( X , 1 / counts ) else : X /= np . array ( counts [ : , None ] ) modified_layers . append ( layer ) adata . obs [ 'n_counts' if key_n_counts is None else key_n_counts ] = get_size ( adata ) if len ( modified_layers ) > 0 : logg . info ( 'Normalized count data:' , ', ' . join ( modified_layers ) + '.' ) return adata if copy else None
Normalize each cell by total counts over all genes .
498
11
22,842
def filter_and_normalize ( data , min_counts = None , min_counts_u = None , min_cells = None , min_cells_u = None , min_shared_counts = None , min_shared_cells = None , n_top_genes = None , flavor = 'seurat' , log = True , copy = False ) : adata = data . copy ( ) if copy else data if 'spliced' not in adata . layers . keys ( ) or 'unspliced' not in adata . layers . keys ( ) : raise ValueError ( 'Could not find spliced / unspliced counts.' ) filter_genes ( adata , min_counts = min_counts , min_counts_u = min_counts_u , min_cells = min_cells , min_cells_u = min_cells_u , min_shared_counts = min_shared_counts , min_shared_cells = min_shared_cells , ) normalize_per_cell ( adata ) if n_top_genes is not None : filter_genes_dispersion ( adata , n_top_genes = n_top_genes , flavor = flavor ) log_advised = np . allclose ( adata . X [ : 10 ] . sum ( ) , adata . layers [ 'spliced' ] [ : 10 ] . sum ( ) ) if log and log_advised : log1p ( adata ) logg . info ( 'Logarithmized X.' if log and log_advised else 'Did not modify X as it looks preprocessed already.' if log else 'Consider logarithmizing X with `scv.pp.log1p` for better results.' if log_advised else '' ) return adata if copy else None
Filtering normalization and log transform
405
7
22,843
def toy_data ( n_obs ) : """Random samples from Dentate Gyrus. """ adata = dentategyrus ( ) indices = np . random . choice ( adata . n_obs , n_obs ) adata = adata [ indices ] adata . obs_names = np . array ( range ( adata . n_obs ) , dtype = 'str' ) adata . var_names_make_unique ( ) return adata
Randomly samples from the Dentate Gyrus dataset .
98
11
22,844
def forebrain ( ) : filename = 'data/ForebrainGlut/hgForebrainGlut.loom' url = 'http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom' adata = read ( filename , backup_url = url , cleanup = True , sparse = True , cache = True ) adata . var_names_make_unique ( ) return adata
Developing human forebrain . Forebrain tissue of a week 10 embryo focusing on the glutamatergic neuronal lineage .
103
25
22,845
def set_rcParams_scvelo ( fontsize = 8 , color_map = None , frameon = None ) : # dpi options (mpl default: 100, 100) rcParams [ 'figure.dpi' ] = 100 rcParams [ 'savefig.dpi' ] = 150 # figure (mpl default: 0.125, 0.96, 0.15, 0.91) rcParams [ 'figure.figsize' ] = ( 7 , 5 ) rcParams [ 'figure.subplot.left' ] = 0.18 rcParams [ 'figure.subplot.right' ] = 0.96 rcParams [ 'figure.subplot.bottom' ] = 0.15 rcParams [ 'figure.subplot.top' ] = 0.91 # lines (defaults: 1.5, 6, 1) rcParams [ 'lines.linewidth' ] = 1.5 # the line width of the frame rcParams [ 'lines.markersize' ] = 6 rcParams [ 'lines.markeredgewidth' ] = 1 # font rcParams [ 'font.sans-serif' ] = [ 'Arial' , 'Helvetica' , 'DejaVu Sans' , 'Bitstream Vera Sans' , 'sans-serif' ] fontsize = fontsize labelsize = 0.92 * fontsize # fonsizes (mpl default: 10, medium, large, medium) rcParams [ 'font.size' ] = fontsize rcParams [ 'legend.fontsize' ] = labelsize rcParams [ 'axes.titlesize' ] = fontsize rcParams [ 'axes.labelsize' ] = labelsize # legend (mpl default: 1, 1, 2, 0.8) rcParams [ 'legend.numpoints' ] = 1 rcParams [ 'legend.scatterpoints' ] = 1 rcParams [ 'legend.handlelength' ] = 0.5 rcParams [ 'legend.handletextpad' ] = 0.4 # color cycle rcParams [ 'axes.prop_cycle' ] = cycler ( color = vega_10 ) # axes rcParams [ 'axes.linewidth' ] = 0.8 rcParams [ 'axes.edgecolor' ] = 'black' rcParams [ 'axes.facecolor' ] = 'white' # ticks (mpl default: k, k, medium, medium) rcParams [ 'xtick.color' ] = 'k' rcParams [ 'ytick.color' ] = 'k' rcParams [ 'xtick.labelsize' ] = labelsize rcParams [ 'ytick.labelsize' ] = labelsize # axes grid (mpl default: False, #b0b0b0) rcParams [ 'axes.grid' ] = False rcParams [ 'grid.color' ] = '.8' # color map rcParams [ 'image.cmap' ] = 'RdBu_r' if color_map is None else color_map # frame (mpl default: True) frameon = False if frameon is None else frameon global _frameon _frameon = frameon
Set matplotlib . rcParams to scvelo defaults .
729
14
22,846
def merge(adata, ldata, copy=True):
    """Merges two annotated data matrices.

    Intersects observations (and variables if they differ) of *adata*
    and *ldata*, then copies obs/obsm/uns/layers (and var/varm when the
    variable names agree) from the smaller object into the larger one.
    Returns the merged object when ``copy`` is True, else merges into
    *adata* in place and returns None.
    """
    # Record original counts once per object before any normalization.
    if 'spliced' in ldata.layers.keys() and 'initial_size_spliced' not in ldata.obs.keys():
        set_initial_size(ldata)
    elif 'spliced' in adata.layers.keys() and 'initial_size_spliced' not in adata.obs.keys():
        set_initial_size(adata)
    common_obs = adata.obs_names.intersection(ldata.obs_names)
    common_vars = adata.var_names.intersection(ldata.var_names)
    # No matching cell names: try again after cleaning the obs names.
    if len(common_obs) == 0:
        clean_obs_names(adata)
        clean_obs_names(ldata)
        common_obs = adata.obs_names.intersection(ldata.obs_names)
    if copy:
        # _adata is the object with more variables; _ldata the other one.
        _adata = adata[common_obs].copy() if adata.shape[1] >= ldata.shape[1] else ldata[common_obs].copy()
        _ldata = ldata[common_obs].copy() if adata.shape[1] >= ldata.shape[1] else adata[common_obs].copy()
    else:
        adata._inplace_subset_obs(common_obs)
        _adata, _ldata = adata, ldata[common_obs]
    same_vars = (len(_adata.var_names) == len(_ldata.var_names)
                 and np.all(_adata.var_names == _ldata.var_names))
    if len(common_vars) > 0 and not same_vars:
        _adata._inplace_subset_var(common_vars)
        _ldata._inplace_subset_var(common_vars)
    # Copy per-observation annotations and layers across.
    for attr in _ldata.obs.keys():
        _adata.obs[attr] = _ldata.obs[attr]
    for attr in _ldata.obsm.keys():
        _adata.obsm[attr] = _ldata.obsm[attr]
    for attr in _ldata.uns.keys():
        _adata.uns[attr] = _ldata.uns[attr]
    for attr in _ldata.layers.keys():
        _adata.layers[attr] = _ldata.layers[attr]
    # Copy per-variable annotations only when var names line up exactly.
    if _adata.shape[1] == _ldata.shape[1]:
        same_vars = (len(_adata.var_names) == len(_ldata.var_names)
                     and np.all(_adata.var_names == _ldata.var_names))
        if same_vars:
            for attr in _ldata.var.keys():
                _adata.var[attr] = _ldata.var[attr]
            for attr in _ldata.varm.keys():
                _adata.varm[attr] = _ldata.varm[attr]
        else:
            raise ValueError('Variable names are not identical.')
    return _adata if copy else None
Merges two annotated data matrices .
702
9
22,847
def velocity_graph(data, vkey='velocity', xkey='Ms', tkey=None, basis=None,
                   n_neighbors=None, n_recurse_neighbors=None,
                   random_neighbors_at_max=None, sqrt_transform=False,
                   approx=False, copy=False):
    """Computes velocity graph based on cosine similarities.

    Stores cosine correlations between velocities and candidate cell
    transitions as sparse matrices in ``adata.uns[vkey + '_graph']``
    (positive part) and ``adata.uns[vkey + '_graph_neg']`` (negative
    part).  Returns adata when ``copy`` is True, else updates in place.
    """
    adata = data.copy() if copy else data
    # Compute velocities with default settings if not present yet.
    if vkey not in adata.layers.keys():
        velocity(adata, vkey=vkey)
    vgraph = VelocityGraph(adata, vkey=vkey, xkey=xkey, tkey=tkey,
                           basis=basis, n_neighbors=n_neighbors, approx=approx,
                           n_recurse_neighbors=n_recurse_neighbors,
                           random_neighbors_at_max=random_neighbors_at_max,
                           sqrt_transform=sqrt_transform, report=True)
    logg.info('computing velocity graph', r=True)
    vgraph.compute_cosines()
    adata.uns[vkey + '_graph'] = vgraph.graph
    adata.uns[vkey + '_graph_neg'] = vgraph.graph_neg
    logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added \n'
              ' \'' + vkey + '_graph\', sparse matrix with cosine correlations (adata.uns)')
    return adata if copy else None
Computes velocity graph based on cosine similarities .
350
10
22,848
def optimize_NxN(x, y, fit_offset=False, perc=None):
    """Just to compare with closed-form solution.

    Fits y ~ gamma * x (+ offset) per variable (column) by numerically
    minimizing squared residuals with L-BFGS-B.

    :returns: (offset, gamma) arrays of length n_vars; NaN entries zeroed.
    """
    # Optionally restrict the fit to extreme points (percentile weights).
    if perc is not None:
        if not fit_offset and isinstance(perc, (list, tuple)):
            perc = perc[1]
        weights = get_weight(x, y, perc).astype(bool)
        if issparse(weights):
            weights = weights.A
    else:
        weights = None
    x, y = x.astype(np.float64), y.astype(np.float64)
    n_vars = x.shape[1]
    offset, gamma = np.zeros(n_vars), np.zeros(n_vars)
    for i in range(n_vars):
        xi = x[:, i] if weights is None else x[:, i][weights[:, i]]
        yi = y[:, i] if weights is None else y[:, i][weights[:, i]]
        if fit_offset:
            # m = (offset, slope); offset bounded to be non-negative
            offset[i], gamma[i] = minimize(
                lambda m: np.sum((-yi + xi * m[1] + m[0]) ** 2),
                method="L-BFGS-B", x0=(0, 0.1),
                bounds=[(0, None), (None, None)]).x
        else:
            gamma[i] = minimize(lambda m: np.sum((-yi + xi * m) ** 2),
                                x0=0.1, method="L-BFGS-B").x
    offset[np.isnan(offset)], gamma[np.isnan(gamma)] = 0, 0
    return offset, gamma
Just to compare with closed - form solution
386
8
22,849
def velocity_confidence(data, vkey='velocity', copy=False):
    """Computes confidences of velocities.

    For each cell, the velocity vector is correlated (cosine, after
    per-cell centering) with the velocity vectors of its neighbors; the
    mean correlation is stored in ``adata.obs[vkey + '_confidence']``
    and the vector length in ``adata.obs[vkey + '_length']``.
    """
    adata = data.copy() if copy else data
    if vkey not in adata.layers.keys():
        raise ValueError('You need to run `tl.velocity` first.')
    idx = np.array(adata.var[vkey + '_genes'].values, dtype=bool)
    # NOTE(review): X is assigned but not used below — kept as-is.
    X, V = adata.layers['Ms'][:, idx].copy(), adata.layers[vkey][:, idx].copy()
    indices = get_indices(dist=adata.uns['neighbors']['distances'])[0]
    V -= V.mean(1)[:, None]  # center per cell before cosine correlation
    V_norm = norm(V)
    R = np.zeros(adata.n_obs)
    for i in range(adata.n_obs):
        Vi_neighs = V[indices[i]]
        Vi_neighs -= Vi_neighs.mean(1)[:, None]
        # mean cosine correlation with the neighbors' velocities
        R[i] = np.mean(np.einsum('ij, j', Vi_neighs, V[i])
                       / (norm(Vi_neighs) * V_norm[i])[None, :])
    adata.obs[vkey + '_length'] = V_norm.round(2)
    adata.obs[vkey + '_confidence'] = R
    logg.hint('added \'' + vkey + '_confidence\' (adata.obs)')
    if vkey + '_confidence_transition' not in adata.obs.keys():
        velocity_confidence_transition(adata, vkey)
    return adata if copy else None
Computes confidences of velocities .
405
9
22,850
def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
    """Computes confidences of velocity transitions.

    Correlates the displacement expected from the transition matrix
    with each cell's velocity vector; result is stored in
    ``adata.obs[vkey + '_confidence_transition']``.
    """
    adata = data.copy() if copy else data
    if vkey not in adata.layers.keys():
        raise ValueError('You need to run `tl.velocity` first.')
    idx = np.array(adata.var[vkey + '_genes'].values, dtype=bool)
    T = transition_matrix(adata, vkey=vkey, scale=scale)
    # expected displacement of each cell under the transition matrix
    dX = T.dot(adata.layers['Ms'][:, idx]) - adata.layers['Ms'][:, idx]
    dX -= dX.mean(1)[:, None]
    V = adata.layers[vkey][:, idx].copy()
    V -= V.mean(1)[:, None]
    # cosine correlation between displacement and velocity, per cell
    adata.obs[vkey + '_confidence_transition'] = prod_sum_var(dX, V) / (norm(dX) * norm(V))
    logg.hint('added \'' + vkey + '_confidence_transition\' (adata.obs)')
    return adata if copy else None
Computes confidences of velocity transitions .
281
8
22,851
def cell_fate(data, groupby='clusters', disconnected_groups=None,
              self_transitions=False, n_neighbors=None, copy=False):
    """Computes individual cell endpoints.

    Builds a velocity graph on a copy of the data, converts it into a
    transition matrix, and accumulates expected visits via the
    fundamental matrix (I - T)^-1; the group of the most visited cell
    becomes the fate.  Adds 'cell_fate' and 'cell_fate_confidence'
    to ``adata.obs``.
    """
    adata = data.copy() if copy else data
    logg.info('computing cell fates', r=True)
    n_neighbors = 10 if n_neighbors is None else n_neighbors
    _adata = adata.copy()
    vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True,
                           n_recurse_neighbors=1)
    vgraph.compute_cosines()
    _adata.uns['velocity_graph'] = vgraph.graph
    _adata.uns['velocity_graph_neg'] = vgraph.graph_neg
    T = transition_matrix(_adata, self_transitions=self_transitions)
    I = np.eye(_adata.n_obs)
    # fundamental matrix: expected number of visits to each cell state
    fate = np.linalg.inv(I - T)
    if issparse(T):
        fate = fate.A
    cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)])
    if disconnected_groups is not None:
        # cells of disconnected groups keep their own group as fate
        idx = _adata.obs[groupby].isin(disconnected_groups)
        cell_fates[idx] = _adata.obs[groupby][idx]
    adata.obs['cell_fate'] = cell_fates
    adata.obs['cell_fate_confidence'] = fate.max(1) / fate.sum(1)
    strings_to_categoricals(adata)
    logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added\n'
              ' \'cell_fate\', most likely cell fate (adata.obs)\n'
              ' \'cell_fate_confidence\', confidence of transitioning to the assigned fate (adata.obs)')
    return adata if copy else None
Computes individual cell endpoints
464
6
22,852
def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities',
            method='umap', metric='euclidean', use_rep=None,
            recurse_neighbors=False, renormalize=False, copy=False):
    """Computes moments for velocity estimation.

    First-order moments (kNN-smoothed means) of spliced/unspliced
    abundances are stored as layers 'Ms' and 'Mu'.  Normalization and
    the neighbor graph are computed first when missing.
    """
    adata = data.copy() if copy else data
    if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
        raise ValueError('Could not find spliced / unspliced counts.')
    if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]):
        normalize_per_cell(adata)
    if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors):
        if use_rep is None:
            use_rep = 'X_pca'
        neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep,
                  n_pcs=n_pcs, method=method, metric=metric)
    if mode not in adata.uns['neighbors']:
        raise ValueError('mode can only be \'connectivities\' or \'distances\'')
    logg.info('computing moments based on ' + str(mode), r=True)
    connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors,
                                        recurse_neighbors=recurse_neighbors)
    # smoothed (neighborhood-averaged) spliced / unspliced counts
    adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A
    adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A
    if renormalize:
        normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True)
    logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added \n'
              ' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)')
    return adata if copy else None
Computes moments for velocity estimation .
582
7
22,853
def transition_matrix(adata, vkey='velocity', basis=None, backward=False,
                      self_transitions=True, scale=10, perc=None,
                      use_negative_cosines=False, weight_diffusion=0,
                      scale_diffusion=1, weight_indirect_neighbors=None,
                      n_neighbors=None, vgraph=None):
    """Computes transition probabilities from velocity graph.

    Applies an exponential kernel (scaled by ``scale``) to the cosine
    correlations stored in ``adata.uns[vkey + '_graph']`` and
    row-normalizes.  Optional steps: self transitions, negative
    cosines, indirect-neighbor weighting, reversal (``backward``),
    percentile thresholding and an embedding-based diffusion kernel.

    :returns: sparse row-stochastic transition matrix.
    """
    if vkey + '_graph' not in adata.uns:
        raise ValueError('You need to run `tl.velocity_graph` first to compute cosine correlations.')
    graph = csr_matrix(adata.uns[vkey + '_graph']).copy() if vgraph is None else vgraph.copy()
    if self_transitions:
        # cells with low maximal correlation get a higher self-transition prob.
        confidence = graph.max(1).A.flatten()
        ub = np.percentile(confidence, 98)
        self_prob = np.clip(ub - confidence, 0, 1)
        graph.setdiag(self_prob)
    T = np.expm1(graph * scale)  # equivalent to np.exp(graph.A * scale) - 1
    if vkey + '_graph_neg' in adata.uns.keys():
        graph_neg = adata.uns[vkey + '_graph_neg']
        if use_negative_cosines:
            T -= np.expm1(-graph_neg * scale)
        else:
            T += np.expm1(graph_neg * scale)
            T.data += 1
    # weight direct and indirect (recursed) neighbors
    if 'neighbors' in adata.uns.keys() and weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
        direct_neighbors = adata.uns['neighbors']['distances'] > 0
        direct_neighbors.setdiag(1)
        w = weight_indirect_neighbors
        T = w * T + (1 - w) * direct_neighbors.multiply(T)
    if backward:
        T = T.T
    T = normalize(T)
    if n_neighbors is not None:
        T = T.multiply(get_connectivities(adata, mode='distances',
                                          n_neighbors=n_neighbors,
                                          recurse_neighbors=True))
    if perc is not None:
        threshold = np.percentile(T.data, perc)
        T.data[T.data < threshold] = 0
        T.eliminate_zeros()
    if 'X_' + str(basis) in adata.obsm.keys():
        dists_emb = (T > 0).multiply(squareform(pdist(adata.obsm['X_' + basis])))
        scale_diffusion *= dists_emb.data.mean()
        diffusion_kernel = dists_emb.copy()
        diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / scale_diffusion ** 2)
        T = T.multiply(diffusion_kernel)  # combine velocity based kernel with diffusion based kernel
        if 0 < weight_diffusion < 1:
            # add another diffusion kernel (Brownian motion - like)
            diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / (scale_diffusion / 2) ** 2)
            T = (1 - weight_diffusion) * T + weight_diffusion * diffusion_kernel
        T = normalize(T)
    return T
Computes transition probabilities from velocity graph
781
7
22,854
def apply(self):
    """Apply the rules of the context to its occurrences.

    Each task callable is invoked with the context itself; afterwards
    the occurrences are restored to the snapshot taken beforehand, so
    tasks can mutate them freely while running.
    """
    snapshot = copy.deepcopy(self.occurrences)
    for rule in self.tasks:
        rule(self)
    self.occurrences = snapshot
Apply the rules of the context to its occurrences .
39
10
22,855
def average_price(quantity_1, price_1, quantity_2, price_2):
    """Calculates the average price between two asset states.

    Returns the quantity-weighted mean of the two prices.
    """
    total_cost = quantity_1 * price_1 + quantity_2 * price_2
    total_quantity = quantity_1 + quantity_2
    return total_cost / total_quantity
Calculates the average price between two asset states .
50
11
22,856
def update_holder(self, holder):
    """Update the Holder state according to the occurrence.

    Adjusts the quantity and the average value of this occurrence's
    subject in ``holder.state``.
    """
    subject_symbol = self.subject.symbol
    # If the Holder already has a state regarding this Subject,
    # update that state
    if subject_symbol in holder.state:
        # If the Holder has zero units of this subject, the average
        # value paid/received for the subject is the value of the trade itself
        if not holder.state[subject_symbol]['quantity']:
            holder.state[subject_symbol]['value'] = self.details['value']
        # If the Holder owns units of this subject then the average value
        # paid/received for the subject may need to be updated with
        # this occurrence's details.
        # If the occurrence has the same sign as the quantity in the Holder
        # state, a new average value needs to be calculated for the subject
        elif same_sign(holder.state[subject_symbol]['quantity'],
                       self.details['quantity']):
            holder.state[subject_symbol]['value'] = average_price(
                holder.state[subject_symbol]['quantity'],
                holder.state[subject_symbol]['value'],
                self.details['quantity'],
                self.details['value'])
        # If the occurrence does not have the same sign as the quantity in
        # the Holder state, then do other stuff.
        # A trade app would normally implement some sort of profit/loss
        # logic here.
        # This sample implementation only checks if the average value
        # of the subject needs to be updated and then updates it as needed.
        else:
            if same_sign(self.details['quantity'],
                         holder.state[subject_symbol]['quantity'] + self.details['quantity']):
                holder.state[subject_symbol]['value'] = self.details['value']
        # Update the quantity of the subject in the Holder's possession
        holder.state[subject_symbol]['quantity'] += self.details['quantity']
    # If the Holder doesn't have a state with this occurrence's Subject,
    # then register this occurrence as the first state of the Subject
    # in the Holder's possession
    else:
        holder.state[subject_symbol] = {'quantity': self.details['quantity'],
                                        'value': self.details['value']}
        # If the Holder knows about this Subject but doesn't have any unit
        # of it, the paid value of the subject in the Holder state should
        # be zero.
        if not holder.state[subject_symbol]['quantity']:
            holder.state[subject_symbol]['value'] = 0
Update the Holder state according to the occurrence .
587
11
22,857
def fitter(self, n=0, ftype="real", colfac=1.0e-8, lmfac=1.0e-3):
    """Create a sub-fitter.

    Allocates a new fitter id from the proxy, (re)initializes its state
    record and returns the id.

    NOTE(review): ``n`` is immediately overwritten with
    ``len(self._fitids)`` below, so the caller's ``n`` never reaches
    ``init`` — confirm against upstream whether this is intended.
    """
    fid = self._fitproxy.getid()
    ftype = self._gettype(ftype)
    n = len(self._fitids)
    if 0 <= fid < n:
        self._fitids[fid] = {}
    elif fid == n:
        self._fitids.append({})
    else:
        # shouldn't happen
        raise RangeError("fit id out of range")
    self.init(n=n, ftype=ftype, colfac=colfac, lmfac=lmfac, fid=fid)
    return fid
Create a sub - fitter .
160
7
22,858
def done(self, fid=0):
    """Terminates the fitserver.

    Clears the local state record for *fid* and releases the
    corresponding server-side resources.
    """
    self._checkid(fid)
    self._fitids[fid] = {}
    self._fitproxy.done(fid)
Terminates the fitserver .
39
6
22,859
def reset(self, fid=0):
    """Reset the object's resources to its initialized state.

    Clears the solved/error flags; a looped fitter only needs its flag
    cleared locally, otherwise the proxy performs the reset.
    """
    self._checkid(fid)
    entry = self._fitids[fid]
    entry["solved"] = False
    entry["haserr"] = False
    if entry["looped"]:
        entry["looped"] = False
        return True
    return self._fitproxy.reset(fid)
Reset the object s resources to its initialized state .
99
11
22,860
def addconstraint(self, x, y=0, fnct=None, fid=0):
    """Add constraint.

    Appends a linear constraint (functional, x coefficients, y value)
    to the constraint record of fitter *fid*.  Without an explicit
    functional, a 'hyper' functional of len(x) dimensions is used.
    """
    self._checkid(fid)
    i = 0
    if "constraint" in self._fitids[fid]:
        i = len(self._fitids[fid]["constraint"])
    else:
        self._fitids[fid]["constraint"] = {}
    # dict key needs to be string
    i = str(i)
    self._fitids[fid]["constraint"][i] = {}
    if isinstance(fnct, functional):
        self._fitids[fid]["constraint"][i]["fnct"] = fnct.todict()
    else:
        self._fitids[fid]["constraint"][i]["fnct"] = functional("hyper", len(x)).todict()
    self._fitids[fid]["constraint"][i]["x"] = [float(v) for v in x]
    self._fitids[fid]["constraint"][i]["y"] = float(y)
    # NOTE(review): prints the whole constraint record — looks like a
    # leftover debug statement; confirm whether intended.
    six.print_(self._fitids[fid]["constraint"])
Add constraint .
293
3
22,861
def fitspoly(self, n, x, y, sd=None, wt=1.0, fid=0):
    """Create normal equations from the specified condition equations and
    solve the resulting normal equations. It is in essence a combination.

    Fits a polynomial of degree *n*, scaling the abscissa towards
    [-1, 1] for numerical stability and scaling solution/errors back.
    """
    # scale factor: inverse of the largest |x|
    a = max(abs(max(x)), abs(min(x)))
    if a == 0:
        a = 1
    a = 1.0 / a
    b = NUM.power(a, range(n + 1))  # per-coefficient back-scaling factors
    if self.set(n=n + 1, fid=fid):
        self.linear(poly(n), x * a, y, sd, wt, fid)
        self._fitids[fid]["sol"] *= b
        self._fitids[fid]["error"] *= b
    # NOTE(review): this final unscaled re-fit overwrites the scaled
    # solution above — looks suspicious; confirm against upstream
    # (block extent of the `if` is ambiguous in this source).
    return self.linear(poly(n), x, y, sd, wt, fid)
Create normal equations from the specified condition equations and solve the resulting normal equations . It is in essence a combination .
169
22
22,862
def functional(self, fnct, x, y, sd=None, wt=1.0, mxit=50, fid=0):
    """Make a non-linear least squares solution.

    Delegates to ``_fit`` using at most *mxit* Levenberg-Marquardt
    iterations.
    """
    self._fit(fitfunc="functional", fnct=fnct, x=x, y=y, sd=sd, wt=wt,
              mxit=mxit, fid=fid)
Make a non - linear least squares solution .
79
9
22,863
def linear(self, fnct, x, y, sd=None, wt=1.0, fid=0):
    """Make a linear least squares solution.

    Delegates to ``_fit`` with the 'linear' fit function.
    """
    self._fit(fitfunc="linear", fnct=fnct, x=x, y=y, sd=sd, wt=wt, fid=fid)
Make a linear least squares solution .
68
7
22,864
def constraint(self, n=-1, fid=0):
    """Obtain the set of orthogonal equations that make the solution of
    the rank deficient normal equations possible.

    With the default ``n`` (or any out-of-range value) the full record
    is returned; selecting a single constraint is not implemented yet.
    """
    equations = self._getval("constr", fid)
    if 0 <= n <= self.deficiency(fid):
        raise RuntimeError("Not yet implemented")
    return equations
Obtain the set of orthogonal equations that make the solution of the rank deficient normal equations possible .
58
21
22,865
def fitted(self, fid=0):
    """Test if enough Levenberg-Marquardt loops have been done.

    The remaining-fit counter is treated as converged when it sits in
    the small interval [-0.001, 0].
    """
    self._checkid(fid)
    remaining = self._fitids[fid]["fit"]
    return -0.001 <= remaining <= 0
Test if enough Levenberg - Marquardt loops have been done .
56
15
22,866
def set_data_path(self, pth):
    """Set the location of the measures data directory.

    Silently does nothing when *pth* does not exist; raises IOError
    when it exists but lacks a data/geodetic subdirectory.  On success
    the AIPSPATH environment variable is set.
    """
    if not os.path.exists(pth):
        return
    geodetic = os.path.join(pth, 'data', 'geodetic')
    if not os.path.exists(geodetic):
        raise IOError("The given path doesn't contain a 'data' "
                      "subdirectory")
    os.environ["AIPSPATH"] = "%s dummy dummy" % pth
Set the location of the measures data directory .
94
9
22,867
def asbaseline(self, pos):
    """Convert a position measure into a baseline measure.

    No actual baseline is calculated since operations can be done on
    positions, with subtractions to obtain baselines at a later stage.

    :param pos: a position or baseline measure
    :raises TypeError: if *pos* is neither
    """
    if not is_measure(pos) or pos['type'] not in ['position', 'baseline']:
        raise TypeError('Argument is not a position/baseline measure')
    if pos['type'] == 'position':
        # convert via ITRF, relabel as baseline, then express in J2000
        loc = self.measure(pos, 'itrf')
        loc['type'] = 'baseline'
        return self.measure(loc, 'j2000')
    return pos
Convert a position measure into a baseline measure . No actual baseline is calculated since operations can be done on positions with subtractions to obtain baselines at a later stage .
106
34
22,868
def getvalue(self, v):
    """Return a list of quantities making up the measure's value.

    The value fields of a measure record are stored under keys
    'm0', 'm1', ...; they are collected in (lexicographic) key order
    and converted to quantities.

    :param v: a measure record (dict)
    :raises TypeError: if *v* is not a measure
    """
    if not is_measure(v):
        raise TypeError('Incorrect input type for getvalue()')
    import re
    rx = re.compile(r"m\d+")  # value fields are named m0, m1, ...
    # BUG FIX: `v.keys()[:]` + `.sort()` fails on Python 3 (dict views
    # are not sliceable); sorted() keeps the same lexicographic order.
    return [dq.quantity(v.get(key)) for key in sorted(v) if rx.match(key)]
Return a list of quantities making up the measures value .
104
11
22,869
def doframe(self, v):
    """This method will set the measure specified as part of a frame.

    Rest frequencies are accepted as-is; all other measures are passed
    to the underlying _measures.doframe.  On success the measure is
    cached in the frame stack and True is returned.
    """
    if not is_measure(v):
        raise TypeError('Argument is not a measure')
    if (v["type"] == "frequency" and v["refer"].lower() == "rest") or _measures.doframe(self, v):
        self._framestack[v["type"]] = v
        return True
    return False
This method will set the measure specified as part of a frame .
93
13
22,870
def addDerivedMSCal(msname):
    """Add the derived columns like HA to an MS or CalTable.

    Adds hour-angle, parallactic-angle, local-sidereal-time and J2000
    UVW columns, all served on the fly by the DerivedMSCal virtual
    data manager.

    :param msname: name of the MeasurementSet / CalTable
    :raises ValueError: if a required column is missing
    """
    # Open the MS
    t = table(msname, readonly=False, ack=False)
    colnames = t.colnames()
    # Check that the columns needed by DerivedMSCal are present.
    # Note that ANTENNA2 and FEED2 are not required.
    for col in ["TIME", "ANTENNA1", "FIELD_ID", "FEED1"]:
        if col not in colnames:
            # BUG FIX: the original concatenated the *list* `colnames`
            # into the message, raising TypeError instead of ValueError.
            raise ValueError("Column " + col +
                             " should be present in table " + msname)
    scols1 = ['HA', 'HA1', 'HA2', 'PA1', 'PA2']
    scols2 = ['LAST', 'LAST1', 'LAST2']
    acols1 = ['AZEL1', 'AZEL2']
    acols2 = ['UVW_J2000']
    descs = []
    # Define the columns and their units.
    for col in scols1:
        descs.append(makescacoldesc(col, 0., keywords={"QuantumUnits": ["rad"]}))
    for col in scols2:
        descs.append(makescacoldesc(col, 0., keywords={"QuantumUnits": ["d"]}))
    for col in acols1:
        descs.append(makearrcoldesc(col, 0., keywords={"QuantumUnits": ["rad", "rad"]}))
    for col in acols2:
        descs.append(makearrcoldesc(col, 0.,
                                    keywords={"QuantumUnits": ["m", "m", "m"],
                                              "MEASINFO": {"Ref": "J2000", "type": "uvw"}}))
    # Add all columns using DerivedMSCal as data manager.
    dminfo = {"TYPE": "DerivedMSCal", "NAME": "", "SPEC": {}}
    t.addcols(maketabdesc(descs), dminfo)
    # Flush the table to make sure it is written.
    t.flush()
Add the derived columns like HA to an MS or CalTable .
503
13
22,871
def removeDerivedMSCal(msname):
    """Remove the derived columns like HA from an MS or CalTable.

    Drops every column group handled by the DerivedMSCal data manager.

    :param msname: name of the MeasurementSet / CalTable
    """
    # Open the MS
    t = table(msname, readonly=False, ack=False)
    # Remove the columns stored as DerivedMSCal.
    dmi = t.getdminfo()
    for x in dmi.values():
        if x['TYPE'] == 'DerivedMSCal':
            t.removecols(x['COLUMNS'])
    t.flush()
Remove the derived columns like HA from an MS or CalTable .
99
13
22,872
def msregularize(msname, newname):
    """Regularize an MS.

    Ensures every time/band chunk contains a row for each baseline,
    adding flagged rows with zero data where needed.  The result
    *newname* references the original MS (plus, when rows were added,
    a companion table ``newname + '_adds'``).
    """
    # Find out all baselines.
    t = table(msname)
    t1 = t.sort('unique ANTENNA1,ANTENNA2')
    nadded = 0
    # Now iterate in time,band over the MS.
    for tsub in t.iter(['TIME', 'DATA_DESC_ID']):
        nmissing = t1.nrows() - tsub.nrows()
        if nmissing < 0:
            raise ValueError("A time/band chunk has too many rows")
        if nmissing > 0:
            # Rows needs to be added for the missing baselines.
            # NOTE(review): the two string assignments below are
            # immediately overwritten — dead code; confirm upstream.
            ant1 = str(t1.getcol('ANTENNA1')).replace(' ', ',')
            ant2 = str(t1.getcol('ANTENNA2')).replace(' ', ',')
            ant1 = tsub.getcol('ANTENNA1')
            ant2 = tsub.getcol('ANTENNA2')
            # Select the baselines of t1 missing from this chunk.
            t2 = taql('select from $t1 where !any(ANTENNA1 == $ant1 &&' +
                      ' ANTENNA2 == $ant2)')
            six.print_(nmissing, t1.nrows(), tsub.nrows(), t2.nrows())
            if t2.nrows() != nmissing:
                raise ValueError("A time/band chunk behaves strangely")
            # If nothing added yet, create a new table.
            # (which has to be reopened for read/write).
            # Otherwise append to that new table.
            if nadded == 0:
                tnew = t2.copy(newname + "_add", deep=True)
                tnew = table(newname + "_add", readonly=False)
            else:
                t2.copyrows(tnew)
            # Set the correct time and band in the new rows.
            tnew.putcell('TIME', range(nadded, nadded + nmissing),
                         tsub.getcell('TIME', 0))
            tnew.putcell('DATA_DESC_ID', range(nadded, nadded + nmissing),
                         tsub.getcell('DATA_DESC_ID', 0))
            nadded += nmissing
    # Combine the existing table and new table.
    if nadded > 0:
        # First initialize data and flags in the added rows.
        taql('update $tnew set DATA=0+0i')
        taql('update $tnew set FLAG=True')
        tcomb = table([t, tnew])
        tcomb.rename(newname + '_adds')
        tcombs = tcomb.sort('TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2')
    else:
        tcombs = t.query(offset=0)
    tcombs.rename(newname)
    six.print_(newname, 'has been created; it references the original MS')
    if nadded > 0:
        six.print_(' and', newname + '_adds', 'containing', nadded, 'new rows')
    else:
        six.print_(' no rows needed to be added')
Regularize an MS
721
4
22,873
def _repr_html_ ( self ) : out = "<table class='taqltable'>\n" # Print column name (not if it is auto-generated) if not ( self . name ( ) [ : 4 ] == "Col_" ) : out += "<tr>" out += "<th><b>" + self . name ( ) + "</b></th>" out += "</tr>" cropped = False rowcount = 0 colkeywords = self . getkeywords ( ) for row in self : out += "\n<tr>" out += "<td>" + _format_cell ( row , colkeywords ) + "</td>\n" out += "</tr>\n" rowcount += 1 out += "\n" if rowcount >= 20 : cropped = True break if out [ - 2 : ] == "\n\n" : out = out [ : - 1 ] out += "</table>" if cropped : out += "<p style='text-align:center'>(" + str ( self . nrows ( ) - 20 ) + " more rows)</p>\n" return out
Give a nice representation of columns in notebooks .
239
9
22,874
def _get_coordinatenames(self):
    """Create ordered list of coordinate names.

    Scans the coordinate-system record for keys like 'direction0',
    'spectral1', ... and stores the base names ordered by their index,
    reversed into python/C axis order, in ``self._names``.

    :raises LookupError: if no valid coordinate keys are found
    """
    validnames = ("direction", "spectral", "linear", "stokes", "tabular")
    self._names = [""] * len(validnames)
    n = 0
    for key in self._csys.keys():
        for name in validnames:
            if key.startswith(name):
                # trailing digits of the key give the coordinate index
                idx = int(key[len(name):])
                self._names[idx] = name
                n += 1
    # reverse as we are c order in python
    self._names = self._names[:n][::-1]
    if len(self._names) == 0:
        raise LookupError("Coordinate record doesn't contain valid coordinates")
Create ordered list of coordinate names
167
6
22,875
def set_projection(self, val):
    """Set the projection of the given axis in this coordinate.

    :param val: projection code (case-insensitive), e.g. 'SIN', 'TAN'
    :raises ValueError: if the projection code is not known
    """
    # Known projection codes handled here (duplicate "ZEA" removed).
    knownproj = ["SIN", "ZEA", "TAN", "NCP", "AIT"]  # etc
    proj = val.upper()
    # Validate explicitly instead of `assert`, which is stripped
    # when Python runs with -O.
    if proj not in knownproj:
        raise ValueError("Unknown projection '%s'; expected one of %s"
                         % (val, ", ".join(knownproj)))
    self._coord["projection"] = proj
Set the projection of the given axis in this coordinate .
74
11
22,876
def tablefromascii(tablename, asciifile, headerfile='', autoheader=False,
                   autoshape=[], columnnames=[], datatypes=[], sep=' ',
                   commentmarker='', firstline=1, lastline=-1, readonly=True,
                   lockoptions='default', ack=True):
    """Create a table from an ASCII file.

    Expands environment variables and '~' in the file names, builds the
    table from the ASCII data (optionally with a separate header file),
    prints the deduced format and reopens the result in the requested
    mode.

    :raises IOError: if the ASCII or header file does not exist
    """
    import os.path
    filename = os.path.expandvars(asciifile)
    filename = os.path.expanduser(filename)
    if not os.path.exists(filename):
        s = "File '%s' not found" % (filename)
        raise IOError(s)
    if headerfile != '':
        filename = os.path.expandvars(headerfile)
        filename = os.path.expanduser(filename)
        if not os.path.exists(filename):
            s = "File '%s' not found" % (filename)
            raise IOError(s)
    tab = table(asciifile, headerfile, tablename, autoheader, autoshape,
                sep, commentmarker, firstline, lastline,
                _columnnames=columnnames, _datatypes=datatypes, _oper=1)
    six.print_('Input format: [' + tab._getasciiformat() + ']')
    # Close table and reopen it in correct way.
    tab = 0
    return table(tablename, readonly=readonly, lockoptions=lockoptions, ack=ack)
Create a table from an ASCII file .
329
8
22,877
def makescacoldesc(columnname, value, datamanagertype='', datamanagergroup='',
                   options=0, maxlen=0, comment='', valuetype='', keywords=None):
    """Create description of a scalar column.

    :param columnname: name of the column
    :param value: example value; its type is used when *valuetype* is ''
    :param keywords: optional keyword dict stored with the column
    :returns: dict with 'name' and 'desc' entries, usable by maketabdesc
    """
    vtype = valuetype
    if vtype == '':
        vtype = _value_type_name(value)
    rec2 = {'valueType': vtype,
            'dataManagerType': datamanagertype,
            'dataManagerGroup': datamanagergroup,
            'option': options,
            'maxlen': maxlen,
            'comment': comment,
            # BUG FIX: the default used to be the mutable `keywords={}`,
            # which was stored by reference in every returned description;
            # use a fresh dict per call instead.
            'keywords': {} if keywords is None else keywords}
    return {'name': columnname, 'desc': rec2}
Create description of a scalar column .
154
8
22,878
def makearrcoldesc(columnname, value, ndim=0, shape=None, datamanagertype='',
                   datamanagergroup='', options=0, maxlen=0, comment='',
                   valuetype='', keywords=None):
    """Create description of an array column.

    :param columnname: name of the column
    :param value: example value; its type is used when *valuetype* is ''
    :param ndim: dimensionality (derived from *shape* when <= 0)
    :param shape: optional fixed shape of the array cells
    :param keywords: optional keyword dict stored with the column
    :returns: dict with 'name' and 'desc' entries, usable by maketabdesc
    """
    # BUG FIX: the mutable defaults `shape=[]` / `keywords={}` used to be
    # stored by reference in every returned description; create fresh
    # objects per call instead.
    if shape is None:
        shape = []
    if keywords is None:
        keywords = {}
    vtype = valuetype
    if vtype == '':
        vtype = _value_type_name(value)
    if len(shape) > 0:
        if ndim <= 0:
            ndim = len(shape)
    rec2 = {'valueType': vtype,
            'dataManagerType': datamanagertype,
            'dataManagerGroup': datamanagergroup,
            'ndim': ndim,
            'shape': shape,
            '_c_order': True,
            'option': options,
            'maxlen': maxlen,
            'comment': comment,
            'keywords': keywords}
    return {'name': columnname, 'desc': rec2}
Create description of an array column .
207
7
22,879
def maketabdesc(descs=[]):
    """Create a table description.

    Combines one or more column descriptions (as produced by
    makescacoldesc / makearrcoldesc) into a single record keyed on
    the column names.

    :raises ValueError: if a column name occurs more than once
    """
    # Accept a single description dict as well as a list of them.
    if isinstance(descs, dict):
        descs = [descs]
    tabledesc = {}
    for desc in descs:
        name = desc['name']
        if name in tabledesc:
            raise ValueError('Column name ' + name + ' multiply used in table description')
        tabledesc[name] = desc['desc']
    return tabledesc
Create a table description .
104
5
22,880
def makedminfo(tabdesc, group_spec=None):
    """Creates a data manager information object.

    Groups the columns of *tabdesc* by their dataManagerGroup and emits
    the dminfo record used at table creation; *group_spec* may supply a
    per-group SPEC record.

    :raises ValueError: if columns within one group disagree on the
        data manager type
    """
    if group_spec is None:
        group_spec = {}

    class DMGroup(object):
        """
        Keep track of the columns, type and spec of
        each data manager group
        """

        def __init__(self):
            self.columns = []
            self.type = None
            self.spec = None

    dm_groups = defaultdict(DMGroup)
    # Iterate through the table columns, grouping them
    # by their dataManagerGroup
    for c, d in six.iteritems(tabdesc):
        # skip the pseudo-columns of a table description
        if c in ('_define_hypercolumn_', '_keywords_', '_private_keywords_'):
            continue
        # Extract group and data manager type
        group = d.get("dataManagerGroup", "StandardStMan")
        type_ = d.get("dataManagerType", "StandardStMan")
        # Set defaults if necessary
        if not group:
            group = "StandardStMan"
        if not type_:
            type_ = "StandardStMan"
        # Obtain the (possibly empty) data manager group
        dm_group = dm_groups[group]
        # Add the column
        dm_group.columns.append(c)
        # Set the spec
        if dm_group.spec is None:
            dm_group.spec = group_spec.get(group, {})
        # Check that the data manager type is consistent across columns
        if dm_group.type is None:
            dm_group.type = type_
        elif not dm_group.type == type_:
            raise ValueError("Mismatched dataManagerType '%s' "
                             "for dataManagerGroup '%s' "
                             "Previously, the type was '%s'"
                             % (type_, group, dm_group.type))
    # Output a data manager entry
    return {'*%d' % (i + 1): {
        'COLUMNS': dm_group.columns,
        'TYPE': dm_group.type,
        'NAME': group,
        'SPEC': dm_group.spec,
        'SEQNR': i
    } for i, (group, dm_group) in enumerate(six.iteritems(dm_groups))}
Creates a data manager information object .
482
8
22,881
def tabledefinehypercolumn(tabdesc, name, ndim, datacolumns,
                           coordcolumns=False, idcolumns=False):
    """Add a hypercolumn to a table description.

    Registers the hypercolumn *name* (with its data, and optionally
    coordinate and id columns) under '_define_hypercolumn_' in
    *tabdesc*, modifying it in place.
    """
    hcdef = {'HCndim': ndim, 'HCdatanames': datacolumns}
    # False means "not given"; any other value is a column-name list.
    if not isinstance(coordcolumns, bool):
        hcdef['HCcoordnames'] = coordcolumns
    if not isinstance(idcolumns, bool):
        hcdef['HCidnames'] = idcolumns
    tabdesc.setdefault('_define_hypercolumn_', {})[name] = hcdef
Add a hypercolumn to a table description .
154
9
22,882
def tabledelete(tablename, checksubtables=False, ack=True):
    """Delete a table on disk.

    Refuses (with a printed message) when the table is still in use
    elsewhere; otherwise reopens it with the delete flag set.
    """
    tabname = _remove_prefix(tablename)
    t = table(tabname, ack=False)
    if t.ismultiused(checksubtables):
        six.print_('Table', tabname, 'cannot be deleted; it is still in use')
    else:
        # drop our own handle before deleting
        t = 0
        table(tabname, readonly=False, _delete=True, ack=False)
        if ack:
            six.print_('Table', tabname, 'has been deleted')
Delete a table on disk .
132
6
22,883
def tableexists(tablename):
    """Test if a table exists.

    Simply tries to open the table; any failure to open is interpreted
    as "does not exist".
    """
    try:
        table(tablename, ack=False)
        return True
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed
    except Exception:
        return False
Test if a table exists .
35
6
22,884
def tableiswritable(tablename):
    """Test if a table is writable.

    Opens the table read/write and queries its writability; any failure
    to open is interpreted as "not writable".
    """
    try:
        t = table(tablename, readonly=False, ack=False)
        return t.iswritable()
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed
    except Exception:
        return False
Test if a table is writable .
50
8
22,885
def tablestructure(tablename, dataman=True, column=True, subtable=False, sort=False):
    """Print the structure of a table.

    Thin wrapper around table.showstructure().
    """
    t = table(tablename, ack=False)
    six.print_(t.showstructure(dataman, column, subtable, sort))
Print the structure of a table .
63
7
22,886
def attrget(self, groupname, attrname, rownr):
    """Get the value of an attribute in the given row in a group.

    Thin wrapper delegating to the underlying binding.
    """
    return self._attrget(groupname, attrname, rownr)
Get the value of an attribute in the given row in a group .
37
14
22,887
def attrgetcol(self, groupname, attrname):
    """Get the value of an attribute for all rows in a group.

    Returns the per-row values as a list, in row order.
    """
    nrows = self.attrnrows(groupname)
    return [self.attrget(groupname, attrname, rownr) for rownr in range(nrows)]
Get the value of an attribute for all rows in a group .
62
13
22,888
def attrfindrows(self, groupname, attrname, value):
    """Get the row numbers of all rows where the attribute matches the
    given value.

    :param value: value compared with ``==`` against each row's attribute
    :returns: list of matching row numbers
    """
    values = self.attrgetcol(groupname, attrname)
    # enumerate instead of the range(len(...)) indexing anti-pattern
    return [rownr for rownr, v in enumerate(values) if v == value]
Get the row numbers of all rows where the attribute matches the given value .
55
15
22,889
def attrgetrow(self, groupname, key, value=None):
    """Get the values of all attributes of a row in a group.

    If `key` is not a string it is used directly as a row number.
    Otherwise `key` names an attribute and the single row where that
    attribute equals `value` is looked up; IndexError is raised when no
    row or more than one row matches.
    """
    if not isinstance(key, str):
        return self._attrgetrow(groupname, key)
    # The key is an attribute name whose value has to be found.
    matches = self.attrfindrows(groupname, key, value)
    if not matches:
        raise IndexError("Image attribute " + key +
                         " in group " + groupname +
                         " has no matches for value " + str(value))
    if len(matches) > 1:
        raise IndexError("Image attribute " + key +
                         " in group " + groupname +
                         " has multiple matches for value " + str(value))
    return self._attrgetrow(groupname, matches[0])
Get the values of all attributes of a row in a group .
175
13
22,890
def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]):
    """Put the value and optionally unit and measinfo of an attribute in a row in a group."""
    # The mutable [] defaults are part of the public signature and are
    # only passed through (never mutated here), so they are kept as-is.
    result = self._attrput(groupname, attrname, rownr, value, unit, meas)
    return result
Put the value and optionally unit and measinfo of an attribute in a row in a group .
55
19
22,891
def getdata(self, blc=(), trc=(), inc=()):
    """Get image data.

    blc/trc/inc give the bottom-left corner, top-right corner and stride
    of the region; empty tuples mean the full image (after adjustment).
    """
    bottom = self._adjustBlc(blc)
    top = self._adjustTrc(trc)
    stride = self._adjustInc(inc)
    return self._getdata(bottom, top, stride)
Get image data .
62
4
22,892
def getmask(self, blc=(), trc=(), inc=()):
    """Get image mask.

    The stored mask and numpy use opposite flag conventions (see putmask),
    so the raw mask is inverted before being returned.
    """
    raw = self._getmask(self._adjustBlc(blc),
                        self._adjustTrc(trc),
                        self._adjustInc(inc))
    return numpy.logical_not(raw)
Get image mask .
70
4
22,893
def get(self, blc=(), trc=(), inc=()):
    """Get image data and mask combined into one numpy masked array."""
    data = self.getdata(blc, trc, inc)
    mask = self.getmask(blc, trc, inc)
    return nma.masked_array(data, mask)
Get image data and mask .
59
6
22,894
def putdata(self, value, blc=(), trc=(), inc=()):
    """Put image data.

    NOTE(review): `trc` is accepted but never used — presumably the shape
    of `value` determines the region; confirm against _putdata.
    """
    bottom = self._adjustBlc(blc)
    stride = self._adjustInc(inc)
    return self._putdata(value, bottom, stride)
Put image data .
55
4
22,895
def putmask(self, value, blc=(), trc=(), inc=()):
    """Put image mask.

    NOTE(review): like putdata, `trc` is accepted but unused.
    """
    # casa and numpy have opposite flags, so invert before storing.
    flipped = ~value
    return self._putmask(flipped,
                         self._adjustBlc(blc),
                         self._adjustInc(inc))
Put image mask .
65
4
22,896
def put(self, value, blc=(), trc=(), inc=()):
    """Put image data and mask.

    A plain array only updates the data; a numpy MaskedArray updates both
    the data and the mask.
    """
    if not isinstance(value, nma.MaskedArray):
        self.putdata(value, blc, trc, inc)
        return
    # Split the masked array into its data and mask parts.
    self.putdata(value.data, blc, trc, inc)
    self.putmask(nma.getmaskarray(value), blc, trc, inc)
Put image data and mask .
96
6
22,897
def subimage(self, blc=(), trc=(), inc=(), dropdegenerate=True):
    """Form a subimage covering the given region, wrapped as a new image."""
    sub = self._subimage(self._adjustBlc(blc),
                         self._adjustTrc(trc),
                         self._adjustInc(inc),
                         dropdegenerate)
    return image(sub)
Form a subimage .
77
5
22,898
def info(self):
    """Get coordinates, image info, misc info and unit as one dict."""
    result = {}
    result['coordinates'] = self._coordinates()
    result['imageinfo'] = self._imageinfo()
    result['miscinfo'] = self._miscinfo()
    result['unit'] = self._unit()
    return result
Get coordinates image info and unit .
58
7
22,899
def tofits(self, filename, overwrite=True, velocity=True, optical=True,
           bitpix=-32, minpix=1, maxpix=-1):
    """Write the image to a file in FITS format.

    bitpix=-32 selects 32-bit float output; minpix/maxpix bound the pixel
    scaling range (maxpix=-1 presumably means "use the data maximum" —
    confirm against _tofits).
    """
    status = self._tofits(filename, overwrite, velocity, optical,
                          bitpix, minpix, maxpix)
    return status
Write the image to a file in FITS format .
68
11