signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def current_frame(self):
    """Compute the number of the current frame (0-indexed)."""
    if self._pause_level:
        # Playback is frozen: report the frame showing when it was paused.
        return self._paused_frame
    elapsed = self._clock() + self._offset
    return int(elapsed * self.frames_per_second) % len(self._frames)
def _strip(string, pattern):
    """Return complement of pattern in string"""
    match = re.search(pattern, string)
    if match is None:
        # No occurrence: the string is already the full complement.
        return string
    # Drop only the first match, keeping everything around it.
    return string[:match.start()] + string[match.end():]
def write_versions(dirs, config=None, is_wrapper=False):
    """Write CSV file with versions used in analysis pipeline."""
    out_file = _get_program_file(dirs)
    if is_wrapper:
        # Running inside a VM wrapper: the file must already exist.
        assert utils.file_exists(out_file), "Failed to create program versions from VM"
        return out_file
    if out_file is None:
        # No destination configured: dump program,version pairs to stdout.
        for entry in _get_versions(config):
            print("{program},{version}".format(**entry))
        return out_file
    with open(out_file, "w") as out_handle:
        for entry in _get_versions(config):
            out_handle.write("{program},{version}\n".format(**entry))
    return out_file
def to_dict(self):
    """Render a MessageElement as python dict.

    :return: Python dict representation
    :rtype: dict
    """
    result = dict(
        type=self.__class__.__name__,
        element_id=self.element_id,
        style_class=self.style_class,
        icon=self.icon,
        attributes=self.attributes,
    )
    return result
def _depth_to_seq2cov ( input_fpath , output_fpath , sample_name ) : """Args : input _ fpath : output of " mosdepth " : chr22 14250 15500 name3 5.54 chrM 100 1000 name1 916.08 output _ fpath : path to write results - input for Seq2C ' s cov2lr . pl , e . g . : seq2cov : chr20 _ tumor _ 1 DEFB125 chr20 68346 68413 Amplicon 68 28.0 chr20 _ tumor _ 1 DEFB125 chr20 76641 77301 Amplicon 661 24.0 chr20 _ tumor _ 1 DEFB125 chr20 68346 77301 Whole - Gene 729 24.3731138546 sample _ name : sample name ( e . g . chr20 _ tumor _ 1)"""
# First round : collecting gene ends gene_end_by_gene = defaultdict ( lambda : - 1 ) with utils . open_gzipsafe ( input_fpath ) as f : for xs in ( l . rstrip ( ) . split ( ) for l in f if not l . startswith ( "#" ) ) : xs = [ x for x in xs if x . strip ( ) ] if any ( x == "." for x in xs ) : continue end = int ( xs [ 2 ] ) gene_name = xs [ 3 ] gene_end_by_gene [ gene_name ] = max ( gene_end_by_gene [ gene_name ] , end ) # Second round : calculating gene level coverage , and writing file for Seq2C total_cov_by_gene = dict ( ) gene_start_by_gene = dict ( ) total_size_by_gene = dict ( ) with utils . open_gzipsafe ( input_fpath ) as f , open ( output_fpath , 'w' ) as out : for xs in ( l . rstrip ( ) . split ( ) for l in f if not l . startswith ( "#" ) ) : xs = [ x for x in xs if x . strip ( ) ] if any ( x == "." for x in xs ) : continue chrom , start , end , gene_name = xs [ : 4 ] start , end = int ( start ) , int ( end ) ave_depth = float ( xs [ - 1 ] ) if gene_name not in gene_start_by_gene : gene_start_by_gene [ gene_name ] = start total_cov_by_gene [ gene_name ] = 0 total_size_by_gene [ gene_name ] = 0 else : gene_start_by_gene [ gene_name ] = min ( start , gene_start_by_gene [ gene_name ] ) total_cov_by_gene [ gene_name ] += ave_depth * ( end - start ) total_size_by_gene [ gene_name ] += end - start fs = [ sample_name , gene_name , chrom , str ( start + 1 ) , str ( end ) , 'Amplicon' , str ( end - start ) , str ( ave_depth ) ] out . write ( '\t' . join ( fs ) + '\n' ) if end >= gene_end_by_gene [ gene_name ] : assert end == gene_end_by_gene [ gene_name ] , ( end , gene_end_by_gene [ gene_name ] ) start = gene_start_by_gene [ gene_name ] ave_depth = total_cov_by_gene [ gene_name ] / total_size_by_gene [ gene_name ] size = total_size_by_gene [ gene_name ] fs = [ sample_name , gene_name , chrom , str ( start + 1 ) , str ( end ) , 'Whole-Gene' , str ( size ) , str ( ave_depth ) ] out . write ( '\t' . join ( fs ) + '\n' ) return output_fpath
def select2(self, box, drop, text):
    '''Select an option from a "select2"-styled dropdown control.

    :param box: the locator for Selection Box
    :param drop: the locator for Selection Dropdown
    :param text: the text value to select or the index of the option to select
    :return: True
    :example: https://github.com/ldiary/marigoso/blob/master/notebooks/handling_select2_controls_in_selenium_webdriver.ipynb
    '''
    # Open the dropdown first if it is not already visible.
    if not self.is_available(drop):
        if isinstance(box, str):
            self.get_element(box).click()
        else:
            # `box` is already a located element.
            box.click()
    ul_dropdown = self.get_element(drop)
    options = ul_dropdown.get_children('tag=li')
    # An integer `text` selects by position rather than by label.
    if isinstance(text, int):
        options[text].click()
        return True
    for option in options:
        if option.text == text:
            option.click()
            return True
    # Nothing matched: report the failure (and implicitly return None).
    print("[Error!] Selection not found: {}".format(text))
    print("Available Selections\n {}".format([option.text for option in options]))
def add_subprocess_to_diagram(self, process_id, subprocess_name, is_expanded=False, triggered_by_event=False, node_id=None):
    """Adds a SubProcess element to BPMN diagram.

    User-defined attributes: name, triggered_by_event.

    :param process_id: string object. ID of parent process,
    :param subprocess_name: string object. Name of subprocess,
    :param is_expanded: boolean value for attribute "isExpanded". Default value false,
    :param triggered_by_event: boolean value for attribute "triggeredByEvent". Default value false,
    :param node_id: string object. ID of node. Default value - None.
    :return: a tuple, where first value is subProcess ID, second a reference to created object.
    """
    node_ref, sub_obj = self.add_flow_node_to_diagram(process_id, consts.Consts.subprocess, subprocess_name, node_id)
    # BPMN serializes booleans as the literal strings "true"/"false".
    attrs = self.diagram_graph.node[node_ref]
    attrs[consts.Consts.is_expanded] = "true" if is_expanded else "false"
    attrs[consts.Consts.triggered_by_event] = "true" if triggered_by_event else "false"
    return node_ref, sub_obj
def is_stopped(self, *args, **kwargs):
    """Return whether this container is stopped"""
    # Non-blocking poll: force waiting=False before delegating.
    options = dict(kwargs, waiting=False)
    return self.wait_till_stopped(*args, **options)
def to_html(doc, output="/tmp", style="dep"):
    """Doc method extension for saving the current state as a displaCy visualization."""
    # Generate filename from first six non-punct tokens.
    file_name = "-".join([w.text for w in doc[:6] if not w.is_punct]) + ".html"
    html = displacy.render(doc, style=style, page=True)  # render markup
    if output is not None:
        output_path = Path(output)
        if not output_path.exists():
            output_path.mkdir()
        output_file = output_path / file_name
        # write_text opens, writes and closes the handle; the original used
        # `open("w").write(...)` and leaked the open file object.
        output_file.write_text(html, encoding="utf-8")
        print("Saved HTML to {}".format(output_file))
    else:
        print(html)
def task(self, func, *args, **kwargs):
    """Pushes a task onto the queue (with the specified options).

    This will instantiate a ``Gator.task_class`` instance, configure the
    task execution options, configure the callable & its arguments, then
    push it onto the queue. You'll typically call this method when
    specifying advanced options.

    :param func: The callable with business logic to execute
    :type func: callable
    :param args: Positional arguments to pass to the callable task
    :type args: list
    :param kwargs: Keyword arguments to pass to the callable task
    :type kwargs: dict
    :returns: The ``Task`` instance
    """
    new_task = self.gator.task_class(**self.task_kwargs)
    return self.gator.push(new_task, func, *args, **kwargs)
def write(self, oprot):
    '''Write this object to the given output protocol and return self.

    :type oprot: thryft.protocol._output_protocol._OutputProtocol
    :rtype: pastpy.gen.database.impl.dbf.dbf_database_configuration.DbfDatabaseConfiguration
    '''
    oprot.write_struct_begin('DbfDatabaseConfiguration')
    # All three fields are optional strings (thrift type 11) serialized the
    # same way, in declaration order; skip any that are unset.
    for field_name in ('pp_images_dir_path', 'pp_install_dir_path', 'pp_objects_dbf_file_path'):
        field_value = getattr(self, field_name)
        if field_value is None:
            continue
        oprot.write_field_begin(name=field_name, type=11, id=None)
        oprot.write_string(field_value)
        oprot.write_field_end()
    oprot.write_field_stop()
    oprot.write_struct_end()
    return self
def _load_all(self):
    '''Load all of them'''
    with self._lock:
        for name in self.file_mapping:
            # Membership is re-checked per iteration because loading one
            # module may register others as loaded or missing.
            if name not in self.loaded_files and name not in self.missing_modules:
                self._load_module(name)
        self.loaded = True
def _get_hierarchy(cls):
    """Internal helper to return the list of polymorphic base classes.

    This returns a list of class objects, e.g. [Animal, Feline, Cat].
    """
    # Everything in the MRO that participates in the polymorphic hierarchy...
    bases = [base for base in cls.mro() if hasattr(base, '_get_hierarchy')]  # pragma: no branch
    # ...minus the trailing PolyModel root, ordered root-first.
    return bases[-2::-1]
def tickets(self, extra_params=None):
    """All Tickets which are a part of this Milestone"""
    def _in_milestone(ticket):
        # Tickets lacking a 'milestone_id' key never match.
        return ticket.get('milestone_id', None) == self['id']
    return filter(_in_milestone, self.space.tickets(extra_params=extra_params))
def js_exec(self, method: str, *args: Union[int, str, bool]) -> None:
    """Execute ``method`` in the related node on browser.

    Remaining positional arguments become the ``params`` of the message.
    If this node is not connected (e.g. not in any document tree), the
    ``method`` is not executed.
    """
    if not self.connected:
        # Detached from the browser document: nothing to execute on.
        return
    self.ws_send(dict(method=method, params=args))
def push_concurrency_history_item(self, state, number_concurrent_threads):
    """Adds a new concurrency-history-item to the history item list.

    A concurrent history item stores information about the point in time
    where a certain number of states is launched concurrently (e.g. in a
    barrier concurrency state).

    :param state: the state that launches the state group
    :param number_concurrent_threads: the number of states that are launched
    """
    last_history_item = self.get_last_history_item()
    # Reuse the item fetched above instead of calling get_last_history_item()
    # a second time, so the ConcurrencyItem and _push_item are guaranteed to
    # see the same predecessor even if the history changes in between.
    return_item = ConcurrencyItem(state, last_history_item, number_concurrent_threads,
                                  state.run_id, self.execution_history_storage)
    return self._push_item(last_history_item, return_item)
def get_event_with_balance_proof_by_balance_hash(
        storage: sqlite.SQLiteStorage,
        canonical_identifier: CanonicalIdentifier,
        balance_hash: BalanceHash,
) -> sqlite.EventRecord:
    """Returns the event which contains the corresponding balance proof.

    Use this function to find a balance proof for a call to settle, which
    only has the blinded balance proof data.
    """
    # Match on the canonical-identifier triple plus the balance hash; the
    # numeric identifiers are stored as strings in the serialized event.
    return storage.get_latest_event_by_data_field({
        'balance_proof.canonical_identifier.chain_identifier': str(
            canonical_identifier.chain_identifier,
        ),
        'balance_proof.canonical_identifier.token_network_address': to_checksum_address(
            canonical_identifier.token_network_address,
        ),
        'balance_proof.canonical_identifier.channel_identifier': str(
            canonical_identifier.channel_identifier,
        ),
        'balance_proof.balance_hash': serialize_bytes(balance_hash),
    })
def _flush(self):
    """Return a list of all current data, or None when a save cache is active.

    :raises Exception: if called while recording is in progress.
    """
    if self._recording:
        raise Exception("Cannot flush data queue while recording!")
    if self._saving_cache:
        # logging.warn is a deprecated alias; logging.warning is the API.
        logging.warning("Flush when using cache means unsaved data will be lost and not returned!")
        self._cmds_q.put(("reset_data_segment",))
        # Nothing to return in the cache path; made explicit here.
        return None
    data = self._extract_q(0)
    return data
def glow_hparams():
    """Glow Hparams."""
    hparams = common_hparams.basic_params1()
    hparams.clip_grad_norm = None
    hparams.weight_decay = 0.0
    hparams.learning_rate_constant = 3e-4
    hparams.batch_size = 32
    extra_hparams = [
        # can be prev_level, prev_step or normal.
        # see: glow_ops.merge_level_and_latent_dist
        ("level_scale", "prev_level"),
        ("n_levels", 3),
        ("n_bits_x", 8),
        ("depth", 32),
        # Activation - Relu or Gatu
        ("activation", "relu"),
        # Coupling layer, additive or affine.
        ("coupling", "affine"),
        ("coupling_width", 512),
        ("coupling_dropout", 0.0),
        ("top_prior", "single_conv"),
        # init_batch_size denotes the number of examples used for data-dependent
        # initialization. A higher init_batch_size is required for training
        # stability especially when hparams.batch_size is low.
        ("init_batch_size", 256),
        ("temperature", 1.0),
    ]
    for name, value in extra_hparams:
        hparams.add_hparam(name, value)
    return hparams
def get_manager(self, identity):
    """Given the identity return a HPEManager object.

    :param identity: The identity of the Manager resource
    :returns: The Manager object
    """
    version = self.redfish_version
    return manager.HPEManager(self._conn, identity, redfish_version=version)
def fixed(self):
    """Returns a list of just the fixed source roots in the trie."""
    # Fixed roots live under the reserved '^' child, when present.
    child = self._root.children.get('^')
    if child is None:
        return []
    return list(child.subpatterns())
def get_top_level_indicator_node(root_node):
    """This returns the first top level Indicator node under the criteria node.

    :param root_node: Root node of an etree.
    :return: an elementTree Element item, or None if no item is found.
    """
    if root_node.tag != 'OpenIOC':
        raise IOCParseError('Root tag is not "OpenIOC" [{}].'.format(root_node.tag))
    elems = root_node.xpath('criteria/Indicator')
    if not elems:
        log.warning('No top level Indicator node found.')
        return None
    if len(elems) > 1:
        log.warning('Multiple top level Indicator nodes found. This is not a valid MIR IOC.')
        return None
    top_level_indicator_node = elems[0]
    # A non-OR operator is only warned about; the node is still returned.
    if top_level_indicator_node.get('operator').lower() != 'or':
        log.warning('Top level Indicator/@operator attribute is not "OR". This is not a valid MIR IOC.')
    return top_level_indicator_node
def get_cpu_property(self, property_p):
    """Returns the virtual CPU boolean value of the specified property.

    in property_p of type :class:`CPUPropertyType`
        Property type to query.
    return value of type bool
        Property value.
    raises :class:`OleErrorInvalidarg`
        Invalid property.
    """
    if not isinstance(property_p, CPUPropertyType):
        raise TypeError("property_p can only be an instance of type CPUPropertyType")
    return self._call("getCPUProperty", in_p=[property_p])
def schedule(code, interval, secret_key=None, url=None):
    """Schedule a string of `code` to be executed every `interval`.

    Specifying an `interval` of 0 indicates the event should only be run
    one time and will not be rescheduled.
    """
    secret_key = secret_key or default_key()
    base_url = url or default_url()
    endpoint = '%s/schedule' % base_url
    payload = {
        'interval': interval,
        'code': code,
    }
    return _send_with_auth(payload, secret_key, endpoint)
def get_api_group(self, **kwargs):
    """get information of a group

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_group(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIGroup
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous (data) and asynchronous (thread) paths make the
    # identical delegate call, so no branching is needed.
    return self.get_api_group_with_http_info(**kwargs)
def get_profiles(self, profile_base="/data/b2g/mozilla", timeout=None):
    """Return a mapping of gecko profile names to their paths on the device.

    :param timeout: Timeout of each adb command run
    :param profile_base: Base directory containing the profiles.ini file
    """
    rv = {}
    if timeout is None:
        timeout = self._timeout
    profile_path = posixpath.join(profile_base, "profiles.ini")
    try:
        # Read profiles.ini off the device via a shell `cat`.
        proc = self.shell("cat %s" % profile_path, timeout=timeout)
        # NOTE(review): if self.shell itself raises, `proc` is unbound and the
        # `finally` block below raises NameError -- TODO confirm/fix upstream.
        config = ConfigParser.ConfigParser()
        # NOTE(review): ConfigParser.ConfigParser/readfp is the Python 2
        # spelling; presumably this module targets Python 2 -- verify before
        # modernizing.
        config.readfp(proc.stdout_file)
        for section in config.sections():
            items = dict(config.items(section))
            if "name" in items and "path" in items:
                path = items["path"]
                # Relative profile paths are resolved against profile_base.
                if "isrelative" in items and int(items["isrelative"]):
                    path = posixpath.normpath("%s/%s" % (profile_base, path))
                rv[items["name"]] = path
    finally:
        proc.stdout_file.close()
        proc.stderr_file.close()
    return rv
def find_by_tag(self, tag, params=None, **options):
    """Returns the compact task records for all tasks with the given tag.

    Parameters
    ----------
    tag : {Id} The tag in which to search for tasks.
    [params] : {Object} Parameters for the request
    """
    # A mutable default ({}) would be shared across calls; use None sentinel.
    if params is None:
        params = {}
    path = "/tags/%s/tasks" % (tag)
    return self.client.get_collection(path, params, **options)
def _to_bytes(self, data):
    """Normalize text data to ``bytes`` so that the go bindings can handle it easily.

    Returns ``data`` unchanged when it is already bytes, its UTF-8 encoding
    when it is text, and None when it cannot be encoded.
    """
    # TODO: On Python 3, move this `if` line to the `except` branch
    # as the common case will indeed no longer be bytes.
    if isinstance(data, bytes):
        return data
    try:
        return data.encode('utf-8')
    except (AttributeError, UnicodeEncodeError):
        # Not text-like (no .encode) or unencodable (e.g. lone surrogates);
        # narrowed from a bare `except Exception` that hid real errors.
        return None
def calc_uvw(sdmfile, scan=0, datetime=0, radec=()):
    """Calculates and returns uvw in meters for a given SDM, time, and pointing direction.

    sdmfile is path to sdm directory that includes "Station.xml" file.
    scan is scan number defined by observatory.
    datetime is time (as string) to calculate uvw (format: '2014/09/03/08:33:04.20')
    radec is (ra, dec) as tuple in units of degrees (format: (180., +45.))
    """
    # set up CASA tools
    try:
        import casautil
    except ImportError:
        try:
            import pwkit.environments.casa.util as casautil
        except ImportError:
            logger.info('Cannot find pwkit/casautil. No calc_uvw possible.')
            return
    me = casautil.tools.measures()
    qa = casautil.tools.quanta()
    logger.debug('Accessing CASA libraries with casautil.')
    assert os.path.exists(os.path.join(sdmfile, 'Station.xml')), 'sdmfile %s has no Station.xml file. Not an SDM?' % sdmfile
    # get scan info
    scans, sources = read_metadata(sdmfile, scan)
    # default is to use scan info
    if (datetime == 0) and (len(radec) == 0):
        assert scan != 0, 'scan must be set when using datetime and radec'  # default scan value not valid
        logger.info('Calculating uvw for first integration of scan %d of source %s' % (scan, scans[scan]['source']))
        datetime = qa.time(qa.quantity(scans[scan]['startmjd'], 'd'), form="ymd", prec=8)[0]
        sourcenum = [kk for kk in sources.keys() if sources[kk]['source'] == scans[scan]['source']][0]
        direction = me.direction('J2000',
                                 str(np.degrees(sources[sourcenum]['ra'])) + 'deg',
                                 str(np.degrees(sources[sourcenum]['dec'])) + 'deg')
    # secondary case is when datetime is also given
    elif (datetime != 0) and (len(radec) == 0):
        assert scan != 0, 'scan must be set when using datetime and radec'  # default scan value not valid
        assert '/' in datetime, 'datetime must be in yyyy/mm/dd/hh:mm:ss.sss format'
        logger.info('Calculating uvw at %s for scan %d of source %s' % (datetime, scan, scans[scan]['source']))
        sourcenum = [kk for kk in sources.keys() if sources[kk]['source'] == scans[scan]['source']][0]
        direction = me.direction('J2000',
                                 str(np.degrees(sources[sourcenum]['ra'])) + 'deg',
                                 str(np.degrees(sources[sourcenum]['dec'])) + 'deg')
    else:
        assert '/' in datetime, 'datetime must be in yyyy/mm/dd/hh:mm:ss.sss format'
        assert len(radec) == 2, 'radec must be (ra,dec) tuple in units of degrees'
        ra = radec[0]
        dec = radec[1]
        # BUG FIX: build `direction` before logging it; the original logged
        # the variable before it was assigned (NameError).
        direction = me.direction('J2000', str(ra) + 'deg', str(dec) + 'deg')
        logger.info('Calculating uvw at %s in direction %s' % (datetime, direction))
        logger.info('This mode assumes all antennas used.')
    # define metadata "frame" for uvw calculation
    sdm = sdmpy.SDM(sdmfile)
    telescopename = sdm['ExecBlock'][0]['telescopeName'].strip()
    logger.debug('Found observatory name %s' % telescopename)
    me.doframe(me.observatory(telescopename))
    me.doframe(me.epoch('utc', datetime))
    me.doframe(direction)
    # read antpos
    if scan != 0:
        configid = [row.configDescriptionId for row in sdm['Main'] if scan == int(row.scanNumber)][0]
        antidlist = [row.antennaId for row in sdm['ConfigDescription'] if configid == row.configDescriptionId][0].split(' ')[2:]
        stationidlist = [ant.stationId for antid in antidlist for ant in sdm['Antenna'] if antid == ant.antennaId]
    else:
        stationidlist = [ant.stationId for ant in sdm['Antenna']]
    positions = [station.position.strip().split(' ') for station in sdm['Station'] if station.stationId in stationidlist]
    x = [float(positions[i][2]) for i in range(len(positions))]
    y = [float(positions[i][3]) for i in range(len(positions))]
    z = [float(positions[i][4]) for i in range(len(positions))]
    ants = me.position('itrf', qa.quantity(x, 'm'), qa.quantity(y, 'm'), qa.quantity(z, 'm'))
    # calc bl
    bls = me.asbaseline(ants)
    uvwlist = me.expand(me.touvw(bls)[0])[1]['value']
    # define new bl order to match sdm binary file bl order
    # BUG FIX: use integer division; `len(uvwlist) / 3` is a float on
    # Python 3 and np.empty rejects non-integer shapes.
    nbl = len(uvwlist) // 3
    u = np.empty(nbl)
    v = np.empty(nbl)
    w = np.empty(nbl)
    nants = len(ants['m0']['value'])
    ord1 = [i * nants + j for i in range(nants) for j in range(i + 1, nants)]
    ord2 = [i * nants + j for j in range(nants) for i in range(j)]
    key = []
    for new in ord2:
        key.append(ord1.index(new))
    for i in range(len(key)):
        u[i] = uvwlist[3 * key[i]]
        v[i] = uvwlist[3 * key[i] + 1]
        w[i] = uvwlist[3 * key[i] + 2]
    return u, v, w
def chunk(seq, n):
    # http://stackoverflow.com/a/312464/190597 (Ned Batchelder)
    """Yield successive n-sized chunks from seq."""
    start = 0
    total = len(seq)
    while start < total:
        yield seq[start:start + n]
        start += n
def destroy(self):
    """destroy the server."""
    logger.info("destroying server")
    if not self.library:
        # Library never loaded: nothing to tear down.
        return
    self.library.Srv_Destroy(ctypes.byref(self.pointer))
def BindVar(self, var_id, value):
    """Associates a value with given variable.

    This can be called multiple times to associate multiple values.

    Args:
        var_id: A variable id to bind the values to.
        value: A value to bind to the specified variable.

    Raises:
        KeyError: If given variable is not specified in the pattern.
    """
    known_vars = self._vars
    if var_id not in known_vars:
        raise KeyError(var_id)
    # Bindings accumulate; each call appends another value.
    self._var_bindings[var_id].append(value)
def register_entity(self, entity_value, entity_type, alias_of=None):
    """Register an entity to be tagged in potential parse results.

    Args:
        entity_value (str): the value/proper name of an entity instance
            (Ex: "The Big Bang Theory")
        entity_type (str): the type/tag of an entity instance
            (Ex: "Television Show")
        alias_of (str): optional canonical value this entity is an alias for
    """
    # An alias points at its canonical value; otherwise the entity points
    # at itself.
    canonical = alias_of if alias_of else entity_value
    self.trie.insert(entity_value.lower(), data=(canonical, entity_type))
    # The type itself is also registered, as a 'Concept'.
    self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))
def _sort_text(definition):
    """Ensure builtins appear at the bottom.

    Description is of format <type>: <module>.<item>
    """
    # 'Hidden' names (leading underscore) get a 'z' marker so they sort
    # after everything else; all other names get 'a'.
    marker = 'z' if definition.name.startswith('_') else 'a'
    return marker + definition.name
def tsqr(a):
    """Perform a QR decomposition of a tall-skinny matrix.

    Args:
        a: A distributed matrix with shape MxN (suppose K = min(M, N)).

    Returns:
        A tuple of q (a DistArray) and r (a numpy array) satisfying the
        following.
            - If q_full = ray.get(DistArray, q).assemble(), then
              q_full.shape == (M, K).
            - np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
            - If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
            - np.allclose(r, np.triu(r)) == True.
    """
    if len(a.shape) != 2:
        raise Exception("tsqr requires len(a.shape) == 2, but a.shape is "
                        "{}".format(a.shape))
    if a.num_blocks[1] != 1:
        raise Exception("tsqr requires a.num_blocks[1] == 1, but a.num_blocks "
                        "is {}".format(a.num_blocks))
    num_blocks = a.num_blocks[0]
    # Height of the pairwise reduction tree over the per-block R factors.
    K = int(np.ceil(np.log2(num_blocks))) + 1
    q_tree = np.empty((num_blocks, K), dtype=object)
    current_rs = []
    # Leaf level: independent QR of every row block.
    for i in range(num_blocks):
        block = a.objectids[i, 0]
        q, r = ra.linalg.qr.remote(block)
        q_tree[i, 0] = q
        current_rs.append(r)
    # Reduction levels: stack pairs of Rs and re-factorize until one remains.
    for j in range(1, K):
        new_rs = []
        for i in range(int(np.ceil(1.0 * len(current_rs) / 2))):
            stacked_rs = ra.vstack.remote(*current_rs[(2 * i):(2 * i + 2)])
            q, r = ra.linalg.qr.remote(stacked_rs)
            q_tree[i, j] = q
            new_rs.append(r)
        current_rs = new_rs
    assert len(current_rs) == 1, "len(current_rs) = " + str(len(current_rs))
    # handle the special case in which the whole DistArray "a" fits in one
    # block and has fewer rows than columns, this is a bit ugly so think about
    # how to remove it
    if a.shape[0] >= a.shape[1]:
        q_shape = a.shape
    else:
        q_shape = [a.shape[0], a.shape[0]]
    q_num_blocks = core.DistArray.compute_num_blocks(q_shape)
    q_objectids = np.empty(q_num_blocks, dtype=object)
    q_result = core.DistArray(q_shape, q_objectids)
    # reconstruct output: propagate each leaf Q up the tree, multiplying by
    # the appropriate half of each intermediate level's Q.
    for i in range(num_blocks):
        q_block_current = q_tree[i, 0]
        ith_index = i
        for j in range(1, K):
            # Even index -> this block was the top half of the stacked pair
            # at level j; odd index -> the bottom half.
            if np.mod(ith_index, 2) == 0:
                lower = [0, 0]
                upper = [a.shape[1], core.BLOCK_SIZE]
            else:
                lower = [a.shape[1], 0]
                upper = [2 * a.shape[1], core.BLOCK_SIZE]
            ith_index //= 2
            q_block_current = ra.dot.remote(q_block_current, ra.subarray.remote(q_tree[ith_index, j], lower, upper))
        q_result.objectids[i] = q_block_current
    r = current_rs[0]
    return q_result, ray.get(r)
def encrypt(self, serialized):
    """Encrypts the serialized message using Fernet.

    :param serialized: the serialized object to encrypt
    :type serialized: bytes
    :returns: an encrypted bytes returned by Fernet
    """
    cipher = Fernet(self.encryption_cipher_key)
    return cipher.encrypt(serialized)
def p_if_statement_1 ( self , p ) : """if _ statement : IF LPAREN expr RPAREN statement"""
p [ 0 ] = ast . If ( predicate = p [ 3 ] , consequent = p [ 5 ] )
def approvewitness(ctx, witnesses, account):
    """Approve witness(es)"""
    # Broadcast the approval and pretty-print the resulting transaction.
    tx = ctx.bitshares.approvewitness(witnesses, account=account)
    print_tx(tx)
def assert_valid_sdl_extension(document_ast: DocumentNode, schema: GraphQLSchema) -> None:
    """Assert document is a valid SDL extension.

    Utility function which asserts a SDL document is valid by throwing an
    error if it is invalid.
    """
    errors = validate_sdl(document_ast, schema)
    if not errors:
        return
    raise TypeError("\n\n".join(error.message for error in errors))
def srbt(peer, pkts, inter=0.1, *args, **kargs):
    """send and receive using a bluetooth socket

    :param peer: bluetooth peer to connect the socket to
    :param pkts: packets to send
    :param inter: inter-packet interval in seconds
    :returns: the (answered, unanswered) pair from sndrcv
    """
    s = conf.BTsocket(peer=peer)
    try:
        a, b = sndrcv(s, pkts, inter=inter, *args, **kargs)
    finally:
        # Close the socket even when sndrcv raises (the original leaked it).
        s.close()
    return a, b
def _load_neighbors_from_external_source(self) -> None:
    """Loads the neighbors of the node from the igraph `Graph` instance
    that is wrapped by the graph that has this node."""
    graph: IGraphWrapper = self._graph
    # Locate the wrapped igraph vertex backing this node.
    ig_vertex: IGraphVertex = graph.wrapped_graph.vs[self._igraph_index]
    ig_neighbors: List[IGraphVertex] = ig_vertex.neighbors()
    for ig_neighbor in ig_neighbors:
        # Vertices without a "name" attribute fall back to their index.
        try:
            name: str = ig_neighbor["name"]
        except KeyError:
            name: str = str(ig_neighbor.index)
        # "external_id" is optional on the igraph side.
        try:
            external_id: Optional[str] = ig_neighbor["external_id"]
        except KeyError:
            external_id: Optional[str] = None
        neighbor: IGraphNode = graph.nodes.get_node_by_name(name, can_validate_and_load=True, external_id=external_id)
        graph.add_edge(self, neighbor)
def step1(self, username, password):
    """First authentication step."""
    self._check_initialized()
    context = AtvSRPContext(
        str(username), str(password),
        prime=constants.PRIME_2048,
        generator=constants.PRIME_2048_GEN)
    # The SRP session wants the private key as a hex string.
    private_hex = binascii.hexlify(self._auth_private).decode()
    self.session = SRPClientSession(context, private_hex)
def explain(self):
    """Returns an explain plan record for this cursor.

    .. mongodoc:: explain
    """
    # Deep-copy onto a fresh base cursor so explaining does not disturb
    # this cursor's state.
    fresh_base = Cursor(self.collection)
    clone = self._clone(deepcopy=True, base=fresh_base)
    return clone.explain()
def get_google_token(account_email, oauth_client_id, oauth_redirect_uri, oauth_scope, account_password=None, account_otp_secret=None, **kwargs):
    """Generate a Google OAuth token.

    :param account_email: (REQUIRED)
    :param oauth_client_id: (REQUIRED)
    :param oauth_redirect_uri: (REQUIRED)
    :param oauth_scope: (REQUIRED)
    :param account_password: Necessary for first use.
    :param account_otp_secret: Necessary for first use if enabled on account
    :return: generated token
    """
    items = {
        "account_email": account_email,
        "oauth_client_id": oauth_client_id,
        "oauth_redirect_uri": oauth_redirect_uri,
        "oauth_scope": oauth_scope,
        "account_password": account_password,
        "account_otp_secret": account_otp_secret,
    }
    # Extra keyword arguments are passed through without overriding the
    # named parameters above.
    for key, value in kwargs.items():
        items.setdefault(key, value)
    generator = GoogleTokenGenerator(**items)
    return generator.generate()
def _get_timestamp_ms(when):
    """Converts a datetime.datetime to integer milliseconds since the epoch.

    Requires special handling to preserve microseconds.

    Args:
        when: A datetime.datetime instance.

    Returns:
        Integer time since the epoch in milliseconds. If the supplied 'when'
        is None, the return value will be None.
    """
    if when is None:
        return None
    # Whole seconds via mktime (local-time interpretation of the UTC tuple,
    # preserved from the original behavior), plus sub-second microseconds.
    whole_ms = time.mktime(when.utctimetuple()) * 1000.0
    frac_ms = when.microsecond / 1000.0
    return int(whole_ms + frac_ms)
def to_hising(self):
    """Construct a higher-order Ising problem from a binary polynomial.

    Returns:
        tuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes
        the linear biases, `J` has the higher-order biases and `offset` is
        the linear offset.

    Examples:
        >>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN)
        >>> h, J, off = poly.to_hising()
        {'a': -1}
    """
    # BINARY polynomials are first converted to SPIN, then re-dispatched.
    if self.vartype is Vartype.BINARY:
        return self.to_spin().to_hising()
    h = {}
    J = {}
    offset = 0
    for term, bias in self.items():
        degree = len(term)
        if degree == 0:
            # Constant term contributes to the offset.
            offset += bias
        elif degree == 1:
            v, = term
            h[v] = bias
        else:
            J[tuple(term)] = bias
    return h, J, offset
def train_episode(agent, envs, preprocessors, t_max, render):
    """Complete an episode's worth of training for each environment.

    :param agent: the A3C-style agent whose model is queried and updated
    :param envs: list of gym-like environments stepped in lockstep
    :param preprocessors: one observation preprocessor per environment
    :param t_max: number of steps between parameter updates
    :param render: whether to render envs[0] each step
    :returns: numpy array of total episode rewards, one per environment
    """
    num_envs = len(envs)
    # Buffers to hold trajectories, e.g. `env_xs[i]` will hold the
    # observations for environment `i`.
    env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
    env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
    # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float`
    # (== np.float64) is the documented replacement for the alias.
    episode_rs = np.zeros(num_envs, dtype=float)
    for p in preprocessors:
        p.reset()
    observations = [p.preprocess(e.reset()) for p, e in zip(preprocessors, envs)]
    done = np.array([False for _ in range(num_envs)])
    all_done = False
    t = 1
    while not all_done:
        if render:
            envs[0].render()
        # NOTE(reed): Reshape to set the data shape.
        agent.model.reshape([('data', (num_envs, preprocessors[0].obs_size))])
        step_xs = np.vstack([o.ravel() for o in observations])
        # Get actions and values for all environments in a single forward pass.
        step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
        data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
        agent.model.forward(data_batch, is_train=False)
        _, step_vs, _, step_ps = agent.model.get_outputs()
        step_ps = step_ps.asnumpy()
        step_vs = step_vs.asnumpy()
        step_as = agent.act(step_ps)
        # Step each environment whose episode has not completed.
        for i, env in enumerate(envs):
            if not done[i]:
                obs, r, done[i], _ = env.step(step_as[i])
                # Record the observation, action, value, and reward in the
                # buffers.
                env_xs[i].append(step_xs[i].ravel())
                env_as[i].append(step_as[i])
                env_vs[i].append(step_vs[i][0])
                env_rs[i].append(r)
                episode_rs[i] += r
                # Add 0 as the state value when done.
                if done[i]:
                    env_vs[i].append(0.0)
                else:
                    observations[i] = preprocessors[i].preprocess(obs)
        # Perform an update every `t_max` steps.
        if t == t_max:
            # If the episode has not finished, add current state's value. This
            # will be used to 'bootstrap' the final return (see Algorithm S3
            # in A3C paper).
            step_xs = np.vstack([o.ravel() for o in observations])
            step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
            data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
            agent.model.forward(data_batch, is_train=False)
            _, extra_vs, _, _ = agent.model.get_outputs()
            extra_vs = extra_vs.asnumpy()
            for i in range(num_envs):
                if not done[i]:
                    env_vs[i].append(extra_vs[i][0])
            # Perform update and clear buffers.
            env_xs = np.vstack(list(chain.from_iterable(env_xs)))
            agent.train_step(env_xs, env_as, env_rs, env_vs)
            env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
            env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
            t = 0
        all_done = np.all(done)
        t += 1
    return episode_rs
def _reconnect(self):
    """Re-open the ReadRows stream, resuming from the most recent offset."""
    resume_request = _copy_stream_position(self._position)
    self._wrapped = self._client.read_rows(
        resume_request, **self._read_rows_kwargs
    )
def make_type(typename, lineno, implicit=False):
    """Convert a typename identifier (e.g. 'float') to its internal symbol
    table entry representation, wrapped in a TYPEREF AST node.

    E.g. ``DIM a As Integer`` will access the Integer type.  Returns None
    (after the symbol table signals the error) when the type name has not
    been declared.
    """
    assert isinstance(typename, str)
    if not SYMBOL_TABLE.check_is_declared(typename, lineno, 'type'):
        return None
    entry = SYMBOL_TABLE.get_entry(typename)
    return symbols.TYPEREF(entry, lineno, implicit)
def _build_params_from_kwargs(self, **kwargs):
    """Build an API parameter dict from the passed keyword arguments.

    Looks each keyword up in the available API parameters, validates its
    type (and maximum length where declared), and maps it to the wire
    parameter name.  Unknown keywords produce a warning and are dropped;
    optional parameters left as None are omitted.

    :param kwargs: candidate API parameters
    :type kwargs: dict
    :raises ValueError: if a parameter has the wrong type or exceeds its
        allowed length.
    :raises PushalotException: if a required parameter is not set.
    :return: dictionary which can be sent directly to the API server
    :rtype: dict
    """
    api_methods = self.get_api_params()
    required_methods = self.get_api_required_params()
    ret_kwargs = {}
    for key, val in kwargs.items():
        if key not in api_methods:
            # Fixed message typo: was "uknown".
            warnings.warn('Passed unknown parameter [{}]'.format(key), Warning)
            continue
        # Optional parameters explicitly left as None are simply skipped.
        if key not in required_methods and val is None:
            continue
        # Strict type check (isinstance would also accept bool for int, etc.).
        if type(val) != api_methods[key]['type']:
            raise ValueError(
                "Invalid type specified to param: {}".format(key)
            )
        if 'max_len' in api_methods[key]:
            if len(val) > api_methods[key]['max_len']:
                # Fixed message typo: was "Lenght".
                raise ValueError(
                    "Length of parameter [{}] more than "
                    "allowed length".format(key)
                )
        ret_kwargs[api_methods[key]['param']] = val
    for item in required_methods:
        if item not in ret_kwargs:
            raise pushalot.exc.PushalotException(
                "Parameter [{}] required, but not set".format(item)
            )
    return ret_kwargs
def community_topic_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/topics#delete-topic"
    path = "/api/v2/community/topics/{id}.json".format(id=id)
    return self.call(path, method="DELETE", **kwargs)
def is_name_valid(fqn):
    """Check whether ``fqn`` is an acceptable fully-qualified name.

    A valid name is a string of the form ``name.namespace`` where both
    halves are nonempty, the name part is base-40 and contains no '+' or
    extra '.', the namespace part is itself valid, and the whole string
    does not exceed the blockchain name-length limit.  Returns True or
    False; never raises.
    """
    # NOTE: ``unicode`` means this module targets Python 2.
    if not isinstance(fqn, (str, unicode)):
        return False
    if fqn.count(".") != 1:
        return False
    name, namespace_id = fqn.split(".")
    if not (name and namespace_id):
        return False
    if "+" in name or "." in name or not is_b40(name):
        return False
    if not is_namespace_valid(namespace_id):
        return False
    return len(fqn) <= LENGTHS['blockchain_id_name']
def collect_via_nvidia_smi(self, stats_config):
    """Collect GPU metrics by invoking the nvidia-smi command line tool.

    :param stats_config: ordered list of nvidia-smi query fields; the
        first field is assumed to be the GPU index.
    :return: None
    """
    raw_output = self.run_command([
        '--query-gpu={query_gpu}'.format(query_gpu=','.join(stats_config)),
        '--format=csv,nounits,noheader',
    ])
    if raw_output is None:
        return
    for line in raw_output[0].strip().split("\n"):
        values = line.strip().split(',')
        assert len(values) == len(stats_config)
        gpu_index = values[0]
        # Skip the index column; publish each remaining stat per GPU.
        for stat_name, metric in izip(stats_config[1:], values[1:]):
            metric_name = 'gpu_{index}.{stat_name}'.format(
                index=str(gpu_index), stat_name=stat_name)
            self.publish(metric_name, metric)
def get_datetime(formatted_str, user_fmt=None):
    # type: (AnyStr, Optional[AnyStr]) -> datetime
    """Parse *formatted_str* into a :class:`datetime` using common formats.

    A built-in list of date (and date-time) format strings is tried in
    order; ``user_fmt`` (a string, list, or tuple of formats) is tried
    first when given.

    Examples:
        >>> StringClass.get_datetime('2008-11-9')
        datetime.datetime(2008, 11, 9, 0, 0)
        >>> StringClass.get_datetime('2008-11-9 11:09:52')
        datetime.datetime(2008, 11, 9, 11, 9, 52)

    :raises ValueError: when no known format matches.
    """
    # Dash-separated bases, then slash-separated, then separator-free.
    date_fmts = ['%m-%d-%Y', '%Y-%m-%d', '%m-%d-%y', '%y-%m-%d']
    date_fmts.extend([d.replace('-', '/') for d in date_fmts])
    date_fmts.extend([d.replace('-', '') for d in date_fmts])
    time_fmts = ['%H:%M', '%H:%M:%S']
    fmts = date_fmts + ['%s %s' % (d, t) for d in date_fmts for t in time_fmts]
    if user_fmt is not None:
        # User-supplied formats take priority over the built-in list.
        if is_string(user_fmt):
            fmts.insert(0, str(user_fmt))
        elif isinstance(user_fmt, list):
            fmts = user_fmt + fmts
        elif isinstance(user_fmt, tuple):
            for fff in user_fmt:
                fmts.insert(0, fff)
    parsed = None
    for fmt in fmts:
        try:
            parsed = time.strptime(formatted_str, fmt)
            break
        except ValueError:
            continue
    if parsed is None:
        raise ValueError('The DATETIME must be one of the formats: %s'
                         % ','.join(fmts))
    return datetime(parsed.tm_year, parsed.tm_mon, parsed.tm_mday,
                    parsed.tm_hour, parsed.tm_min, parsed.tm_sec)
def pack_found_items(self, s_text, target):
    """Collect tree items whose label contains the search text.

    :param s_text: text to search for, already lower-cased
    :param target: tree ctrl object providing ``GetItemText``
    :return: list of found items
    """
    children = self.all_children
    labels = [target.GetItemText(child).lower() for child in children]
    return [child for label, child in zip(labels, children)
            if s_text in label]
def setup_tree(self):
    """Populate the example Treeview with demo rows and a heading."""
    tree = self.tree
    tree.insert("", tk.END, text="Example 1", iid="1")
    tree.insert("", tk.END, text="Example 2", iid="2")
    # Nested under the second top-level row.
    tree.insert("2", tk.END, text="Example Child")
    tree.heading("#0", text="Example heading")
def col_mod(df, col_name, func, *args, **kwargs):
    """Change a column of a DataFrame according to a given function.

    ``func`` is called as ``func(df, col_name, *args, **kwargs)``.  If it
    returns a non-None value, the column is replaced with that value via
    ``set_col``; otherwise ``func`` is assumed to have modified the column
    in place.  If ``func`` raises, the column is restored from a backup
    taken beforehand and the exception is suppressed (best-effort
    contract preserved from the original).

    Parameters
    ----------
    df : DataFrame
        DataFrame to operate on.
    col_name : string
        Name of column to modify.
    func : function
        The function to use to modify the column.
    """
    backup = df[col_name].copy()
    try:
        return_val = func(df, col_name, *args, **kwargs)
        if return_val is not None:
            set_col(df, col_name, return_val)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed; ordinary failures still roll
        # the column back as before.
        df[col_name] = backup
def is_valid_hexameter(self, scanned_line: str) -> bool:
    """Determine if a scansion pattern is a valid hexameter metrical pattern.

    :param scanned_line: a line containing a sequence of stressed and
        unstressed syllables
    :return: True if the normalized pattern is a known valid hexameter

    >>> print(MetricalValidator().is_valid_hexameter("-UU---UU---UU-U"))
    True
    """
    line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
    line = line.replace(" ", "")
    if len(line) < 12:
        return False
    # The final syllable is anceps: normalize it to the optional-ending
    # marker before the membership lookup.
    line = line[:-1] + self.constants.OPTIONAL_ENDING
    # Idiom fix: use ``in`` instead of calling __contains__ directly.
    return line in self.VALID_HEXAMETERS
def summary(args):
    """%prog summary coordsfile

    provide summary on id% and cov%, for both query and reference
    """
    # Removed an unused function-local import (jcvi.formats.blast.AlignStats).
    p = OptionParser(summary.__doc__)
    p.add_option("-s", dest="single", default=False, action="store_true",
                 help="provide stats per reference seq")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    coordsfile, = args
    alignstats = get_stats(coordsfile)
    alignstats.print_stats()
def to_array(self):
    """Serializes this Voice to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(Voice, self).to_array()
    # Required fields (py2: unicode, py3: str for the u() conversions).
    array['file_id'] = u(self.file_id)
    array['duration'] = int(self.duration)
    # Optional fields are emitted only when present.
    for key, caster in (('mime_type', u), ('file_size', int)):
        value = getattr(self, key)
        if value is not None:
            array[key] = caster(value)
    return array
def set_debugged_thread(self, target_thread_ident=None):
    """Set (or, with None, reset) the thread being debugged.

    Returns a dict with a 'result' thread listing and an 'error' message
    (empty string on success, None result on failure).
    """
    if target_thread_ident is None:
        # Reset: no specific thread is being debugged any more.
        self.debugged_thread_ident = None
        self.debugged_thread_name = ''
        return {"result": self.get_threads(), "error": ""}

    threads = self.get_threads()
    if target_thread_ident not in threads:
        return {"result": None,
                "error": "No thread with ident:%s." % target_thread_ident}
    if threads[target_thread_ident]['is_debugger']:
        # The debugger cannot debug its own tracer thread.
        return {"result": None,
                "error": "Cannot debug IKPdb tracer (sadly...)."}

    self.debugged_thread_ident = target_thread_ident
    self.debugged_thread_name = threads[target_thread_ident]['name']
    return {"result": self.get_threads(), "error": ""}
async def revoke(self, username, acl='login'):
    """Removes some or all access of a user from a controller.

    If 'login' access is revoked, the user will no longer have any
    permissions on the controller.  Revoking a higher privilege from a
    user without that privilege will have no effect.

    :param str username: username
    :param str acl: Access to remove ('login', 'add-model' or 'superuser')
    """
    controller_facade = client.ControllerFacade.from_connection(
        self.connection())
    user = tag.user(username)
    # Bug fix: revoke the requested ``acl`` level; 'login' was previously
    # hard-coded, ignoring the documented parameter.
    changes = client.ModifyControllerAccess(acl, 'revoke', user)
    return await controller_facade.ModifyControllerAccess([changes])
def get_default_classes(self):
    """Returns a list of the default classes for the tab group.

    Defaults to ``["nav", "nav-tabs", "ajax-tabs"]``.
    """
    # Start from the parent's defaults and append the tab-group CSS classes.
    classes = super(TabGroup, self).get_default_classes()
    classes.extend(CSS_TAB_GROUP_CLASSES)
    return classes
def to_designspace(
    font,
    family_name=None,
    instance_dir=None,
    propagate_anchors=True,
    ufo_module=defcon,
    minimize_glyphs_diffs=False,
    generate_GDEF=True,
    store_editor_state=True,
):
    """Convert a GSFont object into an in-memory Designspace Document + UFOs.

    The UFOs are available as the ``font`` attribute of each
    SourceDescriptor of the DesignspaceDocument::

        ufos = [source.font for source in designspace.sources]

    Nothing is written to disk; use the ``filename`` attributes of the
    DesignspaceDocument and its SourceDescriptors as suggested file names.
    If ``family_name`` is provided, the master UFOs are given that name and
    only instances with that name are produced.  If ``generate_GDEF`` is
    True, a ``table GDEF {...}`` statement (GlyphClassDef and
    LigatureCaretByPos) is written into each UFO's features.fea.
    """
    builder = UFOBuilder(
        font,
        ufo_module=ufo_module,
        family_name=family_name,
        instance_dir=instance_dir,
        propagate_anchors=propagate_anchors,
        use_designspace=True,
        minimize_glyphs_diffs=minimize_glyphs_diffs,
        generate_GDEF=generate_GDEF,
        store_editor_state=store_editor_state,
    )
    return builder.designspace
def end_subsegment(self, end_time=None):
    """End the current active subsegment.

    If this is the last one open under its parent segment, the entire
    segment will be sent.

    :param float end_time: subsegment completion in unix epoch in seconds.
    """
    if self.context.end_subsegment(end_time):
        # Segment closed: send it whole if complete, otherwise stream out
        # any finished subsegments.
        if self.current_segment().ready_to_send():
            self._send_segment()
        else:
            self.stream_subsegments()
def gcommer_claim(address=None):
    """Try to get a token for this server address.

    ``address`` has to be ip:port, e.g. ``'1.2.3.4:1234'``.  When no
    address is given, one with available tokens is picked from the status
    listing (only useful for testing; this mirrors what m.agar.io does).

    :raises ValueError: if no address was given and no listed server has
        tokens available (previously this silently fell through to the
        last listed server, or raised NameError on an empty listing).
    :return: tuple (address, token)
    """
    if not address:
        status_url = 'http://at.gcommer.com/status'
        status_text = urllib.request.urlopen(status_url).read().decode()
        status = json.loads(status_text)
        for address, num_tokens in status['status'].items():
            if num_tokens > 0:
                break
        else:
            raise ValueError('No server with available tokens found')
    claim_url = 'http://at.gcommer.com/claim?server=%s' % address
    claim_text = urllib.request.urlopen(claim_url).read().decode()
    token = json.loads(claim_text)['token']
    return address, token
def syncView(self):
    """Syncs all the items to the view."""
    if not self.updatesEnabled():
        return
    for top_item in self.topLevelItems():
        # Items without a syncView method are simply skipped.
        try:
            top_item.syncView(recursive=True)
        except AttributeError:
            continue
def erase_text(self, locator, click=True, clear=False, backspace=0, params=None):
    """Various ways to erase text from a web element.

    :param locator: locator tuple or WebElement instance
    :param click: clicks the input field
    :param clear: clears the input field
    :param backspace: how many times to hit backspace
    :param params: (optional) locator params
    :return: None
    """
    if isinstance(locator, WebElement):
        element = locator
    else:
        element = self.get_visible_element(locator, params)
    if click:
        self.click(element)
    if clear:
        element.clear()
    if backspace:
        chain = ActionChains(self.driver)
        for _ in range(backspace):
            chain.send_keys(Keys.BACKSPACE)
        chain.perform()
def process_composite(self, response):
    """Process a composite response.

    Composites do not have inter-item separators as they appear joined.
    We need to respect the universal options too.
    """
    composite = response["composite"]
    # If the composite is not a Composite, make it one so we can simplify it.
    if not isinstance(composite, Composite):
        composite = Composite(composite)
    # Simplify and get the underlying list.
    composite = composite.simplify().get_content()
    response["composite"] = composite
    if not isinstance(composite, list):
        raise Exception('expecting "composite" key in response')
    # If the list is empty there is nothing to do.
    if not len(composite):
        return
    if "full_text" in response:
        err = 'conflicting "full_text" and "composite" in response'
        raise Exception(err)
    # Set markup on every component when the module declares one.
    if "markup" in self.py3status_module_options:
        markup = self.py3status_module_options["markup"]
        for item in composite:
            item["markup"] = markup
    # Set universal options on the last component.
    composite[-1].update(self.i3bar_module_options)
    # Update all components.
    color = response.get("color")
    urgent = response.get("urgent")
    composite_length = len(response["composite"]) - 1
    for index, item in enumerate(response["composite"]):
        # Validate the response.
        if "full_text" not in item:
            raise KeyError('missing "full_text" key in response')
        # Make sure all components have a name.
        if "name" not in item:
            instance_index = item.get("index", index)
            item["instance"] = "{} {}".format(self.module_inst, instance_index)
            item["name"] = self.module_name
        # Hide the separator for all inner components unless already set.
        if index != composite_length:
            if "separator" not in item:
                item["separator"] = False
                item["separator_block_width"] = 0
        # If a color was supplied for the composite and a composite part
        # does not supply a color, use the composite color.
        if color and "color" not in item:
            item["color"] = color
        # Remove any "none" color from our output.
        if hasattr(item.get("color"), "none_setting"):
            del item["color"]
        # Set background and border colors.  Set left/right border widths
        # only on first/last composites and no border width for inner
        # composites or we will see border lines between composites.
        for key, value in self.i3bar_gaps_module_options.items():
            if (key == "border_left" and index != 0) or (
                key == "border_right" and index != composite_length
            ):
                item[key] = 0
            else:
                item[key] = value
        # Set urgent based on available user-defined settings.
        if not self.allow_urgent:
            if "urgent" in item:
                del item["urgent"]
        elif urgent:
            if self.i3bar_gaps_urgent_options:
                # Set background and border colors.  Set left/right border
                # widths only on first/last composites and no border width
                # for inner composites or we will see border lines between
                # composites.
                for key, value in self.i3bar_gaps_urgent_options.items():
                    if (key == "border_left" and index != 0) or (
                        key == "border_right" and index != composite_length
                    ):
                        item[key] = 0
                    elif key == "foreground":
                        item["color"] = value
                    else:
                        item[key] = value
                # Urgent styling handled via colors; drop the i3bar flag.
                if "urgent" in item:
                    del item["urgent"]
            else:
                item["urgent"] = urgent
    # Honor min_length by padding the composite's outer components.
    if "min_length" in self.py3status_module_options:
        min_length = self.py3status_module_options["min_length"]
        # Get the length; skip if it already exceeds min_length.
        length = sum([len(x["full_text"]) for x in response["composite"]])
        if length >= min_length:
            return
        # Sometimes we go under min_length to pad both sides evenly;
        # we will add extra space on either side to honor min_length.
        padding = int((min_length / 2.0) - (length / 2.0))
        offset = min_length - ((padding * 2) + length)
        # Set position.
        position = self.py3status_module_options.get("position", "left")
        if position == "center":
            left = right = " " * padding
            # The leftover space goes to a randomly chosen side.
            if self.random_int:
                left += " " * offset
            else:
                right += " " * offset
        elif position == "left":
            left, right = "", " " * (padding * 2 + offset)
        elif position == "right":
            right, left = "", " " * (padding * 2 + offset)
        # Apply padding to the outermost components.
        if left:
            response["composite"][0]["full_text"] = (
                left + response["composite"][0]["full_text"]
            )
        if right:
            response["composite"][-1]["full_text"] += right
def resolve_service_id(self, service_name=None, service_type=None):
    """Find the service_id of a given service.

    Matches by name and/or type depending on which arguments are given;
    returns the id of the first matching service, or None if nothing
    matches.

    :param service_name: service name to match (case-insensitive), or None
    :param service_type: service type to match exactly, or None
    """
    services = [s._info for s in self.api.services.list()]
    # Bug fix: service_name defaults to None, and calling .lower() on it
    # unconditionally raised AttributeError.
    if service_name is not None:
        service_name = service_name.lower()
    for s in services:
        name = s['name'].lower()
        if service_type and service_name:
            if service_name == name and service_type == s['type']:
                return s['id']
        elif service_name and service_name == name:
            return s['id']
        elif service_type and service_type == s['type']:
            return s['id']
    return None
def list_topic(self, k, Nwords=10):
    """Return the top ``Nwords`` (word, weight) pairs for topic ``k``.

    Examples
    --------
    .. code-block:: python

       >>> model.list_topic(1, Nwords=5)
    """
    top_features = self.phi.features[k].top(Nwords)
    return [(self.vocabulary[w], p) for w, p in top_features]
def select(cls, dataset, selection_mask=None, **selection):
    """Slice the underlying numpy array in sheet coordinates."""
    # Normalize tuple selections into slices.
    normalized = {}
    for dim, sel in selection.items():
        normalized[dim] = slice(*sel) if isinstance(sel, tuple) else sel
    coords = tuple(normalized.get(kd.name, slice(None))
                   for kd in dataset.kdims)
    if not any(isinstance(c, slice) for c in coords):
        # Pure point selection: index the array directly.
        return dataset.data[dataset.sheet2matrixidx(*coords)]

    # Clip the dataset bounds by the requested slice extents.
    xidx, yidx = coords
    l, b, r, t = dataset.bounds.lbrt()
    if isinstance(xidx, slice):
        if xidx.start is not None:
            l = max(l, xidx.start)
        if xidx.stop is not None:
            r = min(r, xidx.stop)
    if isinstance(yidx, slice):
        if yidx.start is not None:
            b = max(b, yidx.start)
        if yidx.stop is not None:
            t = min(t, yidx.stop)
    bounds = BoundingBox(points=((l, b), (r, t)))
    return Slice(bounds, dataset).submatrix(dataset.data)
def set_index(self, index):
    """Display the data of the given index.

    :param index: the index to paint
    :type index: QtCore.QModelIndex
    :returns: None
    :rtype: None
    :raises: None
    """
    note = index.internalPointer().internal_data()
    self.content_lb.setText(note.content)
    self.created_dte.setDateTime(dt_to_qdatetime(note.date_created))
    self.updated_dte.setDateTime(dt_to_qdatetime(note.date_updated))
    self.username_lb.setText(note.user.username)
def update(self, key, item):
    """Insert or replace *item* under *key* in the hash table.

    If the key is already present the old item is destroyed first (your
    container item destructor, if set, runs on the old value); otherwise a
    new entry is created.  The hash cursor is left on the new item.
    """
    result = lib.zhashx_update(self._as_parameter_, key, item)
    return result
def initialize(  # type: ignore
    self,
    max_clients: int = 10,
    hostname_mapping: Dict[str, str] = None,
    max_buffer_size: int = 104857600,
    resolver: Resolver = None,
    defaults: Dict[str, Any] = None,
    max_header_size: int = None,
    max_body_size: int = None,
) -> None:
    """Creates a AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop in order to
    provide limitations on the number of pending connections
    (``force_instance=True`` suppresses this behavior, and because of the
    implicit reuse only the first constructor call's arguments are used;
    prefer ``configure``).

    ``max_clients`` is the number of concurrent requests that can be in
    progress; further requests are queued and their queue time counts
    against ``request_timeout``.  ``hostname_mapping`` maps hostnames to
    IP addresses for local DNS overrides (e.g. in unittests).
    ``max_buffer_size`` (default 100MB) bounds bytes read into memory at
    once; ``max_body_size`` (defaults to ``max_buffer_size``) bounds the
    accepted response body — without a ``streaming_callback`` the smaller
    of the two applies, with one only ``max_body_size`` does.

    .. versionchanged:: 4.2
       Added the ``max_body_size`` argument.
    """
    super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
    self.max_clients = max_clients
    # Requests beyond max_clients wait here, FIFO.
    self.queue = (
        collections.deque()
    )  # type: Deque[Tuple[object, HTTPRequest, Callable[[HTTPResponse], None]]]
    self.active = (
        {}
    )  # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None]]]
    self.waiting = (
        {}
    )  # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None], object]]
    self.max_buffer_size = max_buffer_size
    self.max_header_size = max_header_size
    self.max_body_size = max_body_size
    if resolver:
        self.resolver = resolver
        self.own_resolver = False
    else:
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        self.resolver = Resolver()
        self.own_resolver = True
    if hostname_mapping is not None:
        self.resolver = OverrideResolver(
            resolver=self.resolver, mapping=hostname_mapping
        )
    self.tcp_client = TCPClient(resolver=self.resolver)
def spectral_embedding_(self, n):
    """Old method for generating coords, used in the original analysis of
    the yeast data; kept to reproduce the yeast result from the paper.

    It differs from the current approach (scikit-learn's spectral
    embedding, which spreads points over a full sphere rather than a half
    sphere and so plots better) by using a different Laplacian matrix.
    """
    affinity = self._affinity.copy()
    # Zero the diagonal (self-affinities) before building the Laplacian.
    affinity.flat[::affinity.shape[0] + 1] = 0
    decomp = eigen(laplace(affinity))
    return CoordinateMatrix(normalise_rows(decomp.vecs[:, :n]))
def restart_minecraft(world_state, agent_host, client_info, message):
    """Attempt to quit the mission if running, then kill the client.

    Always ends by raising MissionTimeoutException with *message*.
    """
    if world_state.is_mission_running:
        agent_host.sendCommand("quit")
        # Give the mission time to wind down before killing the client.
        time.sleep(10)
    agent_host.killClient(client_info)
    raise MissionTimeoutException(message)
def drag_sphere(Re, Method=None, AvailableMethods=False):
    r'''Calculate the drag coefficient of a sphere from its Reynolds number.

    Twenty methods are available, all requiring only the Reynolds number of
    the sphere; most are valid from Re=0 to Re=200,000.  A correlation is
    selected automatically when ``Method`` is not given:

    * Re < 0.01: Stokes' solution.
    * 0.01 <= Re < 0.1: linear blend of 'Barati' with Stokes' solution
      (pure 'Barati' at Re=0.1, pure Stokes at Re=0.01).
    * 0.1 <= Re <= ~212963: the 'Barati' solution.
    * ~212963 < Re <= 1E6: the 'Barati_high' solution.
    * Re > 1E6: raises an exception; no valid results are available.

    Parameters
    ----------
    Re : float
        Particle Reynolds number of the sphere using the surrounding fluid
        density and viscosity, [-]

    Returns
    -------
    Cd : float
        Drag coefficient [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to calculate `Cd` with the given
        `Re`

    Other Parameters
    ----------------
    Method : string, optional
        A string of the function name to use, as in the dictionary
        drag_sphere_correlations
    AvailableMethods : bool, optional
        If True, the function will return the list of methods valid for the
        given `Re`

    Examples
    --------
    >>> drag_sphere(200)
    0.7682237950389874
    '''
    def list_methods():
        # Collect every correlation whose validity range covers this Re.
        methods = []
        for key, (func, Re_min, Re_max) in drag_sphere_correlations.items():
            if (Re_min is None or Re > Re_min) and (Re_max is None or Re < Re_max):
                methods.append(key)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        if Re > 0.1:
            # Smooth transition point between the two models
            if Re <= 212963.26847812787:
                return Barati(Re)
            elif Re <= 1E6:
                return Barati_high(Re)
            else:
                raise ValueError('No models implement a solution for Re > 1E6')
        elif Re >= 0.01:
            # Re from 0.01 to 0.1: blend linearly between Barati and Stokes
            # so the switch between models is smooth.
            ratio = (Re - 0.01)/(0.1 - 0.01)
            return ratio*Barati(Re) + (1 - ratio)*Stokes(Re)
        else:
            return Stokes(Re)
    if Method in drag_sphere_correlations:
        return drag_sphere_correlations[Method][0](Re)
    else:
        # Was: Exception('Failure in in function') — duplicated word and an
        # over-broad type; ValueError is a subclass of Exception so callers
        # catching Exception are unaffected.
        raise ValueError('Unrecognized method name: %r' % (Method,))
def _verify_pair(prev, curr):
    """Verify a pair of sides share an endpoint.

    .. note::
       This currently checks that edge endpoints match **exactly** but
       allowing some roundoff may be desired.

    Args:
        prev (.Curve): "Previous" curve at piecewise junction.
        curr (.Curve): "Next" curve at piecewise junction.

    Raises:
        ValueError: If the previous side is not in 2D.
        ValueError: If consecutive sides don't share an endpoint.
    """
    if prev._dimension != 2:
        raise ValueError("Curve not in R^2", prev)

    prev_end = prev._nodes[:, -1]
    curr_start = curr._nodes[:, 0]
    if not _helpers.vector_close(prev_end, curr_start):
        raise ValueError(
            "Not sufficiently close",
            "Consecutive sides do not have common endpoint",
            prev,
            curr,
        )
def execute(self, input_data):
    """Info objects all have a type_tag of ('help', 'worker', 'command', or 'other').

    Dispatches on ``type_tag`` and strips the payload down to the keys
    relevant for that tag; unknown tags are reported and passed through
    unchanged.  NOTE: Python 2 code (print statement, dict.iteritems).
    """
    input_data = input_data['info']
    type_tag = input_data['type_tag']
    if type_tag == 'help':
        return {'help': input_data['help'], 'type_tag': input_data['type_tag']}
    elif type_tag == 'worker':
        # Only expose the public description of a worker.
        out_keys = ['name', 'dependencies', 'docstring', 'type_tag']
        return {key: value for key, value in input_data.iteritems() if key in out_keys}
    elif type_tag == 'command':
        out_keys = ['command', 'sig', 'docstring', 'type_tag']
        return {key: value for key, value in input_data.iteritems() if key in out_keys}
    elif type_tag == 'other':
        return input_data
    else:
        # Unknown tag: report it and return the data unchanged.
        print 'Got a malformed info object %s' % input_data
        return input_data
def run_plink(in_prefix, out_prefix, extract_filename):
    """Run Plink to compute allele frequencies for a subset of markers.

    Extracts the markers listed in ``extract_filename`` from the binary
    fileset ``in_prefix`` and runs plink's ``--freq`` computation, writing
    results under ``out_prefix``.  (The original docstring mentioned the
    geno option, but the command actually runs ``--freq``.)

    :param in_prefix: the input prefix.
    :param out_prefix: the output prefix.
    :param extract_filename: the name of the file containing markers to
        extract.

    :type in_prefix: str
    :type out_prefix: str
    :type extract_filename: str

    :raises ProgramError: if the plink subprocess exits with an error.
    """
    # The plink command
    plink_command = [
        "plink",
        "--noweb",
        "--bfile", in_prefix,
        "--extract", extract_filename,
        "--freq",
        "--out", out_prefix,
    ]
    try:
        # Captured output is only needed for the error message, so the
        # dead ``output`` local of the original was removed.
        subprocess.check_output(plink_command, stderr=subprocess.STDOUT,
                                shell=False)
    except subprocess.CalledProcessError as exc:
        msg = "plink: couldn't run plink\n{}".format(exc.output)
        raise ProgramError(msg)
def _thread_main(self):
    """The entry point for the worker thread.

    Pulls pending data off the queue and writes it in batches to the
    specified tracing backend using the exporter.
    """
    quit_ = False
    while True:
        items = self._get_items()
        data = []
        for item in items:
            if item is _WORKER_TERMINATOR:
                quit_ = True
                # Continue processing items, don't break; try to process
                # all items we got back before quitting.
            else:
                data.extend(item)
        if data:
            try:
                self.exporter.emit(data)
            except Exception:
                # Never let a backend failure kill the worker thread.
                # (Removed a redundant ``pass`` that followed this call.)
                logging.exception(
                    '%s failed to emit data.'
                    'Dropping %s objects from queue.',
                    self.exporter.__class__.__name__, len(data))
        for _ in range(len(items)):
            self._queue.task_done()
        # self._event is set at exit, at which point we start draining the
        # queue immediately.  If self._event is unset, block for
        # self._wait_period between each batch of exports.
        self._event.wait(self._wait_period)
        if quit_:
            break
def signature(array):
    """Return the file-header signature bytes of the given bytearray.

    Args:
        array: bytearray to extract the header signature from.

    Returns:
        The first ``_NUM_SIGNATURE_BYTES`` (262) bytes of the content as a
        bytearray, or the whole array when it is shorter.
    """
    # Slicing already clamps to the sequence length, so the explicit
    # length comparison of the original implementation is unnecessary.
    return array[:_NUM_SIGNATURE_BYTES]
def only_owner(func):
    """Only owner decorator.

    Restricts access to the wrapped view to the profile owner identified by
    the ``id`` keyword argument: anonymous users get 401, authenticated
    non-owners get 403.
    """
    from functools import wraps

    # ``wraps`` preserves the view's __name__/__doc__, which frameworks
    # such as Flask use for endpoint naming; the original decorator lost
    # them.
    @wraps(func)
    def decorated(*_, **kwargs):
        profile_id = kwargs['id']
        if not current_user.is_authenticated:
            abort(401)
        elif current_user.id != profile_id:
            abort(403)
        return func(**kwargs)
    return decorated
def view_count_plus(slug):
    """Increment the view counter of the wiki page identified by *slug*."""
    # Build and run a single UPDATE statement; the increment happens
    # server-side, so concurrent views do not lose counts.
    TabWiki.update(
        view_count=TabWiki.view_count + 1,
    ).where(
        TabWiki.uid == slug
    ).execute()
def next_job(self, timeout_seconds=None):
    """Return the next job in the queue, or None if nothing is there.

    :param timeout_seconds: currently unused; kept for interface
        compatibility (the lua script does not block).
    :return: a deserialized ``Job``, or ``None`` when the queue is empty
        or the payload could not be deserialized.
    """
    response = self.lua_next(keys=[self.queue_name])
    if not response:
        return
    job = Job.from_serialized(response)
    if not job:
        # Fixed NameError: this log line previously referenced an
        # undefined `serialized_job`; also `warn` -> `warning` (deprecated).
        self.log.warning("could not deserialize job from: %s", response)
    return job
def get_mac_acl_for_intf_output_interface_interface_name ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) get_mac_acl_for_intf = ET . Element ( "get_mac_acl_for_intf" ) config = get_mac_acl_for_intf output = ET . SubElement ( get_mac_acl_for_intf , "output" ) interface = ET . SubElement ( output , "interface" ) interface_type_key = ET . SubElement ( interface , "interface-type" ) interface_type_key . text = kwargs . pop ( 'interface_type' ) interface_name = ET . SubElement ( interface , "interface-name" ) interface_name . text = kwargs . pop ( 'interface_name' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def _escape_token ( token , alphabet ) : r"""Replace characters that aren ' t in the alphabet and append " _ " to token . Apply three transformations to the token : 1 . Replace underline character " _ " with " \ u " , and backslash " \ " with " \ \ " . 2 . Replace characters outside of the alphabet with " \ # # # ; " , where # # # is the character ' s Unicode code point . 3 . Appends " _ " to mark the end of a token . Args : token : unicode string to be escaped alphabet : list of all known characters Returns : escaped string"""
token = token . replace ( u"\\" , u"\\\\" ) . replace ( u"_" , u"\\u" ) ret = [ c if c in alphabet and c != u"\n" else r"\%d;" % ord ( c ) for c in token ] return u"" . join ( ret ) + "_"
def is_running(self, submissionid, user_check=True):
    """Tells if a submission is running/in queue"""
    # NOTE(review): only the "waiting" status is treated as running here —
    # presumably queued submissions carry this status; confirm with the
    # submission manager's status vocabulary.
    status = self.get_submission(submissionid, user_check)["status"]
    return status == "waiting"
def check_large_images(self, node, parent_depth_level, sibling_depth_level):
    """although slow the best way to determine the best image is to download
    them and check the actual dimensions of the image when on disk
    so we'll go through a phased approach...
    1. get a list of ALL images from the parent node
    2. filter out any bad image names that we know of (gifs, ads, etc..)
    3. do a head request on each file to make sure it meets
       our bare requirements
    4. any images left over let's do a full GET request,
       download em to disk and check their dimensions
    5. Score images based on different factors like height/width
       and possibly things like color density
    """
    good_images = self.get_image_candidates(node)
    if good_images:
        scored_images = self.fetch_images(good_images, parent_depth_level)
        if scored_images:
            # scored_images maps image -> score; take the highest-scored
            # image (first element after a descending sort by score).
            highscore_image = sorted(scored_images.items(),
                                     key=lambda x: x[1],
                                     reverse=True)[0][0]
            main_image = Image()
            main_image.src = highscore_image.src
            main_image.width = highscore_image.width
            main_image.height = highscore_image.height
            main_image.extraction_type = "bigimage"
            # Confidence shrinks as more candidates competed for the spot.
            # (The > 0 guard is redundant here — scored_images is truthy —
            # but kept as written.)
            main_image.confidence_score = (100 / len(scored_images)
                                           if len(scored_images) > 0 else 0)
            return main_image
    # No usable image at this depth: ask for the next depth level in the
    # DOM and recurse; give up (return None) when there is nowhere to go.
    depth_obj = self.get_depth_level(node, parent_depth_level,
                                     sibling_depth_level)
    if depth_obj:
        return self.check_large_images(depth_obj.node,
                                       depth_obj.parent_depth,
                                       depth_obj.sibling_depth)
    return None
def upper_diag_self_prodx(list_):
    """Upper diagonal of the cartesian product of ``list_`` with itself.

    Args:
        list_ (list):

    Returns:
        list: all pairs ``(list_[i], list_[j])`` with ``i < j``, in order.

    CommandLine:
        python -m utool.util_alg --exec-upper_diag_self_prodx

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_alg import *  # NOQA
        >>> list_ = [1, 2, 3]
        >>> result = upper_diag_self_prodx(list_)
        >>> print(result)
        [(1, 2), (1, 3), (2, 3)]
    """
    num = len(list_)
    # Index-based form: the inner range starting at i + 1 enforces the
    # strict upper-diagonal condition directly.
    return [(list_[i], list_[j])
            for i in range(num)
            for j in range(i + 1, num)]
def __request(self, endpoint, method='GET', params=None, convJSON=False):
    """request - Returns dict of response from postcode.nl API.

    This method is called only by the EndpointMixin methods.

    :param endpoint: API path, appended to ``self.api_url``.
    :param method: HTTP verb; lower-cased and looked up as a method on
        ``self.client`` (so 'GET' dispatches to ``self.client.get``).
    :param params: dict of request parameters; sent as the query string
        for GET, as the request body otherwise.
    :param convJSON: when True, JSON-encode ``params`` before sending.
    :raises PostcodeError: on a connection failure, on an API-reported
        exception, or on any non-200 response without an ``exceptionId``.
    """
    url = '%s/%s' % (self.api_url, endpoint)
    method = method.lower()
    params = params or {}
    if convJSON:
        params = json.dumps(params)
    # Dispatch to the matching HTTP method on the client object.
    func = getattr(self.client, method)
    request_args = {}
    if method == 'get':
        request_args['params'] = params
    else:
        request_args['data'] = params
    try:
        # Normally some valid HTTP-response will be the case;
        # if not, some exception regarding the request/connection has
        # occurred — this will be one of the exceptions of the requests
        # module.  If so, we raise a PostcodeError exception and pass the
        # request exception message along.
        response = func(url, **request_args)
    except requests.RequestException as e:
        raise PostcodeError("ERRrequest", {"exception": e.__doc__})
    # The API answers JSON; decode before inspecting the status code.
    content = response.content.decode('utf-8')
    content = json.loads(content)
    if response.status_code == 200:
        return content
    # Errors, otherwise we did not get here...
    if 'exceptionId' in content:
        raise PostcodeError(content['exceptionId'], content)
    raise PostcodeError("UnknownExceptionFromPostcodeNl")
def get_data_by_slug(model, slug, kind='', **kwargs):
    """Get instance data by slug and kind.

    This function requires the model to have a ``slug`` column; when no
    matching row exists, ``None`` is returned (callers may map that to
    404 Not Found).

    :param model: a string, model name in rio.models
    :param slug: a string used to query by ``slug``.
    :param kind: a string specifying which kind of dict transformer
        should be called.
    :return: a dict or None.
    """
    instance = get_instance_by_slug(model, slug, **kwargs)
    return ins2dict(instance, kind) if instance else None
def match_entry_line(str_to_match, regex_obj=MAIN_REGEX_OBJ):
    """Does a regex match of the mount entry string

    Returns the named groups of the match as a dict; raises
    ``UnrecognizedMountEntry`` when the line does not match.
    """
    match_obj = regex_obj.match(str_to_match)
    if match_obj:
        return match_obj.groupdict()
    # Unrecognized line: report it verbatim so the user can see what
    # failed to parse.
    error_message = ('Line "%s" is unrecognized by overlay4u. '
                     'This is only meant for use with Ubuntu Linux.')
    raise UnrecognizedMountEntry(error_message % str_to_match)
def qteRemoveHighlighting ( self , widgetObj ) : """Remove the highlighting from previously highlighted characters . The method access instance variables to determine which characters are currently highlighted and have to be converted to non - highlighted ones . | Args | * ` ` widgetObj ` ` ( * * QWidget * * ) : the ` ` QTextEdit ` ` to use . | Returns | * * * None * * | Raises | * * * None * *"""
# Retrieve the widget specific macro data . data = self . qteMacroData ( widgetObj ) if not data : return # If the data structure is empty then no previously # highlighted characters exist in this particular widget , so # do nothing . if not data . matchingPositions : return # Restore the original character formats , ie . undo the # highlighting changes . self . highlightCharacters ( widgetObj , data . matchingPositions , QtCore . Qt . black , 50 , data . oldCharFormats ) # Clear the data structure to indicate that no further # highlighted characters exist in this particular widget . data . matchingPositions = None data . oldCharFormats = None self . qteSaveMacroData ( data , widgetObj )
def _find_compositor ( self , dataset_key , ** dfilter ) : """Find the compositor object for the given dataset _ key ."""
# NOTE : This function can not find a modifier that performs # one or more modifications if it has modifiers see if we can find # the unmodified version first src_node = None if isinstance ( dataset_key , DatasetID ) and dataset_key . modifiers : new_prereq = DatasetID ( * dataset_key [ : - 1 ] + ( dataset_key . modifiers [ : - 1 ] , ) ) src_node , u = self . _find_dependencies ( new_prereq , ** dfilter ) # Update the requested DatasetID with information from the src if src_node is not None : dataset_key = self . _update_modifier_key ( dataset_key , src_node . name ) if u : return None , u try : compositor = self . get_compositor ( dataset_key ) except KeyError : raise KeyError ( "Can't find anything called {}" . format ( str ( dataset_key ) ) ) dataset_key = compositor . id root = Node ( dataset_key , data = ( compositor , [ ] , [ ] ) ) if src_node is not None : self . add_child ( root , src_node ) root . data [ 1 ] . append ( src_node ) # 2.1 get the prerequisites LOG . trace ( "Looking for composite prerequisites for: {}" . format ( dataset_key ) ) prereqs , unknowns = self . _get_compositor_prereqs ( root , compositor . attrs [ 'prerequisites' ] , ** dfilter ) if unknowns : # Should we remove all of the unknown nodes that were found # if there is an unknown prerequisite are we in trouble ? return None , unknowns root . data [ 1 ] . extend ( prereqs ) LOG . trace ( "Looking for optional prerequisites for: {}" . format ( dataset_key ) ) optional_prereqs , _ = self . _get_compositor_prereqs ( root , compositor . attrs [ 'optional_prerequisites' ] , skip = True , ** dfilter ) root . data [ 2 ] . extend ( optional_prereqs ) return root , set ( )
def post(f, *args, **kwargs):
    """Automatically log progress on function exit. Default logging value:
    info.

    *Logging with values contained in the parameters of the decorated
    function*
    Message (args[0]) may be a string to be formatted with parameters
    passed to the decorated function.  Each '{varname}' will be replaced
    by the value of the parameter of the same name.

    *Keyword parameters*
    - log :: integer
        - Specifies a custom level of logging to pass to the active
          logger.
        - Default: INFO

    *Exceptions:*
    - IndexError and ValueError
        - will be returned if *args contains a string that does not
          correspond to a parameter name of the decorated function, or if
          there are more '{}'s than there are *args.
    """
    # Force postfix-only mode so only the exit message is logged.
    kwargs['postfix_only'] = True
    return _stump(f, *args, **kwargs)
def decode(self, payload):
    """Decode payload.

    :param payload: the serialized payload to decode.
    :return: the decoded object, as produced by ``self.encoder``.
    :raises DecodeError: if the underlying encoder fails for any reason;
        the original exception is chained as the cause.
    """
    try:
        return self.encoder.decode(payload)
    except Exception as exception:
        # Chain the original exception so tracebacks show the root cause
        # instead of only its stringified message.
        raise DecodeError(str(exception)) from exception
def check_routes(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """Analog of :func:`check_agency` for ``feed.routes``.

    Validates required columns, ``route_id`` uniqueness, ``agency_id``
    references, route names, types, URLs and colors.  Returns a list of
    problems (or a DataFrame when ``as_df`` is True); warning-level
    checks run only when ``include_warnings`` is True.
    """
    table = "routes"
    problems = []

    # Preliminary checks
    if feed.routes is None:
        problems.append(["error", "Missing table", table, []])
    else:
        f = feed.routes.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        # Without the table / required columns no further check can run.
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check route_id
    problems = check_column_id(problems, table, f, "route_id")

    # Check agency_id
    if "agency_id" in f:
        if "agency_id" not in feed.agency.columns:
            problems.append(
                [
                    "error",
                    "agency_id column present in routes but not in agency",
                    table,
                    [],
                ]
            )
        else:
            # Only non-null agency_ids must resolve to a row in agency.
            g = f.dropna(subset=["agency_id"])
            cond = ~g["agency_id"].isin(feed.agency["agency_id"])
            problems = check_table(
                problems, table, g, cond, "Undefined agency_id"
            )

    # Check route_short_name and route_long_name
    for column in ["route_short_name", "route_long_name"]:
        problems = check_column(
            problems, table, f, column, valid_str, column_required=False
        )

    # At least one of the two name columns must be filled on every row.
    cond = ~(
        f["route_short_name"].notnull() | f["route_long_name"].notnull()
    )
    problems = check_table(
        problems,
        table,
        f,
        cond,
        "route_short_name and route_long_name both empty",
    )

    # Check route_type (valid GTFS route types are the integers 0..7)
    v = lambda x: x in range(8)
    problems = check_column(problems, table, f, "route_type", v)

    # Check route_url
    problems = check_column(
        problems, table, f, "route_url", valid_url, column_required=False
    )

    # Check route_color and route_text_color
    for col in ["route_color", "route_text_color"]:
        problems = check_column(
            problems, table, f, col, valid_color, column_required=False
        )

    if include_warnings:
        # Check for duplicated (route_short_name, route_long_name) pairs
        cond = f[["route_short_name", "route_long_name"]].duplicated()
        problems = check_table(
            problems,
            table,
            f,
            cond,
            "Repeated pair (route_short_name, route_long_name)",
            "warning",
        )

        # Check for routes without trips
        s = feed.trips["route_id"]
        cond = ~f["route_id"].isin(s)
        problems = check_table(
            problems, table, f, cond, "Route has no trips", "warning"
        )

    return format_problems(problems, as_df=as_df)
def global_id_field(type_name, id_fetcher=None):
    '''Creates the configuration for an id field on a node, using
    `to_global_id` to construct the ID from the provided typename.  The
    type-specific ID is fetched by calling id_fetcher on the object, or
    if not provided, by accessing the `id` property on the object.
    '''
    def resolve_id(obj, args, context, info):
        # When id_fetcher is absent, fall back to the object's `id`.
        if id_fetcher:
            local_id = id_fetcher(obj, context, info)
        else:
            local_id = obj.id
        # A falsy type_name defers to the parent type's name at resolve
        # time.
        return to_global_id(type_name or info.parent_type.name, local_id)

    return GraphQLField(
        GraphQLNonNull(GraphQLID),
        description='The ID of an object',
        resolver=resolve_id,
    )