signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def parse_grid_facets(facets):
    """Return two lists of facetting variables, for the rows & columns.

    Parameters
    ----------
    facets : str | tuple | list
        Either a formula string like ``'var1 ~ var2'`` or a 2-sequence
        like ``('var1', '.')``.

    Returns
    -------
    (rows, cols) : tuple of lists
        Variable names for the row facets and the column facets.

    Raises
    ------
    PlotnineError
        If ``facets`` is neither a valid 2-sequence nor a valid formula.
    """
    valid_seqs = ["('var1', '.')", "('var1', 'var2')",
                  "('.', 'var1')", "((var1, var2), (var3, var4))"]
    error_msg_s = ("Valid sequences for specifying 'facets' look like"
                   " {}".format(valid_seqs))

    valid_forms = ['var1 ~ .', 'var1 ~ var2', '. ~ var1',
                   'var1 + var2 ~ var3 + var4',
                   '. ~ func(var1) + func(var2)',
                   '. ~ func(var1+var3) + func(var2)'] + valid_seqs
    error_msg_f = ("Valid formula for 'facet_grid' look like"
                   " {}".format(valid_forms))

    # Sequence form: exactly two entries, each a variable name, a list
    # of names, or '.' meaning "no facets along this dimension".
    if isinstance(facets, (tuple, list)):
        if len(facets) != 2:
            raise PlotnineError(error_msg_s)
        rows, cols = facets
        if isinstance(rows, str):
            rows = [] if rows == '.' else [rows]
        if isinstance(cols, str):
            cols = [] if cols == '.' else [cols]
        return rows, cols

    if not isinstance(facets, str):
        raise PlotnineError(error_msg_f)

    # Example of allowed formulae
    #     'c ~ a + b'
    #     '. ~ func(a) + func(b)'
    #     'func(c) ~ func(a + 1) + func(b + 2)'
    try:
        lhs, rhs = facets.split('~')
    except ValueError:
        # BUG FIX: a malformed *formula* string should report the formula
        # examples (error_msg_f), not the sequence examples (error_msg_s).
        raise PlotnineError(error_msg_f)
    else:
        lhs = lhs.strip()
        rhs = rhs.strip()

    lhs = ensure_var_or_dot(lhs)
    rhs = ensure_var_or_dot(rhs)

    # Split on ' + ' when present so that '+' inside func(a+1) survives.
    lsplitter = ' + ' if ' + ' in lhs else '+'
    rsplitter = ' + ' if ' + ' in rhs else '+'

    if lhs == '.':
        rows = []
    else:
        rows = [var.strip() for var in lhs.split(lsplitter)]

    if rhs == '.':
        cols = []
    else:
        cols = [var.strip() for var in rhs.split(rsplitter)]

    return rows, cols
def updateCurrentView(self, oldWidget, newWidget):
    """Update the current view widget.

    :param oldWidget: <QtGui.QWidget>
    :param newWidget: <QtGui.QWidget>
    """
    # Find the XView ancestor of the newly focused widget, if any.
    target_view = projexui.ancestor(newWidget, XView)
    if target_view is None:
        return
    target_view.setCurrent()
def reload(script, input, output):
    """Reload the generator script whenever the script file or any of
    the input files change.

    :param script: path to the generator script
    :param input: a single input path or a list/tuple of them
    :param output: directory where generated files are written
    """
    script_path = Path(script).expand().abspath()
    output_path = Path(output).expand().abspath()
    # Normalise a single input into a one-element list; sequences pass through.
    inputs = input if isinstance(input, (list, tuple)) else [input]
    output_path.makedirs_p()
    _script_reload(script_path, inputs, output_path)
def suppress(self, email):
    """Add one or more email addresses to a client's suppression list.

    :param email: a single address (str) or an iterable of addresses
    """
    # Wrap a lone address in a list; sequences are sent as-is.
    addresses = [email] if isinstance(email, str) else email
    payload = json.dumps({"EmailAddresses": addresses})
    self._post(self.uri_for("suppress"), payload)
def _storage_create(args, _):
    """Create one or more buckets.

    :param args: parsed CLI args; reads ``args['project']`` and ``args['bucket']``
    :raises Exception: aggregated message if any bucket could not be created
    """
    buckets = datalab.storage.Buckets(project_id=args['project'])
    failures = []
    for name in args['bucket']:
        try:
            bucket, key = datalab.storage._bucket.parse_name(name)
            # A valid bucket spec has a bucket component and no object key.
            if not bucket or key:
                raise Exception("Invalid bucket name %s" % name)
            buckets.create(bucket)
        except Exception as e:
            failures.append("Couldn't create %s: %s" %
                            (name, _extract_storage_api_response_error(str(e))))
    if failures:
        raise Exception('\n'.join(failures))
def get(self) -> None:
    """Write the whole HTML representation of the root document."""
    # Imported lazily to avoid a circular import at module load time
    # (presumably — TODO confirm against the wdom package layout).
    from wdom.document import get_document
    logger.info('connected')
    document = get_document()
    self.write(document.build())
def populate_target(device_name):
    """! @brief Add targets from cmsis-pack-manager matching the given name.

    Targets are added to the `#TARGET` list. A case-insensitive comparison
    against the device part number is used to find the target to populate.
    If multiple packs are installed that provide the same part numbers,
    all matching targets will be populated.
    """
    wanted = device_name.lower()
    for dev in ManagedPacks.get_installed_targets():
        if dev.part_number.lower() == wanted:
            PackTargets.populate_device(dev)
def toDict(self):
    """To Dictionary.

    Returns the basic node as a dictionary in the same format as is
    used in constructing it.

    Returns:
        dict
    """
    result = {}
    # The optional flag is serialised under a reserved key.
    if self._optional:
        result['__optional__'] = True
    # Special fields are stored under their templated names.
    for key, value in self._special.items():
        result[_specialSet % key] = value
    return result
def walk(self, topdown=True, followlinks=True):
    """walk: walk the filesystem (just like os.walk).

    Use like::

        path = URI('/some/dir')
        for root, dirs, files in path.walk():
            do_something()

    root will be an URI object.
    """
    # Pure delegation: the underlying connection implements the traversal
    # and receives this path object as the starting point.
    return self.connection.walk(self, topdown=topdown, followlinks=followlinks)
def find_unique_identity(session, uuid):
    """Find a unique identity.

    Find a unique identity by its UUID using the given `session`.
    When the unique identity does not exist the function will
    return `None`.

    :param session: database session
    :param uuid: id of the unique identity to find
    :returns: a unique identity object; `None` when the unique
        identity does not exist
    """
    query = session.query(UniqueIdentity)
    return query.filter(UniqueIdentity.uuid == uuid).first()
def to_dict(self):
    """Return the truthy fields of this jsonapi error as a dict."""
    fields = ('status', 'source', 'title', 'detail',
              'id', 'code', 'links', 'meta')
    # Only fields that are present and truthy are included.
    return {name: getattr(self, name)
            for name in fields if getattr(self, name, None)}
def index_all(self):
    """Index all records under :attr:`record_path`."""
    self.logger.debug('Start indexing all records under: %s',
                      self.record_path)
    # One DB connection for the whole batch; records indexed in sorted order.
    with self.db.connection():
        for record_file in sorted(self.find_record_files()):
            self.index_record(record_file)
def _extract_lambda_function_code(resource_properties, code_property_key):
    """Extracts the Lambda Function Code from the Resource Properties.

    Parameters
    ----------
    resource_properties : dict
        Dictionary representing the Properties of the Resource
    code_property_key : str
        Property Key of the code on the Resource

    Returns
    -------
    str
        Representing the local code path
    """
    default = SamFunctionProvider._DEFAULT_CODEURI
    codeuri = resource_properties.get(code_property_key, default)
    # A dict here means an S3-style code location, not a local path,
    # so fall back to the default local code URI.
    if isinstance(codeuri, dict):
        return default
    return codeuri
def flush(self):
    """Force commit changes to the file and stdout."""
    # Collect the streams to flush, preserving the stdout-then-file order.
    streams = []
    if not self.nostdout:
        streams.append(self.stdout)
    if self.file is not None:
        streams.append(self.file)
    for stream in streams:
        stream.flush()
def get_uint16(self):
    """Read the next token and interpret it as a 16-bit unsigned integer.

    @raises dns.exception.SyntaxError:
    @rtype: int
    """
    value = self.get_int()
    # Reject anything outside the uint16 range [0, 65535].
    if not 0 <= value <= 65535:
        raise dns.exception.SyntaxError(
            '%d is not an unsigned 16-bit integer' % value)
    return value
def _do_magic_import(self, rule, p_selectors, p_parents, p_children, scope,
                     media, c_lineno, c_property, c_codestr, code, name):
    """Implements @import for sprite-maps.

    Imports magic sprite map directories: gathers the image files under
    ``name``, registers per-sprite variables in the rule context, builds
    the sprite map, and returns generated SCSS mixins for it.
    """
    # STATIC_ROOT may be a callable (custom finder) or a plain directory.
    if callable(STATIC_ROOT):
        files = sorted(STATIC_ROOT(name))
    else:
        glob_path = os.path.join(STATIC_ROOT, name)
        files = glob.glob(glob_path)
        # Strip the STATIC_ROOT prefix; the second tuple slot (storage)
        # is None for the plain-filesystem case.
        files = sorted((file[len(STATIC_ROOT):], None) for file in files)
    if files:
        # Build magic context
        map_name = os.path.normpath(os.path.dirname(name)).replace('\\', '_').replace('/', '_')
        kwargs = {}

        def setdefault(var, val):
            # Use an existing context variable if the stylesheet already
            # defined it; otherwise install the default and record it.
            _var = '$' + map_name + '-' + var
            if _var in rule[CONTEXT]:
                kwargs[var] = interpolate(rule[CONTEXT][_var], rule)
            else:
                rule[CONTEXT][_var] = val
                kwargs[var] = interpolate(val, rule)
            return rule[CONTEXT][_var]

        setdefault('sprite-base-class', StringValue('.' + map_name + '-sprite'))
        setdefault('sprite-dimensions', BooleanValue(False))
        position = setdefault('position', NumberValue(0, '%'))
        spacing = setdefault('spacing', NumberValue(0))
        repeat = setdefault('repeat', StringValue('no-repeat'))
        # Sprite names are the image basenames without extension.
        names = tuple(os.path.splitext(os.path.basename(file))[0] for file, storage in files)
        for n in names:
            setdefault(n + '-position', position)
            setdefault(n + '-spacing', spacing)
            setdefault(n + '-repeat', repeat)
        sprite_map = _sprite_map(name, **kwargs)
        rule[CONTEXT]['$' + map_name + '-' + 'sprites'] = sprite_map
        # Generated SCSS: base class plus convenience mixins for the map.
        ret = '''
@import "compass/utilities/sprites/base";

// All sprites should extend this class
// The %(map_name)s-sprite mixin will do so for you.
#{$%(map_name)s-sprite-base-class} {
    background: $%(map_name)s-sprites;
}

// Use this to set the dimensions of an element
// based on the size of the original image.
@mixin %(map_name)s-sprite-dimensions($name) {
    @include sprite-dimensions($%(map_name)s-sprites, $name);
}

// Move the background position to display the sprite.
@mixin %(map_name)s-sprite-position($name, $offset-x: 0, $offset-y: 0) {
    @include sprite-position($%(map_name)s-sprites, $name, $offset-x, $offset-y);
}

// Extends the sprite base class and set the background position for the desired sprite.
// It will also apply the image dimensions if $dimensions is true.
@mixin %(map_name)s-sprite($name, $dimensions: $%(map_name)s-sprite-dimensions, $offset-x: 0, $offset-y: 0) {
    @extend #{$%(map_name)s-sprite-base-class};
    @include sprite($%(map_name)s-sprites, $name, $dimensions, $offset-x, $offset-y);
}

@mixin %(map_name)s-sprites($sprite-names, $dimensions: $%(map_name)s-sprite-dimensions) {
    @include sprites($%(map_name)s-sprites, $sprite-names, $%(map_name)s-sprite-base-class, $dimensions);
}

// Generates a class for each sprited image.
@mixin all-%(map_name)s-sprites($dimensions: $%(map_name)s-sprite-dimensions) {
    @include %(map_name)s-sprites(%(sprites)s, $dimensions);
}
''' % {'map_name': map_name, 'sprites': ' '.join(names)}
        return ret
def cvm_unif_fix1(statistic):
    """Approximates the first-term of the small sample count Gotze expansion.

    After equation 1.10 (with coefficients pulled out as csa / csb).
    """
    # fix1_args, fix1_csa, fix1_csb are module-level coefficient arrays
    # (defined elsewhere in this module) — assumed numpy arrays; TODO confirm.
    args = fix1_args / statistic
    # Modified Bessel functions K_v at orders 1/4, 3/4, 5/4, broadcast
    # over args via the trailing newaxis.
    kvs = kv((.25, .75, 1.25), args[:, :, newaxis])
    # Two fixed linear combinations of the Bessel terms, damped by exp(-args).
    gs, hs = exp(-args) * tensordot(((1, 1, 0), (2, 3, -1)), kvs, axes=(1, 2))
    # Correction terms with statistic^-3/2 and statistic^-5/2 scaling,
    # per the expansion referenced in the docstring.
    a = dot((7, 16, 7), fix1_csa * gs).sum() / statistic ** 1.5
    b = dot((1, 0, 24), fix1_csb * hs).sum() / statistic ** 2.5
    return cvm_unif_inf(statistic) / 12 - a - b
def stop(self, signum=None, _unused=None):
    """Stop the consumer from consuming by calling BasicCancel and
    setting our state.

    :param int signum: The signal received
    :param _unused: The stack frame from when the signal was called
    """
    LOGGER.debug('Stop called in state: %s', self.state_description)

    # Guard clauses: nothing to do if a stop is already in flight.
    if self.is_stopped:
        LOGGER.warning('Stop requested but consumer is already stopped')
        return
    if self.is_shutting_down:
        LOGGER.warning('Stop requested, consumer is already shutting down')
        return
    if self.is_waiting_to_shutdown:
        LOGGER.warning('Stop requested but already waiting to shut down')
        return

    # Stop consuming and close AMQP connections
    self.shutdown_connections()

    # Wait until the consumer has finished processing to shutdown
    if self.is_processing:
        LOGGER.info('Waiting for consumer to finish processing')
        self.set_state(self.STATE_STOP_REQUESTED)
        if signum == signal.SIGTERM:
            signal.siginterrupt(signal.SIGTERM, False)
    return
def _reassign_misplaced_members(binding):
    """Apply misplaced members from `binding` to Qt.py

    Arguments:
        binding (dict): Misplaced members
    """
    for src, dst in _misplaced_members[binding].items():
        dst_value = None

        # src is "Module.attr[.attr...]"; split into module + member chain.
        src_parts = src.split(".")
        src_module = src_parts[0]
        src_member = None
        if len(src_parts) > 1:
            src_member = src_parts[1:]

        # dst may be a (name, value) pair, in which case the value is
        # supplied directly instead of being looked up from the source.
        if isinstance(dst, (list, tuple)):
            dst, dst_value = dst

        dst_parts = dst.split(".")
        dst_module = dst_parts[0]
        dst_member = None
        if len(dst_parts) > 1:
            dst_member = dst_parts[1]

        # Get the member we want to store in the namespace.
        if not dst_value:
            try:
                _part = getattr(Qt, "_" + src_module)
                while src_member:
                    member = src_member.pop(0)
                    _part = getattr(_part, member)
                dst_value = _part
            except AttributeError:
                # If the member we want to store in the namespace does not
                # exist, there is no need to continue. This can happen if a
                # request was made to rename a member that didn't exist, for
                # example if QtWidgets isn't available on the target platform.
                _log("Misplaced member has no source: {0}".format(src))
                continue

        try:
            src_object = getattr(Qt, dst_module)
        except AttributeError:
            if dst_module not in _common_members:
                # Only create the Qt parent module if it's listed in
                # _common_members. Without this check, if you remove QtCore
                # from _common_members, the default _misplaced_members will add
                # Qt.QtCore so it can add Signal, Slot, etc.
                msg = 'Not creating missing member module "{m}" for "{c}"'
                _log(msg.format(m=dst_module, c=dst_member))
                continue
            # If the dst is valid but the Qt parent module does not exist
            # then go ahead and create a new module to contain the member.
            setattr(Qt, dst_module, _new_module(dst_module))
            src_object = getattr(Qt, dst_module)
            # Enable direct import of the new module
            sys.modules[__name__ + "." + dst_module] = src_object

        if not dst_value:
            dst_value = getattr(Qt, "_" + src_module)
            if src_member:
                dst_value = getattr(dst_value, src_member)

        setattr(src_object, dst_member or dst_module, dst_value)
def unhex(inp):
    '''unquote(r'abc\x20def') -> 'abc def'.'''
    # Python 2 code: uses xrange/unichr and the module-level _hextochr table.
    # Split on the literal two characters backslash-x; every piece after the
    # first therefore starts with (what should be) two hex digits.
    res = inp.split(r'\x')
    for i in xrange(1, len(res)):
        item = res[i]
        try:
            res[i] = _hextochr[item[:2]] + item[2:]
        except KeyError:
            # NOTE(review): prefixing '%' on failure looks copied from
            # urllib.unquote; for a \x escape one would expect r'\x' + item
            # to restore the original text — TODO confirm intended behavior.
            res[i] = '%' + item
        except UnicodeDecodeError:
            # Non-ASCII byte value: decode the two hex digits directly.
            res[i] = unichr(int(item[:2], 16)) + item[2:]
    return ''.join(res)
def mimewrap(path, filename=None, ctype=None):
    """Take the contents of the given path and wrap them into an email MIME
    part according to the content type. The content type is auto detected
    from the actual file contents and the file name if it is not given.

    :param path: the path to the file contents
    :type path: str
    :param filename: the file name to use in the generated MIME part
    :type filename: str or None
    :param ctype: the content type of the file contents in path
    :type ctype: str or None
    :returns: the message MIME part storing the data from path
    :rtype: subclasses of email.mime.base.MIMEBase
    """
    with open(path, 'rb') as f:
        content = f.read()
    if not ctype:
        ctype = guess_mimetype(content)
        # libmagic < 5.12 incorrectly detects excel/powerpoint files as
        # 'application/msword' (see #179 and #186 in libmagic bugtracker).
        # This is a workaround, based on file extension, useful as long
        # as distributions still ship libmagic 5.11.
        if (ctype == 'application/msword' and
                not libmagic_version_at_least(513)):
            mimetype, _ = mimetypes.guess_type(path)
            if mimetype:
                ctype = mimetype

    maintype, subtype = ctype.split('/', 1)
    # Pick the MIME subclass matching the major type; text is re-decoded
    # with a guessed encoding and re-encoded as utf-8.
    if maintype == 'text':
        part = MIMEText(content.decode(guess_encoding(content), 'replace'),
                        _subtype=subtype,
                        _charset='utf-8')
    elif maintype == 'image':
        part = MIMEImage(content, _subtype=subtype)
    elif maintype == 'audio':
        part = MIMEAudio(content, _subtype=subtype)
    else:
        part = MIMEBase(maintype, subtype)
        part.set_payload(content)
        # Encode the payload using Base64
        email.encoders.encode_base64(part)
    # Set the filename parameter
    if not filename:
        filename = os.path.basename(path)
    part.add_header('Content-Disposition', 'attachment',
                    filename=filename)
    return part
def feed(self, data):
    """Add *data* to the buffer."""
    # Append the chunk and keep the running byte count in sync.
    self._buffers.append(data)
    self._buffer_size += len(data)
    # Apply backpressure if the buffer has grown past its limit,
    # then wake up any reader waiting for data.
    self._maybe_pause_transport()
    self._can_read.set()
def n_list(self, node):
    """prettyprint a list or tuple"""
    # Save and raise the precedence so elements render without extra parens.
    p = self.prec
    self.prec = 100
    lastnode = node.pop()
    lastnodetype = lastnode.kind

    # If this build list is inside a CALL_FUNCTION_VAR,
    # then the first * has already been printed.
    # Until I have a better way to check for CALL_FUNCTION_VAR,
    # will assume that if the text ends in *.
    last_was_star = self.f.getvalue().endswith('*')

    if lastnodetype.endswith('UNPACK'):
        # FIXME: need to handle range of BUILD_LIST_UNPACK
        have_star = True
        # endchar = ''
    else:
        have_star = False

    # Choose opening/closing delimiters from the build opcode.
    if lastnodetype.startswith('BUILD_LIST'):
        self.write('[')
        endchar = ']'
    elif lastnodetype.startswith('BUILD_TUPLE'):
        # Tuples can appear places that can NOT
        # have parenthesis around them, like array
        # subscripts. We check for that by seeing
        # if a tuple item is some sort of slice.
        no_parens = False
        for n in node:
            if n == 'expr' and n[0].kind.startswith('build_slice'):
                no_parens = True
                break
            pass
        if no_parens:
            endchar = ''
        else:
            self.write('(')
            endchar = ')'
            pass
    elif lastnodetype.startswith('BUILD_SET'):
        self.write('{')
        endchar = '}'
    elif lastnodetype.startswith('BUILD_MAP_UNPACK'):
        self.write('{*')
        endchar = '}'
    elif lastnodetype.startswith('ROT_TWO'):
        self.write('(')
        endchar = ')'
    else:
        raise TypeError(
            'Internal Error: n_build_list expects list, tuple, set, or unpack')

    flat_elems = flatten_list(node)
    self.indent_more(INDENT_PER_LEVEL)
    sep = ''
    for elem in flat_elems:
        if elem in ('ROT_THREE', 'EXTENDED_ARG'):
            continue
        assert elem == 'expr'
        line_number = self.line_number
        value = self.traverse(elem)
        # If the element spilled onto a new source line, break the
        # separator across lines too; otherwise just add a space.
        if line_number != self.line_number:
            sep += '\n' + self.indent + INDENT_PER_LEVEL[:-1]
        else:
            if sep != '':
                sep += ' '
        # Prefix '*' for unpack elements unless one was already emitted.
        if not last_was_star:
            if have_star:
                sep += '*'
                pass
            pass
        else:
            last_was_star = False
        self.write(sep, value)
        sep = ','
    # A one-element tuple needs the trailing comma: (x,)
    if lastnode.attr == 1 and lastnodetype.startswith('BUILD_TUPLE'):
        self.write(',')
    self.write(endchar)
    self.indent_less(INDENT_PER_LEVEL)
    # Restore precedence and stop further traversal of this subtree.
    self.prec = p
    self.prune()
    return
def get_acls(path, profile=None, hosts=None, scheme=None,
             username=None, password=None, default_acl=None):
    '''
    Get acls on a znode

    path
        path to znode

    profile
        Configured Zookeeper profile to authenticate with (Default: None)

    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)

    scheme
        Scheme to authenticate with (Default: 'digest')

    username
        Username to authenticate (Default: None)

    password
        Password to authenticate (Default: None)

    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    CLI Example:

    .. code-block:: bash

        salt minion1 zookeeper.get_acls /test/name profile=prod
    '''
    conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
                        username=username, password=password,
                        default_acl=default_acl)
    # get_acls returns (acls, stat); only the acls are of interest here.
    result = conn.get_acls(path)
    return result[0]
def recursive_update(self, k, d):
    """Recursively update a top-level option in the run control.

    Parameters
    ----------
    k : string
        the top-level key
    d : dictionary or similar
        the dictionary to use for updating
    """
    current = self[k]
    self.store[k] = _recursive_update(current, d)
def debugExperiment(logFile):
    """Debug a thing experiment given a logFile"""
    # Python 2 code (print statements). Re-runs a logged experiment and
    # probes inference with points from each object, reporting how much
    # each resulting L2 representation overlaps the learned objects.
    exp = rerunExperimentFromLogfile(logFile)
    exp.logCalls = False
    L2Representations = exp.objectL2Representations
    print "Learned object representations:"
    pprint.pprint(L2Representations, width=400)
    print "=========================="

    print "\nRun inference with a point on the capsule"
    sensationList = [
        {0: getObjectPair("Capsule", 0)},
    ]
    exp.infer(sensationList, reset=False)
    print "Output for capsule:", exp.getL2Representations()
    print "Intersection with sphere:", len(exp.getL2Representations()[0] & L2Representations["Sphere"][0])
    print "Intersection with capsule:", len(exp.getL2Representations()[0] & L2Representations["Capsule"][0])
    print "Intersection with cube:", len(exp.getL2Representations()[0] & L2Representations["Cube"][0])
    exp.sendReset()

    print "\nRun inference with a point on the sphere"
    sensationList = [
        {0: getObjectPair("Sphere", 0)},
    ]
    exp.infer(sensationList, reset=False)
    print "Output for sphere:", exp.getL2Representations()
    print "Intersection with sphere:", len(exp.getL2Representations()[0] & L2Representations["Sphere"][0])
    print "Intersection with Capsule:", len(exp.getL2Representations()[0] & L2Representations["Capsule"][0])
    print "Intersection with cube:", len(exp.getL2Representations()[0] & L2Representations["Cube"][0])
    exp.sendReset()

    # Two sensations narrow the representation further than one.
    print "\nRun inference with two points on the sphere"
    sensationList = [
        {0: getObjectPair("Sphere", 0)},
        {0: getObjectPair("Sphere", 2)},
    ]
    exp.infer(sensationList, reset=False)
    print "Output for sphere:", exp.getL2Representations()
    print "Intersection with sphere:", len(exp.getL2Representations()[0] & L2Representations["Sphere"][0])
    print "Intersection with Capsule:", len(exp.getL2Representations()[0] & L2Representations["Capsule"][0])
    print "Intersection with cube:", len(exp.getL2Representations()[0] & L2Representations["Cube"][0])
    exp.sendReset()

    print "\nRun inference with a point on the cube"
    sensationList = [
        {0: getObjectPair("Cube", 2)},
    ]
    exp.infer(sensationList, reset=False)
    print "Output for cube:", exp.getL2Representations()
    print "Intersection with sphere:", len(exp.getL2Representations()[0] & L2Representations["Sphere"][0])
    print "Intersection with Capsule:", len(exp.getL2Representations()[0] & L2Representations["Capsule"][0])
    print "Intersection with cube:", len(exp.getL2Representations()[0] & L2Representations["Cube"][0])
    exp.sendReset()
def copy(self):
    """return a copy"""
    # Rebuild an Option from the same constructor keywords, pulled
    # straight off this instance.
    attrs = ('name', 'default', 'doc', 'from_string_converter',
             'to_string_converter', 'value', 'short_form',
             'exclude_from_print_conf', 'exclude_from_dump_conf',
             'is_argument', 'likely_to_be_changed', 'not_for_definition',
             'reference_value_from', 'secret', 'has_changed', 'foreign_data')
    return Option(**{attr: getattr(self, attr) for attr in attrs})
def FindTypecheckParent(cls, parents):
    """Find parent class that uses this metaclass.

    Returns the first parent whose __metaclass__ equals `cls`, or None.
    """
    for parent in parents:
        if getattr(parent, "__metaclass__", None) == cls:
            return parent
    return None
def _get_dbt_columns_from_bq_table ( self , table ) : "Translates BQ SchemaField dicts into dbt BigQueryColumn objects"
columns = [ ] for col in table . schema : # BigQuery returns type labels that are not valid type specifiers dtype = self . Column . translate_type ( col . field_type ) column = self . Column ( col . name , dtype , col . fields , col . mode ) columns . append ( column ) return columns
def get_slide_seg_list_belonged(dt_str, seg_duration, slide_step=1, fmt='%Y-%m-%d %H:%M:%S'):
    """Return every sliding time segment that the given moment falls into.

    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: segment length, unit: minute
    :param slide_step: sliding step size
    :param fmt: datetime string format
    :return: list of matching time segments
    """
    dt = time_util.str_to_datetime(dt_str, fmt)
    # All candidate segments covering a whole day, generated with the
    # requested duration and slide step.
    day_slide_seg_list = gen_slide_seg_list(const.FIRST_MINUTE_OF_DAY, const.MINUTES_IN_A_DAY, seg_duration, slide_step)
    # NOTE(review): on Python 3 `filter` returns an iterator, not a list —
    # confirm callers don't rely on list semantics.
    return filter(lambda x: lie_in_seg(dt, x, seg_duration), day_slide_seg_list)
def defaults(self):
    """Access the defaults

    :returns: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
    :rtype: twilio.rest.autopilot.v1.assistant.defaults.DefaultsList
    """
    # Lazily build the DefaultsList once and cache it on the instance.
    if self._defaults is None:
        assistant_sid = self._solution['sid']
        self._defaults = DefaultsList(self._version,
                                      assistant_sid=assistant_sid)
    return self._defaults
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
    """Segregate Series based on type and coerce into matrices.

    Needs to handle a lot of exceptional cases.
    """
    # figure out the index, if necessary
    index = extract_index(arrays) if index is None else ensure_index(index)

    # don't force copy because getting jammed in an ndarray anyway
    homogenized = _homogenize(arrays, index, dtype)

    # from BlockManager perspective
    axes = [ensure_index(columns), index]

    return create_block_manager_from_arrays(homogenized, arr_names, axes)
def add_switch_mode(self):
    """Switch dynamic command mode

    Switch to the raster mode on the printers that support
    the mode change (others are in raster mode already).
    """
    if self.model in modesetting:
        # ESC i a 0x01 — select raster mode.
        self.data += b'\x1B\x69\x61\x01'
    else:
        self._unsupported("Trying to switch the operating mode on a printer that doesn't support the command.")
async def get_default_storage_layout(cls) -> StorageLayout:
    """Default storage layout.

    Storage layout that is applied to a node when it is deployed.
    """
    raw_value = await cls.get_config("default_storage_layout")
    return cls.StorageLayout.lookup(raw_value)
def load_from_rdf_file(self, rdf_file):
    """Initialize given an RDF input file representing the hierarchy.

    Parameters
    ----------
    rdf_file : str
        Path to an RDF file (N-Triples format).
    """
    rdf_path = os.path.abspath(rdf_file)
    self.graph = rdflib.Graph()
    self.graph.parse(rdf_path, format='nt')
    self.initialize()
def _retrieve_station_history(self, station_ID, limit, interval):
    """Helper method for station_X_history functions."""
    params = {'id': station_ID, 'type': interval, 'lang': self._language}
    # 'cnt' caps the number of returned measurements; omitted = no cap.
    if limit is not None:
        params['cnt'] = limit
    uri = http_client.HttpClient.to_url(STATION_WEATHER_HISTORY_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    history = self._parsers['station_history'].parse_JSON(json_data)
    if history is not None:
        history.set_station_ID(station_ID)
        history.set_interval(interval)
    return history
def get(self, transport, robj, r=None, pr=None, timeout=None,
        basic_quorum=None, notfound_ok=None, head_only=False):
    """get(robj, r=None, pr=None, timeout=None)

    Fetches the contents of a Riak object.

    .. note:: This request is automatically retried :attr:`retries`
       times if it fails due to network error.

    :param robj: the object to fetch
    :type robj: RiakObject
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param basic_quorum: whether to use the "basic quorum" policy
       for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param head_only: whether to fetch without value, so only metadata
       (only available on PB transport)
    :type head_only: bool
    """
    _validate_timeout(timeout)
    key = robj.key
    if not isinstance(key, six.string_types):
        raise TypeError(
            'key must be a string, instead got {0}'.format(repr(key)))

    return transport.get(robj,
                         r=r,
                         pr=pr,
                         timeout=timeout,
                         basic_quorum=basic_quorum,
                         notfound_ok=notfound_ok,
                         head_only=head_only)
def notches(self):
    """Returns a list of the notches that are going to be used for this
    ruler. If the notches have not been explicitly set (per a Custom type),
    then the notches will be generated based on the minimum, maximum and
    step values of the current ruler type.

    :return     [<str>, ..]
    """
    # Explicitly-set notches (Custom ruler type) win outright.
    if (self._notches is not None):
        return self._notches

    rtype = self.rulerType()
    # NOTE(review): formatter/format are fetched but never used below —
    # possibly vestigial; confirm before removing.
    formatter = self.formatter()
    format = self.format()

    self._notches = []
    minimum = self.minimum()
    maximum = self.maximum()
    step = self.step()

    # A non-positive step would never terminate the loop below.
    if (step <= 0):
        return []

    # Walk from minimum toward maximum, advancing per the ruler type
    # (numeric add, day add, or seconds add); unknown types bail out
    # after emitting a single notch.
    curr = minimum
    while (curr < maximum):
        self._notches.append(self.formatValue(curr))
        if (rtype == XChartRuler.Type.Number):
            curr += step
        elif (rtype == XChartRuler.Type.Date):
            curr = curr.addDays(step)
        elif (rtype in (XChartRuler.Type.Datetime, XChartRuler.Type.Time)):
            curr = curr.addSecs(step)
        else:
            break
    # The maximum is always included as the final notch.
    self._notches.append(self.formatValue(maximum))
    return self._notches
def _update_lhs ( lhs , xCore , zCore , new_lhs ) : """Function to be called from the project ( )"""
# TODO : Use intermediate variable to use 5 nested loops instead of 6. r_old_x , n , r_x = xCore . shape num_obj , r_old_z , n , r_z = zCore . shape for idx in range ( num_obj ) : for val in range ( n ) : for alpha_old_z in range ( r_old_z ) : for alpha_z in range ( r_z ) : for alpha_old_x in range ( r_old_x ) : for alpha_x in range ( r_x ) : curr_value = lhs [ idx , alpha_old_x , alpha_old_z ] curr_value *= xCore [ alpha_old_x , val , alpha_x ] curr_value *= zCore [ idx , alpha_old_z , val , alpha_z ] new_lhs [ idx , alpha_x , alpha_z ] += curr_value
def h_from_V(self, V, method='spline'):
    r'''Method to calculate the height of liquid in a fully defined tank
    given a specified volume of liquid in it `V`. `V` must be under the
    maximum volume. If the method is 'spline', and the interpolation table
    is not yet defined, creates it by calling the method set_table. If the
    method is 'chebyshev', and the coefficients have not yet been
    calculated, they are created by calling `set_chebyshev_approximators`.

    Parameters
    ----------
    V : float
        Volume of liquid in the tank up to the desired height, [m^3]
    method : str
        One of 'spline', 'chebyshev', or 'brenth'

    Returns
    -------
    h : float
        Height of liquid at which the volume is as desired, [m]

    Raises
    ------
    Exception
        If `method` is not one of the three supported strategies.
    '''
    if method == 'spline':
        # Build the interpolation table lazily on first use.
        if not self.table:
            self.set_table()
        return float(self.interp_h_from_V(V))
    elif method == 'chebyshev':
        # Build the Chebyshev approximators lazily on first use.
        if not self.chebyshev:
            self.set_chebyshev_approximators()
        return self.h_from_V_cheb(V)
    elif method == 'brenth':
        to_solve = lambda h: self.V_from_h(h, method='full') - V
        # NOTE(review): bracket is passed as (h_max, 0) rather than the
        # conventional (0, h_max) — confirm the root finder accepts an
        # unordered bracket before reordering.
        return brenth(to_solve, self.h_max, 0)
    else:
        # BUG FIX: the message previously listed 'full', which is not an
        # accepted value for this method's `method` argument; the valid
        # choices are 'spline', 'chebyshev', and 'brenth'.
        raise Exception("Allowable methods are 'spline', 'chebyshev', "
                        "or 'brenth'.")
def getInitialArguments(self):
    """Return the constructor arguments required for the JavaScript client
    class, Mantissa.ScrollTable.ScrollTable.

    @return: a 3-tuple of::

        - The unicode attribute ID of my current sort column
        - A list of dictionaries with 'name' and 'type' keys which are
          strings describing the name and type of all the columns in
          this table.
        - A bool indicating whether the sort direction is initially
          ascending.
    """
    sort_column = IColumn(self.currentSortColumn)
    return [sort_column.attributeID.decode('ascii'),
            self._getColumnList(),
            self.isAscending]
def _handleCallAnswered(self, regexMatch, callId=None):
    """Handler for "outgoing call answered" event notification line."""
    if regexMatch:
        groups = regexMatch.groups()
        # Some modems include the call ID in the notification; others don't.
        if len(groups) > 1:
            callId = int(groups[0])
            self.activeCalls[callId].answered = True
        else:
            # Call ID not available for this notification - check for the
            # first outgoing call that has not been answered
            for call in dictValuesIter(self.activeCalls):
                # NOTE(review): `== False` and `type(call) == Call` are
                # unidiomatic (`not call.answered`, `isinstance`) but may be
                # intentional to exclude Call subclasses — confirm before
                # changing.
                if call.answered == False and type(call) == Call:
                    call.answered = True
                    return
    else:
        # Use supplied values
        self.activeCalls[callId].answered = True
def create(self):
    """Create the corresponding index. Will overwrite existing indexes
    of the same name.
    """
    body = {}
    if self.mapping is not None:
        body['mappings'] = self.mapping
    # Explicit settings win; otherwise fall back to the defaults.
    body['settings'] = (self.settings if self.settings is not None
                        else self._default_settings())
    self.instance.indices.create(self.index, body)
def remove_core_element(self, model):
    """Remove the core element of the handed outcome model.

    :param OutcomeModel model: Outcome model whose core element should be
        removed
    :return:
    """
    # The outcome must belong to the state this controller manages.
    assert model.outcome.parent is self.model.state
    outcome_model = model
    gui_helper_state_machine.delete_core_element_of_model(outcome_model)
def cli(ctx, profile):
    """dw commands support working with multiple data.world accounts

    Use a different <profile> value for each account.
    In the absence of a <profile>, 'default' will be used.
    """
    # Click may hand us a context without an obj dict yet.
    if ctx.obj is None:
        ctx.obj = {}
    ctx.obj['profile'] = profile
    # (removed a dead trailing `pass` statement)
def _QueryProcessStatus ( self , process ) : """Queries a process to determine its status . Args : process ( MultiProcessBaseProcess ) : process to query for its status . Returns : dict [ str , str ] : status values received from the worker process ."""
process_is_alive = process . is_alive ( ) if process_is_alive : rpc_client = self . _rpc_clients_per_pid . get ( process . pid , None ) process_status = rpc_client . CallFunction ( ) else : process_status = None return process_status
def _update_services(self, ta_results):
    """Update TrustedAdvisor limits on all services from poll results.

    Given a dict of TrustedAdvisor check results (as returned by
    :py:meth:`~._poll`), push each known limit value into the matching
    service's limit objects; unknown services/limits are logged and
    skipped.

    :param ta_results: results returned by :py:meth:`~._poll`
    :type ta_results: dict
    """
    logger.debug("Updating TA limits on all services")
    for service_name in sorted(ta_results):
        checks = ta_results[service_name]
        try:
            limits = self.ta_services[service_name]
        except KeyError:
            logger.info("TrustedAdvisor returned check results for "
                        "unknown service '%s'", service_name)
            continue
        for limit_name in sorted(checks):
            try:
                limit = limits[limit_name]
            except KeyError:
                logger.info("TrustedAdvisor returned check results for "
                            "unknown limit '%s' (service %s)",
                            limit_name, service_name)
                continue
            value = checks[limit_name]
            if value == 'Unlimited':
                limit._set_ta_unlimited()
            else:
                limit._set_ta_limit(value)
    logger.info("Done updating TA limits on all services")
def _get_response_body_from_gzipped_content(self, url, response):
    """Get the response body from gzipped content.

    Try to decode as gzip (we should check the headers for
    Content-Encoding=gzip)::

        if response.headers['content-encoding'] == "gzip":

    :param url: the url for which response was sent
    :type url: str
    :param response: response content object, probably gzipped
    :type response: object
    :returns: returns response body
    :raises IloError: if the content is **not** gzipped
    """
    try:
        # NOTE(review): BytesIO is fed response.text, which is normally a
        # str on requests-style responses while GzipFile needs bytes -
        # presumably .text carries raw bytes here; confirm against the
        # caller (response.content may be intended).
        gzipper = gzip.GzipFile(fileobj=six.BytesIO(response.text))
        LOG.debug(self._("Received compressed response for "
                         "url %(url)s."), {'url': url})
        uncompressed_string = (gzipper.read().decode('UTF-8'))
        response_body = json.loads(uncompressed_string)
    except Exception as e:
        # Any failure (not gzipped, truncated stream, bad JSON) is
        # surfaced uniformly as an IloError carrying the original cause.
        LOG.debug(self._("Error occurred while decompressing body. "
                         "Got invalid response '%(response)s' for "
                         "url %(url)s: %(error)s"),
                  {'url': url, 'response': response.text, 'error': e})
        raise exception.IloError(e)
    return response_body
def is_complete(self):
    """Check the job's output and log file for the completion criteria.

    Returns:
        bool: True only if both the qstat output and the status log
        indicate completion.
    """
    qstat = self._grep_qstat('complete')
    comp = self._grep_status('complete')
    # bool() keeps the return type stable even if the grep helpers
    # return truthy non-bool values.
    return bool(qstat and comp)
def _adjust_sell_amount(self, stock_code, amount):
    """Clamp a sell order to the shares actually available in the account.

    Xueqiu trade orders are expressed as position percentages, so rounding
    can produce a sell quantity larger than what was actually bought
    (e.g. buy rounded to 1000 shares but sell rounded to 1100), which
    would make the sell fail. This caps the sell amount at the available
    balance, rounded down to a full lot of 100.

    :param stock_code: security code
    :type stock_code: str
    :param amount: requested number of shares to sell
    :type amount: int
    :return: sell amount adjusted against the real position
    :rtype: int
    """
    code = stock_code[-6:]
    holdings = self._users[0].position
    matched = [s for s in holdings if s['证券代码'] == code]
    if not matched:
        log.info('根据持仓调整 %s 卖出额,发现未持有股票 %s, 不做任何调整', code, code)
        return amount
    available_amount = matched[0]['可用余额']
    if available_amount >= amount:
        return amount
    # Round down to a whole lot (multiples of 100 shares).
    adjusted = available_amount // 100 * 100
    log.info('股票 %s 实际可用余额 %s, 指令卖出股数为 %s, 调整为 %s', code,
             available_amount, amount, adjusted)
    return adjusted
def get(self, request):
    '''Get user information, with a list of permissions for that user.'''
    serializer = PermissionsUserSerializer(
        instance=request.user,
        context={'request': request},
    )
    return Response(data=serializer.data)
def required_items(element, children, attributes):
    """Check an xml element to include given attributes and children.

    :param element: ElementTree element
    :param children: list of XPaths to check
    :param attributes: list of attributes names to check
    :raises NotValidXmlException: if some argument is missing
    :raises NotValidXmlException: if some child is missing
    """
    # Children are validated first, so a missing child is reported before
    # a missing attribute when both are absent.
    required_elements(element, *children)
    required_attributes(element, *attributes)
def where_entry_last(query, ref):
    """Generate a where clause where this is the last entry.

    ref -- the entry of reference

    NOTE(review): Pony ORM decompiles this generator expression at
    runtime, so it must remain a plain generator passed to orm.select -
    do not refactor the expression into helper calls.
    """
    # An entry is "at or before" ref if it has an earlier local_date, or
    # the same local_date with id <= ref.id (id breaks same-day ties).
    return orm.select(
        e for e in query
        if e.local_date < ref.local_date
        or (e.local_date == ref.local_date and e.id <= ref.id)
    )
def delete_ipv4_range(start_addr=None, end_addr=None, **api_opts):
    '''Delete ip range.

    CLI Example:

    .. code-block:: bash

        salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
    '''
    found = get_ipv4_range(start_addr, end_addr, **api_opts)
    if not found:
        # Nothing to delete - a missing range counts as success.
        return True
    return delete_object(found['_ref'], **api_opts)
def get_leads(self, offset=None, limit=None, lead_list_id=None,
              first_name=None, last_name=None, email=None, company=None,
              phone_number=None, twitter=None):
    """Gives back all the leads saved in your account.

    :param offset: Number of leads to skip.
    :param limit: Maximum number of leads to return.
    :param lead_list_id: Id of a lead list to query leads on.
    :param first_name: First name to filter on.
    :param last_name: Last name to filter on.
    :param email: Email to filter on.
    :param company: Company to filter on.
    :param phone_number: Phone number to filter on.
    :param twitter: Twitter account to filter on.

    :return: All leads found as a dict.
    """
    # Explicit mapping replaces the old locals() trick, which was fragile
    # against any new local variable being added to this function.
    filters = {
        'offset': offset,
        'limit': limit,
        'lead_list_id': lead_list_id,
        'first_name': first_name,
        'last_name': last_name,
        'email': email,
        'company': company,
        'phone_number': phone_number,
        'twitter': twitter,
    }
    # Copy base_params: the old code mutated self.base_params in place, so
    # filters from one call leaked into every subsequent request.
    params = dict(self.base_params)
    params.update((key, value) for key, value in filters.items()
                  if value is not None)
    endpoint = self.base_endpoint.format('leads')
    return self._query_hunter(endpoint, params)
def write_ushort(self, s):
    """Writes a 2 byte unsigned integer to the stream.

    @param s: 2 byte unsigned integer
    @type s: C{int}
    @raise TypeError: Unexpected type for int C{s}.
    @raise OverflowError: Not in range.
    """
    if type(s) not in python.int_types:
        raise TypeError('expected an int (got:%r)' % (type(s),))
    if s < 0 or s > 65535:
        raise OverflowError("Not in range, %d" % s)
    packed = struct.pack("%sH" % self.endian, s)
    self.write(packed)
def systemInformationType4(ChannelDescription_presence=0,
                           MobileAllocation_presence=0):
    """SYSTEM INFORMATION TYPE 4 Section 9.1.36"""
    a = L2PseudoLength()
    b = TpPd(pd=0x6)
    c = MessageType(mesType=0x1C)  # 000111100
    d = LocalAreaId()
    e = CellSelectionParameters()
    f = RachControlParameters()
    packet = a / b / c / d / e / f
    # `== 1` replaces `is 1`: identity comparison against an int literal
    # is CPython-implementation-dependent and raises SyntaxWarning on 3.8+.
    if ChannelDescription_presence == 1:
        g = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0)
        packet = packet / g
    if MobileAllocation_presence == 1:
        h = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
        packet = packet / h
    i = Si4RestOctets()
    packet = packet / i
    return packet
def metadata_converter_help():
    """Help message for the metadata converter dialog.

    .. versionadded:: 4.3

    :returns: A message object containing helpful information.
    :rtype: messaging.message.Message
    """
    message = m.Message()
    for part in (m.Brand(), heading(), content()):
        message.add(part)
    return message
def processPropagate(self, msg: Propagate, frm):
    """Process one propagateRequest sent to this node asynchronously.

    - If this propagateRequest hasn't been seen by this node, then
      broadcast it to all nodes after verifying the signature.
    - Add the client to blacklist if its signature is invalid.

    :param msg: the propagateRequest
    :param frm: the name of the node which sent this `msg`
    """
    logger.debug("{} received propagated request: {}".
                 format(self.name, msg))
    # Rebuild the client request object from the propagated payload.
    request = TxnUtilConfig.client_request_class(**msg.request)
    clientName = msg.senderClient
    if not self.isProcessingReq(request.key):
        # First time we see this request: check whether it was already
        # ordered (present in the seq-no DB) before starting work on it.
        ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(
            request.payload_digest)
        if ledger_id is not None and seq_no is not None:
            self._clean_req_from_verified(request)
            logger.debug("{} ignoring propagated request {} "
                         "since it has been already ordered".
                         format(self.name, msg))
            return
        self.startedProcessingReq(request.key, clientName)
        # forced request should be processed before consensus
        self.handle_request_if_forced(request)
    else:
        # Already processing: at most record the sender client name.
        if clientName is not None and \
                not self.is_sender_known_for_req(request.key):
            # Since some propagates might not include the client name
            self.set_sender_for_req(request.key, clientName)
    # Record the propagate vote from `frm`, re-broadcast our own
    # propagate, and try to forward the request to the replicas.
    self.requests.add_propagate(request, frm)
    self.propagate(request, clientName)
    self.tryForwarding(request)
def _getbugs(self, idlist, permissive, include_fields=None,
             exclude_fields=None, extra_fields=None):
    """Return a list of dicts of full bug info for each given bug id.

    Bug ids that couldn't be found will return None instead of a dict.
    """
    # Normalize ids: numeric strings become ints, anything else is kept
    # verbatim as a string alias.
    oldidlist = idlist
    idlist = []
    for i in oldidlist:
        try:
            idlist.append(int(i))
        except ValueError:
            # String aliases can be passed as well
            idlist.append(i)
    extra_fields = self._listify(extra_fields or [])
    extra_fields += self._getbug_extra_fields
    getbugdata = {"ids": idlist}
    if permissive:
        getbugdata["permissive"] = 1
    getbugdata.update(self._process_include_fields(
        include_fields, exclude_fields, extra_fields))
    # Single XMLRPC round trip for all requested bugs.
    r = self._proxy.Bug.get(getbugdata)
    # Pre-4.0 servers nest the bug payload under 'internals'.
    if self._check_version(4, 0):
        bugdict = dict([(b['id'], b) for b in r['bugs']])
    else:
        bugdict = dict([(b['id'], b['internals']) for b in r['bugs']])
    # Re-emit results in the caller's requested order; ids that came back
    # keyed differently are resolved through their alias list.
    ret = []
    for i in idlist:
        found = None
        if i in bugdict:
            found = bugdict[i]
        else:
            # Need to map an alias
            for valdict in bugdict.values():
                if i in self._listify(valdict.get("alias", None)):
                    found = valdict
                    break
        ret.append(found)
    return ret
def apply(self, parent_environ=None):
    """Apply the context to the current python session.

    Note that this updates os.environ and possibly sys.path, if
    `parent_environ` is not provided.

    Args:
        parent_environ: Environment to interpret the context within,
            defaults to os.environ if None.
    """
    interpreter = Python(target_environ=os.environ)
    self._execute(self._create_executor(interpreter, parent_environ))
    interpreter.apply_environ()
def add_link(dataset, source, target, count=1):
    """Add a link.

    Parameters
    ----------
    dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
        Dataset.
    source : `iterable` of `str`
        Link source.
    target : `str`
        Link target.
    count : `int`, optional
        Link count (default: 1).
    """
    # Nodes are stored compactly: a single link is [count, target]; once a
    # second target appears the node becomes [[counts...], [targets...]].
    if source not in dataset:
        dataset[source] = [count, target]
        return
    node = dataset[source]
    values, links = node
    if isinstance(links, list):
        if target in links:
            values[links.index(target)] += count
        else:
            links.append(target)
            values.append(count)
    elif links == target:
        node[0] += count
    else:
        # Promote the single-link node to the list form.
        node[0] = [values, count]
        node[1] = [links, target]
def modelrepr(instance) -> str:
    """Default ``repr`` version of a Django model object, for debugging."""
    parts = []
    # noinspection PyProtectedMember
    for field in instance._meta.get_fields():
        # https://docs.djangoproject.com/en/2.0/ref/models/meta/
        # Skip reverse/auto-created relations and generic relations with
        # no concrete related model.
        if field.auto_created:
            continue
        if field.is_relation and field.related_model is None:
            continue
        try:
            rendered = repr(getattr(instance, field.name))
        except ObjectDoesNotExist:
            rendered = "<RelatedObjectDoesNotExist>"
        parts.append("{}={}".format(field.name, rendered))
    return "<{} <{}>>".format(type(instance).__name__, ", ".join(parts))
def get_boundingbox(self):
    """Return minimum and maximum x and z coordinates of the chunks that
    make up this world save.
    """
    box = BoundingBox()
    for region_x, region_z in self.regionfiles.keys():
        region = self.get_region(region_x, region_z)
        # Each region is a 32x32 grid of chunks.
        base_x, base_z = 32 * region_x, 32 * region_z
        for coords in region.get_chunk_coords():
            box.expand(base_x + coords['x'], None, base_z + coords['z'])
    return box
def function_scoping(self, node, frame, children=None, find_special=True):
    """In Jinja a few statements require the help of anonymous functions.

    Those are currently macros and call blocks and in the future also
    recursive loops. As there is currently technical limitation that
    doesn't allow reading and writing a variable in a scope where the
    initial value is coming from an outer scope, this function tries to
    fall back with a common error message. Additionally the frame passed
    is modified so that the arguments are collected and callers are
    looked up. This will return the modified frame.
    """
    # we have to iterate twice over it, make sure that works
    if children is None:
        children = node.iter_child_nodes()
    children = list(children)
    func_frame = frame.inner()
    func_frame.inspect(children, hard_scope=True)

    # variables that are undeclared (accessed before declaration) and
    # declared locally *and* part of an outside scope raise a template
    # assertion error. Reason: we can't generate reasonable code from
    # it without aliasing all the variables.
    # this could be fixed in Python 3 where we have the nonlocal
    # keyword or if we switch to bytecode generation
    overriden_closure_vars = (
        func_frame.identifiers.undeclared &
        func_frame.identifiers.declared &
        (func_frame.identifiers.declared_locally |
         func_frame.identifiers.declared_parameter)
    )
    if overriden_closure_vars:
        self.fail('It\'s not possible to set and access variables '
                  'derived from an outer scope! (affects: %s)' %
                  ', '.join(sorted(overriden_closure_vars)), node.lineno)

    # remove variables from a closure from the frame's undeclared
    # identifiers.
    func_frame.identifiers.undeclared -= (
        func_frame.identifiers.undeclared &
        func_frame.identifiers.declared
    )

    # no special variables for this scope, abort early
    if not find_special:
        return func_frame

    # Collect the macro's formal arguments, then look up the implicit
    # special names (caller/kwargs/varargs) actually used by the body and
    # append them as extra arguments in that fixed order.
    func_frame.accesses_kwargs = False
    func_frame.accesses_varargs = False
    func_frame.accesses_caller = False
    func_frame.arguments = args = ['l_' + x.name for x in node.args]
    undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
    if 'caller' in undeclared:
        func_frame.accesses_caller = True
        func_frame.identifiers.add_special('caller')
        args.append('l_caller')
    if 'kwargs' in undeclared:
        func_frame.accesses_kwargs = True
        func_frame.identifiers.add_special('kwargs')
        args.append('l_kwargs')
    if 'varargs' in undeclared:
        func_frame.accesses_varargs = True
        func_frame.identifiers.add_special('varargs')
        args.append('l_varargs')
    return func_frame
def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['ResourceView']
    """Reads the resource view given by identifier from HDX and returns
    ResourceView object.

    Args:
        identifier (str): Identifier of resource view
        configuration (Optional[Configuration]): HDX configuration.
            Defaults to global configuration.

    Returns:
        Optional[ResourceView]: ResourceView object if successful read,
        None if not
    """
    resourceview = ResourceView(configuration=configuration)
    if resourceview._load_from_hdx('resource view', identifier):
        return resourceview
    return None
def subset(self, words):
    """Get a new Vocab containing only the specified subset of words.

    If w is in words, but not in the original vocab, it will NOT be in
    the subset vocab. Indices will be in the order of `words`. Counts
    from the original vocab are preserved.

    :param words: unique words to keep
    :raises ValueError: if `words` contains duplicates
    :return (Vocab): a new Vocab object
    """
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently allow duplicate words through.
    if len(set(words)) != len(words):
        raise ValueError('words must not contain duplicates')
    v = self.__class__(unk=self._unk)
    for w in words:
        if w in self:
            v.add(w, count=self.count(w))
    return v
def execute_with_timeout(
        fn,
        args=None,
        kwargs=None,
        timeout=None,
        fail_if_no_timer=True,
        signal_type=_default_signal_type,
        timer_type=_default_timer_type,
        timeout_exception_cls=TimeoutError,
):
    """Executes specified function with timeout. Uses SIGALRM to
    interrupt it.

    :type fn: function
    :param fn: function to execute
    :type args: tuple
    :param args: function args
    :type kwargs: dict
    :param kwargs: function kwargs
    :type timeout: float
    :param timeout: timeout, seconds; 0 or None means no timeout
    :type fail_if_no_timer: bool
    :param fail_if_no_timer: fail, if timer is not available; normally
        it's available only in the main thread
    :type signal_type: signalnum
    :param signal_type: type of signal to use (see signal module)
    :type timer_type: signal.ITIMER_REAL, signal.ITIMER_VIRTUAL or
        signal.ITIMER_PROF
    :param timer_type: type of timer to use (see signal module)
    :type timeout_exception_cls: class
    :param timeout_exception_cls: exception to throw in case of timeout
    :return: fn call result.
    """
    # NOTE(review): `empty_tuple`, `empty_dict` and the lowercase `none`
    # sentinel are module-level names defined elsewhere in this package -
    # `none` is presumably a unique marker distinct from None (a signal
    # handler can legitimately be None); confirm before simplifying.
    if args is None:
        args = empty_tuple
    if kwargs is None:
        kwargs = empty_dict
    # No timeout requested (or no usable signal/timer): plain call.
    if timeout is None or timeout == 0 or signal_type is None or \
            timer_type is None:
        return fn(*args, **kwargs)

    def signal_handler(signum, frame):
        # Raised inside fn when the interval timer fires.
        raise timeout_exception_cls(
            inspection.get_function_call_str(fn, args, kwargs))

    old_signal_handler = none
    timer_is_set = False
    try:
        try:
            old_signal_handler = signal.signal(signal_type, signal_handler)
            signal.setitimer(timer_type, timeout)
            timer_is_set = True
        except ValueError:
            # signal.signal/setitimer raise ValueError outside the main
            # thread; either fail loudly or silently run without timeout.
            if fail_if_no_timer:
                raise NotSupportedError(
                    "Timer is not available; the code is probably invoked "
                    "from outside the main "
                    "thread.")
        return fn(*args, **kwargs)
    finally:
        # Always cancel the timer and restore the previous handler.
        if timer_is_set:
            signal.setitimer(timer_type, 0)
        if old_signal_handler is not none:
            signal.signal(signal_type, old_signal_handler)
def store(self, stream, linesep=None):
    """Serialize the Manifest to a binary stream."""
    # Line separator precedence: explicit argument, then the instance
    # setting, then the OS default.
    chosen = linesep or self.linesep or os.linesep
    ManifestSection.store(self, stream, chosen)
    for section in sorted(self.sub_sections.values()):
        section.store(stream, chosen)
def activate(self):
    """If a task is suspended, this will re-activate the task.

    Usually it's best to check for activated before running this::

        task = RefreshPolicyTask('mytask')
        for scheduler in task.task_schedule:
            if scheduler.activated:
                scheduler.suspend()
            else:
                scheduler.activate()
    """
    # The 'activate' link is only present while the task is suspended.
    if 'activate' not in self.data.links:
        raise ActionCommandFailed(
            'Task is already activated. To '
            'suspend, call suspend() on this task schedule')
    self.make_request(
        ActionCommandFailed,
        method='update',
        etag=self.etag,
        resource='activate')
    self._del_cache()
def mine_block(self, *args: Any, **kwargs: Any) -> BaseBlock:
    """Mines the current block. Proxies to the current Virtual Machine.

    See VM. :meth:`~eth.vm.base.VM.mine_block`
    """
    block = self.get_vm(self.header).mine_block(*args, **kwargs)
    self.validate_block(block)
    self.chaindb.persist_block(block)
    # Advance the chain head to build on top of the newly mined block.
    self.header = self.create_header_from_parent(block.header)
    return block
def bm25_weight(X, K1=100, B=0.8):
    """Weighs each row of a sparse matrix X by BM25 weighting.

    Returns a COO sparse matrix whose data has been re-weighted in place.
    """
    X = coo_matrix(X)
    num_rows = float(X.shape[0])
    # inverse document frequency per column (term/user)
    idf = log(num_rows) - log1p(bincount(X.col))
    # per-row length normalisation (document/artist)
    row_sums = numpy.ravel(X.sum(axis=1))
    average_length = row_sums.mean()
    length_norm = (1.0 - B) + B * row_sums / average_length
    # apply the BM25 formula element-wise on the nonzero entries
    X.data = X.data * (K1 + 1.0) / (K1 * length_norm[X.row] + X.data) \
        * idf[X.col]
    return X
def y_score(estimator, X):
    """Score examples from a new matrix X.

    Args:
        estimator: an sklearn estimator object
        X: design matrix with the same features that the estimator was
            trained on

    Returns:
        a vector of scores of the same length as X

    Note that estimator.predict_proba is preferred but when unavailable
    (e.g. SVM without probability calibration) decision_function is used.
    """
    try:
        proba = estimator.predict_proba(X)
    except AttributeError:
        # e.g. an SVM trained without probability calibration
        return estimator.decision_function(X)
    # probability of the positive class
    return proba[:, 1]
def _handle_upsert(self, parts, unwritten_lobs=()):
    """Handle reply messages from INSERT or UPDATE statements"""
    self.description = None
    # set to 'True' so that cursor.fetch*() returns just empty list
    self._received_last_resultset_part = True
    for part in parts:
        if part.kind == part_kinds.ROWSAFFECTED:
            self.rowcount = part.values[0]
        elif part.kind in (part_kinds.TRANSACTIONFLAGS,
                           part_kinds.STATEMENTCONTEXT,
                           part_kinds.PARAMETERMETADATA):
            # informational parts - nothing to extract for an upsert
            pass
        elif part.kind == part_kinds.WRITELOBREPLY:
            # This part occurrs after lobs have been submitted not at all
            # or only partially during an insert.
            # In this case the parameter part of the Request message
            # contains a list called 'unwritten_lobs' with LobBuffer
            # instances.
            # Those instances are in the same order as 'locator_ids'
            # received in the reply message. These IDs are then used to
            # deliver the missing LOB data to the server via
            # WRITE_LOB_REQUESTs.
            for lob_buffer, lob_locator_id in izip(unwritten_lobs,
                                                   part.locator_ids):
                # store locator_id in every lob buffer instance for later
                # reference:
                lob_buffer.locator_id = lob_locator_id
            self._perform_lob_write_requests(unwritten_lobs)
        else:
            raise InterfaceError(
                "Prepared insert statement response, "
                "unexpected part kind %d." % part.kind)
    self._executed = True
def _parse_cgroup_file ( self , stat_file ) : """Parse a cgroup pseudo file for key / values ."""
self . log . debug ( "Opening cgroup file: %s" % stat_file ) try : with open ( stat_file , 'r' ) as fp : if 'blkio' in stat_file : return self . _parse_blkio_metrics ( fp . read ( ) . splitlines ( ) ) elif 'cpuacct.usage' in stat_file : return dict ( { 'usage' : str ( int ( fp . read ( ) ) / 10000000 ) } ) elif 'memory.soft_limit_in_bytes' in stat_file : value = int ( fp . read ( ) ) # do not report kernel max default value ( uint64 * 4096) # see https : / / github . com / torvalds / linux / blob / 5b36577109be007a6ecf4b65b54cbc9118463c2b / mm / memcontrol . c # L2844 - L2845 # 2 * * 60 is kept for consistency of other cgroups metrics if value < 2 ** 60 : return dict ( { 'softlimit' : value } ) elif 'cpu.shares' in stat_file : value = int ( fp . read ( ) ) return { 'shares' : value } else : return dict ( map ( lambda x : x . split ( ' ' , 1 ) , fp . read ( ) . splitlines ( ) ) ) except IOError : # It is possible that the container got stopped between the API call and now . # Some files can also be missing ( like cpu . stat ) and that ' s fine . self . log . debug ( "Can't open %s. Its metrics will be missing." % stat_file )
def verify_cert(self):
    '''Checks that the provided cert and key are valid and usable'''
    log.debug('Verifying the %s certificate, keyfile: %s',
              self.certificate, self.keyfile)
    context = ssl.create_default_context()
    try:
        context.load_cert_chain(self.certificate, keyfile=self.keyfile)
    except ssl.SSLError:
        # cert/key pair failed to load together
        error_string = 'SSL certificate and key do not match'
        log.error(error_string)
        raise SSLMismatchException(error_string)
    except IOError:
        log.error('Unable to open either certificate or key file')
        raise
    log.debug('Certificate looks good.')
def selection_collision(selections, poolsize):
    """Calculate the probability that two random values selected from an
    arbitrary sized pool of unique values will be equal. This is commonly
    known as the "Birthday Problem".

    :param int selections: The number of random selections.
    :param int poolsize: The number of unique random values in the pool
        to choose from.
    :rtype: float
    :return: The chance that a collision will occur as a percentage.
    """
    pool = float(poolsize)
    # Start at 100% "no collision yet" and multiply in the odds of each
    # successive pick avoiding all previous picks.
    p_no_collision = 100.0
    for picked in range(selections):
        p_no_collision = p_no_collision * (pool - picked) / pool
    return 100.0 - p_no_collision
def download_playlist_by_search(self, playlist_name):
    """Download a playlist's songs by its name.

    :params playlist_name: playlist name.
    """
    try:
        playlist = self.crawler.search_playlist(playlist_name, self.quiet)
    except RequestException as exception:
        # report the network error and bail out
        click.echo(exception)
        return
    self.download_playlist_by_id(
        playlist.playlist_id, playlist.playlist_name)
def check_bom(file):
    """Determine file codec from its BOM record.

    If the file starts with a BOM encoded as UTF-8 or UTF-16 (BE/LE), the
    corresponding canonical encoding name is returned; otherwise None is
    returned. In both cases the file's current position is set to just
    after the BOM bytes. The file must be open in binary mode and
    positioned at offset 0.
    """
    # The longest BOM we recognise (UTF-8) is 3 bytes.
    lead = file.read(3)
    if len(lead) == 3 and lead == codecs.BOM_UTF8:
        # UTF-8: position is already past the 3-byte BOM
        return codecs.lookup('utf-8').name
    if len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_BE:
        if len(lead) == 3:
            # give back the extra byte read past the 2-byte BOM
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-be').name
    if len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_LE:
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-le').name
    # no BOM: rewind everything we read
    file.seek(-len(lead), os.SEEK_CUR)
    return None
def run_server(port=8000):
    """Runs server on port with html response"""
    # NOTE(review): this function reads the module globals `verbose`,
    # `html`, `changed_file`, `html_from_markdown` and `standalone`
    # defined elsewhere in this file; if `html` has never been assigned
    # before the first GET with no changed file, the handler would raise
    # NameError - confirm the caller initialises it.
    from http.server import BaseHTTPRequestHandler, HTTPServer

    class VerboseHTMLHandler(BaseHTTPRequestHandler):
        def do_HEAD(s):
            s.send_response(200)
            s.send_header("Content-type", "text/html")
            s.end_headers()

        def do_GET(s):
            # Re-render only when the watched file changed; otherwise
            # serve the cached module-level `html`.
            global html
            data = changed_file()
            if data is not None:
                html = html_from_markdown(data)
            s.send_response(200)
            s.send_header("Content-type", "text/html")
            s.end_headers()
            s.wfile.write(standalone(html).encode('utf-8'))

    class SilentHTMLHandler(VerboseHTMLHandler):
        # Suppress the default per-request stderr logging.
        def log_message(self, format, *args):
            return

    # Accept string ports (e.g. from the command line).
    port = int(port)
    server_class = HTTPServer
    handler = VerboseHTMLHandler if verbose else SilentHTMLHandler
    try:
        httpd = server_class(("localhost", port), handler)
    except PermissionError:
        # privileged ports (<1024) without sufficient rights
        sys.stderr.write("Permission denied\n")
        sys.exit(1)
    if verbose:
        print("Hosting server on port %d. Ctrl-c to exit" % port)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    if verbose:
        print("\rShutting down server")
def get_fieldmap(self, path, return_list=False):
    """Get fieldmap(s) for specified path.

    With return_list=True the raw list is returned; otherwise exactly
    one fieldmap is expected (ValueError if several, None if none).
    """
    fieldmaps = self._get_fieldmaps(path)
    if return_list:
        return fieldmaps
    count = len(fieldmaps)
    if count == 1:
        return fieldmaps[0]
    if count > 1:
        raise ValueError("More than one fieldmap found, but the "
                         "'return_list' argument was set to False. "
                         "Either ensure that there is only one "
                         "fieldmap for this image, or set the "
                         "'return_list' argument to True and handle "
                         "the result as a list.")
    # count == 0
    return None
def join_path(*path_to_join, **kwargs):
    """Join input path to sample data path (usually in ~/lisa_data).

    :param path_to_join: one or more paths
    :param get_root: return dataset root path. If false, the path would
        be into "medical/orig"
    :return: joined path
    """
    # dict.get() replaces the manual "in"-check/else dance.
    get_root = kwargs.get("get_root", False)
    sdp = dataset_path(get_root=get_root)
    pth = os.path.join(sdp, *path_to_join)
    # Lazy %-style args avoid eager str() concatenation while producing
    # identical log text.
    logger.debug("sample_data_path%s", sdp)
    logger.debug("path %s", pth)
    return pth
def add_eager_constraints(self, models):
    """Set the constraints for an eager load of the relation.

    :param models: parent models to constrain the eager query by
    :type models: list
    """
    # Apply the base one-or-many constraints first, then narrow the query
    # to rows whose morph-type column matches this relation's class.
    super(MorphOneOrMany, self).add_eager_constraints(models)
    self._query.where(self._morph_type, self._morph_class)
def tap(self, on_element):
    """Taps on a given element.

    :Args:
     - on_element: The element to tap.
    """
    def _do_tap():
        # Deferred until the action chain is performed.
        self._driver.execute(Command.SINGLE_TAP,
                             {'element': on_element.id})
    self._actions.append(_do_tap)
    return self
def get_gef(self):
    """Extract Gef INDRA Statements from the BioPAX model.

    This method uses a custom BioPAX Pattern (one that is not implemented
    PatternBox) to query for controlled BiochemicalReactions in which the
    same protein is in complex with GDP on the left hand side and in
    complex with GTP on the right hand side. This implies that the
    controller is a GEF for the GDP/GTP-bound protein.
    """
    p = self._gef_gap_base()
    s = _bpp('Searcher')
    res = s.searchPlain(self.model, p)
    res_array = [_match_to_array(m) for m in res.toArray()]
    for r in res_array:
        # Pull the named pattern slots out of the match array.
        controller_pe = r[p.indexOf('controller PE')]
        input_pe = r[p.indexOf('input PE')]
        input_spe = r[p.indexOf('input simple PE')]
        output_pe = r[p.indexOf('output PE')]
        # NOTE(review): output_spe and reaction are extracted but unused
        # below - presumably kept for symmetry with get_gap; confirm.
        output_spe = r[p.indexOf('output simple PE')]
        reaction = r[p.indexOf('Conversion')]
        control = r[p.indexOf('Control')]
        # Make sure the GEF is not a complex
        # TODO: it could be possible to extract certain complexes here,
        # for instance ones that only have a single protein
        if _is_complex(controller_pe):
            continue
        members_in = self._get_complex_members(input_pe)
        members_out = self._get_complex_members(output_pe)
        if not (members_in and members_out):
            continue
        # Make sure the outgoing complex has exactly 2 members
        # TODO: by finding matching proteins on either side, in principle
        # it would be possible to find Gef relationships in complexes
        # with more members
        if len(members_out) != 2:
            continue
        # Make sure complex starts with GDP that becomes GTP
        gdp_in = False
        for member in members_in:
            if isinstance(member, Agent) and member.name == 'GDP':
                gdp_in = True
        gtp_out = False
        for member in members_out:
            if isinstance(member, Agent) and member.name == 'GTP':
                gtp_out = True
        if not (gdp_in and gtp_out):
            continue
        # Build one Gef statement per (gef, ras) pair found in the match.
        ras_list = self._get_agents_from_entity(input_spe)
        gef_list = self._get_agents_from_entity(controller_pe)
        ev = self._get_evidence(control)
        for gef, ras in itertools.product(_listify(gef_list),
                                          _listify(ras_list)):
            st = Gef(gef, ras, evidence=ev)
            st_dec = decode_obj(st, encoding='utf-8')
            self.statements.append(st_dec)
def OpenClient(client_id=None, token=None):
    """Opens the client, getting potential approval tokens.

    Args:
      client_id: The client id that should be opened.
      token: Token to use to open the client.

    Returns:
      tuple containing (client, token) objects or (None, None) if no
      appropriate approval tokens were found.
    """
    if not token:
        # No token supplied; see whether an approval already exists.
        try:
            token = ApprovalFind(client_id, token=token)
        except access_control.UnauthorizedAccess as e:
            logging.debug("No authorization found for access to client: %s", e)

    # Try and open with the token we managed to retrieve or the default.
    try:
        client = aff4.FACTORY.Open(rdfvalue.RDFURN(client_id), mode="r",
                                   token=token)
    except access_control.UnauthorizedAccess:
        logging.warning("Unable to find a valid reason for client %s. You may need "
                        "to request approval.", client_id)
        return None, None
    return client, token
def set_result(self, result):
    """Complete all tasks.

    Sets ``result`` on every future yielded by ``self.traverse()`` and
    then, if this future itself is not yet done, completes it with the
    same result via the base class.
    """
    for future in self.traverse():
        # All cancelled futures should have callbacks that remove them
        # from this linked list. However, these callbacks are scheduled in
        # an event loop, so we could still find them in our list.
        future.set_result(result)
    if not self.done():
        # Complete this future itself through the base implementation so
        # the propagation loop above is not re-entered.
        super().set_result(result)
def str_to_obj(cls, file_path=None, text='', columns=None,
               remove_empty_rows=True, key_on=None, row_columns=None,
               deliminator='\t', eval_cells=True):
    """Convert a text file or text to a seaborn table and return it.

    :param file_path: str of the path to the file
    :param text: str of the csv text
    :param columns: list of str of columns to use
    :param remove_empty_rows: bool if True will remove empty rows
    :param key_on: list of str of columns to key on
    :param row_columns: list of str of columns in data but not to use
    :param deliminator: str to use as a deliminator
    :param eval_cells: bool if True will try to evaluate numbers
    :return: SeabornTable
    """
    text = cls._get_lines(file_path, text)
    # A single "line" presumably means classic-Mac '\r' line endings;
    # re-split on '\r' in that case.
    if len(text) == 1:
        text = text[0].split('\r')
    # `any(row)` is True exactly when the row string is non-empty
    # (was `True in [bool(r) for r in row]`, which built a throwaway
    # list of per-character booleans for every row).
    list_of_list = [
        [cls._eval_cell(cell, _eval=eval_cells)
         for cell in row.split(deliminator)]
        for row in text
        if not remove_empty_rows or any(row)]
    # Guard against fully-empty input before peeking at the first row
    # (previously raised IndexError). Leading/trailing deliminators on
    # every line produce empty edge columns; strip them off.
    if list_of_list and list_of_list[0][0] == '' and list_of_list[0][-1] == '':
        list_of_list = [row[1:-1] for row in list_of_list]
    return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns,
                           row_columns=row_columns)
def _generate_mix2pl_dataset(n, m, outfile, useDirichlet=True):
    """Generate a Mixture of 2 Plackett-Luce models dataset and save it.

    :param n: number of votes to generate
    :param m: number of alternatives
    :param outfile: open file object to which the dataset is written
    :param useDirichlet: boolean flag to use the Dirichlet distribution
    :return: (params, votes) tuple as produced by generate_mix2pl_dataset
    """
    params, votes = generate_mix2pl_dataset(n, m, useDirichlet)
    # File layout: "<m>,<n>" header, one line of model parameters, then
    # one comma-separated line per vote.
    outfile.write('%s,%s\n' % (m, n))
    outfile.write(','.join(str(p) for p in params) + '\n')
    for vote in votes:
        outfile.write(','.join(str(v) for v in vote) + '\n')
    return (params, votes)
def get_row_missing(xc, xd, cdiffs, index, cindices, dindices):
    """Calculate distance between index instance and all other instances.

    Builds one row of the lower-triangular pairwise distance matrix: the
    distances between instance ``index`` and every instance ``j < index``.
    Features that are missing in either member of a pair are excluded, and
    each distance is renormalized by the number of features actually
    compared so missing values are handled neutrally.

    :param xc: 2d array of continuous-valued features (instances x features)
    :param xd: 2d array of discrete-valued features (instances x features)
    :param cdiffs: 1d array of value ranges for the continuous features
    :param index: int index of the reference instance
    :param cindices: per-instance index arrays of missing continuous values
    :param dindices: per-instance index arrays of missing discrete values
    :return: 1d np.double array of length ``index`` with the distances
    """
    cinst1 = xc[index]  # continuous-valued features for index instance
    dinst1 = xd[index]  # discrete-valued features for index instance
    can = cindices[index]  # missing continuous-feature indexes, index instance
    dan = dindices[index]  # missing discrete-feature indexes, index instance
    tf = len(cinst1) + len(dinst1)  # total number of features
    # Preallocate the result instead of calling np.append inside the loop;
    # np.append reallocates and copies the whole row every iteration,
    # making the original accidentally O(index^2).
    row = np.empty(index, dtype=np.double)
    # Compare only against instances before `index` (triangle of the
    # distance matrix; excludes comparison with self).
    for j in range(index):
        dist = 0
        dinst2 = xd[j]  # discrete-valued features for compared instance
        cinst2 = xc[j]  # continuous-valued features for compared instance
        # Discrete features: drop every feature missing in either instance.
        dbn = dindices[j]
        idx = np.unique(np.append(dan, dbn))
        dmc = len(idx)  # discrete features excluded from this pair
        d1 = np.delete(dinst1, idx)
        d2 = np.delete(dinst2, idx)
        # Continuous features: same treatment for missing values.
        cbn = cindices[j]
        idx = np.unique(np.append(can, cbn))
        cmc = len(idx)  # continuous features excluded from this pair
        c1 = np.delete(cinst1, idx)
        c2 = np.delete(cinst2, idx)
        cdf = np.delete(cdiffs, idx)  # matching feature value ranges
        # Hamming distance over the remaining discrete features.
        dist += len(d1[d1 != d2])
        # Range-normalized Manhattan distance over the remaining continuous
        # features (the 0-1 normalization minimums cancel in the
        # subtraction, so dividing by the range suffices).
        dist += np.sum(np.absolute(np.subtract(c1, c2)) / cdf)
        # Normalize by the number of features actually included in the sum.
        tnmc = tf - dmc - cmc
        row[j] = dist / float(tnmc)
    return row
def get_feature_layers(self, input_layer=None, trainable=False, use_weighted_sum=False):
    """Get layers that output the Bi-LM feature.

    :param input_layer: Use existing input layer.
    :param trainable: Whether the layers are still trainable.
    :param use_weighted_sum: Whether to use weighted sum of RNN layers.
    :return: [input_layer,] output_layer: Input and output layer. The input
        layer is only returned when no ``input_layer`` was supplied.
    """
    # Clone the trained model so the feature extractor can be re-attached
    # to a (possibly new) input layer without mutating self.model.
    model = keras.models.clone_model(self.model, input_layer)
    if not trainable:
        # Freeze every cloned layer so downstream training leaves the
        # language-model weights untouched.
        for layer in model.layers:
            layer.trainable = False
    if use_weighted_sum:
        # Recover each stored RNN output tensor in the cloned model. The
        # tensor name is stripped of its '/op' and ':index' suffixes and of
        # everything after the first '_' to obtain the layer name —
        # NOTE(review): this assumes the relevant layer names contain no
        # underscores; confirm against the model-building naming scheme.
        rnn_layers_forward = list(map(
            lambda x: model.get_layer(
                x.name.split('/')[0].split(':')[0].split('_')[0]).output,
            self.rnn_layers_forward,
        ))
        rnn_layers_backward = list(map(
            lambda x: model.get_layer(
                x.name.split('/')[0].split(':')[0].split('_')[0]).output,
            self.rnn_layers_backward,
        ))
        # Learnable weighted sum over the stacked RNN outputs.
        forward_layer = WeightedSum(name='Bi-LM-Forward-Sum')(rnn_layers_forward)
        backward_layer_rev = WeightedSum(
            name='Bi-LM-Backward-Sum-Rev')(rnn_layers_backward)
        # The '-Rev' naming suggests the backward sum is in reversed time
        # order; _reverse_x flips it (and its mask) back so it aligns with
        # the forward features before concatenation.
        backward_layer = keras.layers.Lambda(
            function=self._reverse_x,
            mask=lambda _, mask: self._reverse_x(mask),
            name='Bi-LM-Backward-Sum')(backward_layer_rev)
    else:
        forward_layer = model.get_layer(name='Bi-LM-Forward').output
        backward_layer = model.get_layer(name='Bi-LM-Backward').output
    # The Bi-LM feature is the concatenation of both directions.
    output_layer = keras.layers.Concatenate(
        name='Bi-LM-Feature')([forward_layer, backward_layer])
    if input_layer is None:
        input_layer = model.layers[0].input
        return input_layer, output_layer
    return output_layer
def p_domain_block(self, p):
    '''domain_block : DOMAIN IDENT LCURLY req_section domain_list RCURLY'''
    # PLY reads the grammar rule from the docstring above; this action
    # builds a tagged Domain AST node from the parsed pieces:
    # p[2] = IDENT (domain name), p[4] = req_section, p[5] = domain_list.
    p[0] = ('domain', Domain(p[2], p[4], p[5]))
def compare_annotations(ref_sample, test_sample, window_width, signal=None):
    """Compare a set of reference annotation locations against a set of
    test annotation locations.

    Constructs a Comparitor for the two sets of sample locations, runs the
    comparison, and returns the populated object. See the Comparitor class
    docstring for more information.

    Parameters
    ----------
    ref_sample : 1d numpy array
        Array of reference sample locations.
    test_sample : 1d numpy array
        Array of test sample locations to compare.
    window_width : int
        The maximum absolute difference in sample numbers that is
        permitted for matching annotations.
    signal : 1d numpy array, optional
        The original signal of the two annotations. Only used for plotting.

    Returns
    -------
    comparitor : Comparitor object
        Object containing parameters about the two sets of annotations.

    Examples
    --------
    >>> import wfdb
    >>> from wfdb import processing
    >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
    >>> ann_ref = wfdb.rdann('sample-data/100', 'atr')
    >>> xqrs = processing.XQRS(sig=sig[:, 0], fs=fields['fs'])
    >>> xqrs.detect()
    >>> comparitor = processing.compare_annotations(ann_ref.sample[1:],
                                                    xqrs.qrs_inds,
                                                    int(0.1 * fields['fs']),
                                                    sig[:, 0])
    >>> comparitor.print_summary()
    >>> comparitor.plot()
    """
    matcher = Comparitor(ref_sample=ref_sample,
                         test_sample=test_sample,
                         window_width=window_width,
                         signal=signal)
    matcher.compare()
    return matcher
def admin_view_url(admin_site: AdminSite, obj, view_type: str = "change", current_app: str = None) -> str:
    """Get a Django admin site URL for an object.

    Resolves the conventional ``admin:<app>_<model>_<view_type>`` route
    (the change page by default) for ``obj`` within the given admin site.
    """
    meta = obj._meta
    viewname = "admin:{}_{}_{}".format(
        meta.app_label.lower(), meta.object_name.lower(), view_type)
    if current_app is None:
        current_app = admin_site.name
    return reverse(viewname, args=[obj.pk], current_app=current_app)
def MultiOpen(self, urns, mode="rw", token=None, aff4_type=None,
              age=NEWEST_TIME, follow_symlinks=True):
    """Opens a bunch of urns efficiently.

    Attributes for all urns are fetched with one ``GetAttributes`` call and
    handed to ``Open`` through ``local_cache``, so each object is built
    without an extra round trip. Objects that fail to open with ``IOError``
    are silently skipped.

    Args:
      urns: Collection of urns to open.
      mode: One of "r", "w" or "rw".
      token: Access token; defaults to the data store's default token.
      aff4_type: If set, only objects of this type are yielded.
      age: Age policy used when fetching attributes.
      follow_symlinks: If True, AFF4Symlink objects are resolved and their
        targets yielded (with ``symlink_urn`` set) instead of the links
        themselves.

    Yields:
      Opened AFF4 objects.

    Raises:
      NotImplementedError: If the AFF4 data store is disabled.
      ValueError: For an invalid mode.
    """
    if not data_store.AFF4Enabled():
        raise NotImplementedError("AFF4 data store has been disabled.")
    if token is None:
        token = data_store.default_token
    if mode not in ["w", "r", "rw"]:
        raise ValueError("Invalid mode %s" % mode)
    symlinks = {}
    _ValidateAFF4Type(aff4_type)
    for urn, values in self.GetAttributes(urns, age=age):
        try:
            obj = self.Open(
                urn,
                mode=mode,
                token=token,
                local_cache={urn: values},
                age=age,
                follow_symlinks=False)
            # We can't pass aff4_type to Open since it will raise on
            # AFF4Symlinks. Setting it here, if needed, so that
            # BadGetAttributeError checking works.
            if aff4_type:
                obj.aff4_type = aff4_type
            if follow_symlinks and isinstance(obj, AFF4Symlink):
                # Collect symlinks grouped by target urn; they are resolved
                # in one recursive pass after the main loop.
                target = obj.Get(obj.Schema.SYMLINK_TARGET)
                if target is not None:
                    symlinks.setdefault(target, []).append(obj.urn)
            elif aff4_type:
                if isinstance(obj, aff4_type):
                    yield obj
            else:
                yield obj
        except IOError:
            # Unopenable objects are skipped rather than aborting the batch.
            pass
    if symlinks:
        # Open all symlink targets with a recursive MultiOpen. A target
        # referenced by several links is yielded once per link: one clone
        # per additional link urn, then the original object carrying the
        # first link urn.
        for obj in self.MultiOpen(
                symlinks, mode=mode, token=token, aff4_type=aff4_type,
                age=age):
            to_link = symlinks[obj.urn]
            for additional_symlink in to_link[1:]:
                clone = obj.__class__(obj.urn, clone=obj)
                clone.symlink_urn = additional_symlink
                yield clone
            obj.symlink_urn = symlinks[obj.urn][0]
            yield obj
def register_provider(self, specification, provider):
    """Register a :term:`provider` for a :term:`specification`.

    The given :py:class:`wiring.providers.Provider` instance will be called
    whenever an object matching `specification` is needed. Any provider
    previously registered for the same specification is overridden.

    :raises UnknownScopeError:
        If the provider declares a scope that is not registered here.
    """
    scope = provider.scope
    if scope is not None and scope not in self.scopes:
        raise UnknownScopeError(scope)
    self.providers[specification] = provider
def shell(ipython_args):
    """Runs a shell in the app context.

    Runs an interactive Python shell in the context of a given Flask
    application. The application will populate the default namespace of
    this shell according to its configuration.

    This is useful for executing small snippets of management code without
    having to manually configure the application.
    """
    # Imported lazily so the heavy IPython dependency is only required when
    # this command actually runs.
    import IPython
    from IPython.terminal.ipapp import load_default_config
    from traitlets.config.loader import Config
    from flask.globals import _app_ctx_stack
    app = _app_ctx_stack.top.app
    # An app-supplied IPYTHON_CONFIG replaces the default IPython config.
    if 'IPYTHON_CONFIG' in app.config:
        config = Config(app.config['IPYTHON_CONFIG'])
    else:
        config = load_default_config()
    # NOTE(review): the banner literal's internal line breaks appear to
    # have been lost in source formatting; preserved here exactly as found
    # — confirm against upstream before reflowing.
    config.TerminalInteractiveShell.banner1 = '''Python %s on %s IPython: %s App: %s [%s] Instance: %s''' % (
        sys.version,
        sys.platform,
        IPython.__version__,
        app.import_name,
        app.env,
        app.instance_path)
    # Hand control to IPython with the app's shell context pre-loaded.
    IPython.start_ipython(
        argv=ipython_args,
        user_ns=app.make_shell_context(),
        config=config,
    )
def ensure_true(bool_func: Callable[[Any], bool], message=None) -> Filter_T:
    """Build a filter that passes a value through unchanged only when
    applying ``bool_func`` to it yields exactly ``True``; otherwise a
    validation failure is raised with the given message."""
    def validate(value):
        outcome = bool_func(value)
        # Identity check on purpose: only the literal True passes.
        if outcome is not True:
            _raise_failure(message)
        return value
    return validate
def get_draws(fit, variables=None, ignore=None):
    """Extract draws from PyStan fit.

    :param fit: a PyStan fit object containing posterior samples
    :param variables: str or list of str naming the parameters to extract;
        defaults to all parameters in the fit
    :param ignore: list of variable names to exclude from the result
    :return: OrderedDict mapping variable name to an ndarray of shape
        (nchain, ndraw, *param_dims), Fortran-ordered
    :raises AttributeError: if the fit was run in 'test_grad' mode or
        contains no samples
    """
    if ignore is None:
        ignore = []
    if fit.mode == 1:
        msg = "Model in mode 'test_grad'. Sampling is not conducted."
        raise AttributeError(msg)
    if fit.mode == 2 or fit.sim.get("samples") is None:
        msg = "Fit doesn't contain samples."
        raise AttributeError(msg)
    dtypes = infer_dtypes(fit)
    # Normalize `variables` to a list, defaulting to every parameter.
    if variables is None:
        variables = fit.sim["pars_oi"]
    elif isinstance(variables, str):
        variables = [variables]
    variables = list(variables)
    # Drop zero-sized parameters (empty dims) from the request.
    for var, dim in zip(fit.sim["pars_oi"], fit.sim["dims_oi"]):
        if var in variables and np.prod(dim) == 0:
            del variables[variables.index(var)]
    # Post-warmup draw count per chain.
    ndraws = [s - w for s, w in zip(fit.sim["n_save"], fit.sim["warmup2"])]
    nchain = len(fit.sim["samples"])
    # check if the values are in 0-based (<=2.17) or 1-based indexing (>=2.18)
    shift = 1
    if any(fit.sim["dims_oi"]):
        # choose variable with lowest number of dims > 1
        par_idx = min(
            (dim, i) for i, dim in enumerate(fit.sim["dims_oi"]) if dim)[1]
        # NOTE(review): np.product is deprecated in newer NumPy in favor of
        # np.prod (np.prod is already used above).
        offset = int(sum(map(np.product, fit.sim["dims_oi"][:par_idx])))
        par_offset = int(np.product(fit.sim["dims_oi"][par_idx]))
        par_keys = fit.sim["fnames_oi"][offset:offset + par_offset]
        shift = len(par_keys)
        # Find the smallest index appearing in the flat names, e.g.
        # "theta[1,1]" -> 1 means 1-based indexing.
        for item in par_keys:
            _, shape = item.replace("]", "").split("[")
            shape_idx_min = min(
                int(shape_value) for shape_value in shape.split(","))
            if shape_idx_min < shift:
                shift = shape_idx_min
        # If shift is higher than 1, this will probably mean that Stan
        # has implemented sparse structure (saves only non-zero parts),
        # but let's hope that dims are still corresponding the full shape
        shift = int(min(shift, 1))
    # Map each parameter name to its flat names and the (shift-corrected)
    # index tuple each flat name occupies inside the parameter array.
    var_keys = OrderedDict((var, []) for var in fit.sim["pars_oi"])
    for key in fit.sim["fnames_oi"]:
        var, *tails = key.split("[")
        loc = [Ellipsis]  # scalar parameters fill the whole (chain, draw) slice
        for tail in tails:
            loc = []
            for i in tail[:-1].split(","):
                loc.append(int(i) - shift)
        var_keys[var].append((key, loc))
    shapes = dict(zip(fit.sim["pars_oi"], fit.sim["dims_oi"]))
    variables = [var for var in variables if var not in ignore]
    data = OrderedDict()
    for var in variables:
        if var in data:
            continue
        keys_locs = var_keys.get(var, [(var, [Ellipsis])])
        shape = shapes.get(var, [])
        dtype = dtypes.get(var)
        ndraw = max(ndraws)
        # One Fortran-ordered array per variable: (chain, draw, *dims).
        ary_shape = [nchain, ndraw] + shape
        ary = np.empty(ary_shape, dtype=dtype, order="F")
        # Fill each chain's slice from the per-chain flat sample holders,
        # keeping only the last `ndraw` (post-warmup) values.
        for chain, (pyholder, ndraw) in enumerate(
                zip(fit.sim["samples"], ndraws)):
            axes = [chain, slice(None)]
            for key, loc in keys_locs:
                ary_slice = tuple(axes + loc)
                ary[ary_slice] = pyholder.chains[key][-ndraw:]
        data[var] = ary
    return data
def signal(sig, action):
    """Register *action* as an additional handler for signal *sig*.

    The point of this module and method is to decouple signal handlers
    from each other. The standard way to deal with handlers is to always
    store the old handler and call it, which creates a chain of handlers
    and makes it impossible to later remove one. This method behaves like
    signal.signal() from the standard python library, except that it
    always returns SIG_DFL, indicating that the new handler is not
    supposed to call the old one.

    :param sig: signal number to handle
    :param action: callable invoked when the signal is delivered
    :return: SIG_DFL (chaining is managed internally, never by callers)
    """
    assert callable(action), ("Second argument of signal() needs to be a "
                              "callable, got %r instead" % (action,))
    global _handlers
    # Make sure our dispatching handler is installed for this signal.
    _install_handler(sig)
    # BUGFIX: registered actions live in _handlers[sig][1]. The previous
    # check (`action in _handlers[sig]`) tested membership in the outer
    # container itself, so duplicate registrations were never detected and
    # the same action could be appended (and later invoked) repeatedly.
    if action in _handlers[sig][1]:
        log.debug('signal', "Handler for signal %s already registered. %r",
                  sig, action)
        return SIG_DFL
    _handlers[sig][1].append(action)
    return SIG_DFL