# Dataset table header (string-length metadata from the extraction):
#   signature: string lengths 29 - 44.1k
#   implementation: string lengths 0 - 85.2k
def mtFeatureExtraction(signal, fs, mt_win, mt_step, st_win, st_step):
    """Mid-term feature extraction.

    Extracts short-term features first, then aggregates them over mid-term
    windows using two statistics (mean and std) per short-term feature.

    :param signal: audio signal samples
    :param fs: sampling rate
    :param mt_win: mid-term window size (same time unit as st_step)
    :param mt_step: mid-term step
    :param st_win: short-term window size
    :param st_step: short-term step
    :return: (mid-term feature matrix, short-term feature matrix,
              mid-term feature names)
    """
    # Mid-term window/step expressed as a number of short-term frames.
    win_frames = int(round(mt_win / st_step))
    step_frames = int(round(mt_step / st_step))

    st_features, f_names = stFeatureExtraction(signal, fs, st_win, st_step)
    n_feats = len(st_features)
    n_stats = 2  # mean and std

    mt_features = [[] for _ in range(n_stats * n_feats)]
    mid_feature_names = [""] * (n_stats * n_feats)

    for i in range(n_feats):
        n_frames = len(st_features[i])
        mid_feature_names[i] = f_names[i] + "_" + "mean"
        mid_feature_names[i + n_feats] = f_names[i] + "_" + "std"
        pos = 0
        while pos < n_frames:
            # Clamp the last window to the end of the frame sequence.
            end = min(pos + win_frames, n_frames)
            window = st_features[i][pos:end]
            mt_features[i].append(numpy.mean(window))
            mt_features[i + n_feats].append(numpy.std(window))
            pos += step_frames

    return numpy.array(mt_features), st_features, mid_feature_names
def update(self, validate=False):
    """Refresh this DB instance's status information from the service.

    Fetches the current attributes for this instance id and merges them
    into this object.

    :type validate: bool
    :param validate: By default, if the service returns no data about the
        instance this method returns quietly.  If ``validate`` is True,
        however, a ValueError is raised when no data is returned.
    """
    instances = self.connection.get_all_dbinstances(self.id)
    if instances:
        # Copy the attributes of the matching result onto this object.
        for candidate in instances:
            if candidate.id == self.id:
                self.__dict__.update(candidate.__dict__)
    elif validate:
        raise ValueError('%s is not a valid Instance ID' % self.id)
    return self.status
def hydrate(self, broker=None):
    """Load a Broker from a previously saved one.

    A Broker is created if one isn't provided.
    """
    broker = broker or dr.Broker()
    for meta_path in glob(os.path.join(self.meta_data, "*")):
        try:
            with open(meta_path) as fp:
                doc = ser.load(fp)
            comp, results, exec_time, ser_time = self._hydrate_one(doc)
            if results:
                broker[comp] = results
                # Serialization overhead counts toward the recorded time.
                broker.exec_times[comp] = exec_time + ser_time
        except Exception as ex:
            # Best-effort: a single corrupt file should not abort hydration.
            log.warning(ex)
    return broker
def add_local_node(self, child_node, name=None):
    """Append a child that should alter the locals of this scope node.

    :param child_node: The child node that will alter locals.
    :type child_node: NodeNG
    :param name: The name of the local that will be altered by
        the given child node.
    :type name: str or None
    """
    # Adding a __class__ node as a child would cause infinite recursion later!
    if name != "__class__":
        self._append_node(child_node)
    self.set_local(name or child_node.name, child_node)
def xorc_constraint(v=0, sense="maximize"):
    """XOR custom constraint, with the XOR result r modeled as a variable."""
    assert v in (0, 1), "v must be 0 or 1 instead of %s" % repr(v)
    model, x, y, z = _init()
    r = model.addVar("r", "B")  # binary XOR result
    n = model.addVar("n", "I")  # auxiliary integer
    # r + x + y + z must be even, which forces r = x XOR y XOR z.
    model.addCons(r + quicksum([x, y, z]) == 2 * n)
    model.addCons(x == v)
    model.setObjective(r, sense=sense)
    _optimize("Custom XOR (as variable)", model)
def orbit(self):
    """Convert this TLE to an Orbit object, in order to make computations on it.

    Return:
        ~beyond.orbits.orbit.Orbit:
    """
    extra = {
        'bstar': self.bstar,
        'ndot': self.ndot,
        'ndotdot': self.ndotdot,
        'tle': self.text,
    }
    return Orbit(self.epoch, self.to_list(), "TLE", "TEME", 'Sgp4', **extra)
def download_configuration(self) -> str:
    """Download the current configuration from the cloud.

    Returns:
        the downloaded configuration or an errorCode
    """
    payload = json.dumps(self._connection.clientCharacteristics)
    return self._restCall("home/getCurrentState", payload)
def generate_identity_binding_access_token(
    self,
    name,
    scope,
    jwt,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Exchange a JWT signed by a third-party identity provider for an
    OAuth 2.0 access token.

    Example:
        >>> from google.cloud import iam_credentials_v1
        >>> client = iam_credentials_v1.IAMCredentialsClient()
        >>> name = client.service_account_path('[PROJECT]', '[SERVICE_ACCOUNT]')
        >>> scope = []
        >>> jwt = ''
        >>> response = client.generate_identity_binding_access_token(name, scope, jwt)

    Args:
        name (str): Resource name of the service account for which the
            credentials are requested, in the format
            ``projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}``.
        scope (list[str]): Scopes to be included in the OAuth 2.0 access
            token.  At least one value is required.
        jwt (str): Required.  Input token; must be in JWT format according to
            RFC 7523 and must have a 'kid' field in the header.
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests.  If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait for
            the request to complete.  Note that if ``retry`` is specified, the
            timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.iam_credentials_v1.types.GenerateIdentityBindingAccessTokenResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    method_name = "generate_identity_binding_access_token"
    # Lazily wrap the transport method to add retry and timeout logic.
    if method_name not in self._inner_api_calls:
        self._inner_api_calls[method_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.generate_identity_binding_access_token,
            default_retry=self._method_configs["GenerateIdentityBindingAccessToken"].retry,
            default_timeout=self._method_configs["GenerateIdentityBindingAccessToken"].timeout,
            client_info=self._client_info,
        )

    request = common_pb2.GenerateIdentityBindingAccessTokenRequest(
        name=name, scope=scope, jwt=jwt
    )
    metadata = list(metadata) if metadata is not None else []
    try:
        routing_header = [("name", name)]
    except AttributeError:
        pass
    else:
        # Attach the routing header so the backend can route by resource name.
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls[method_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def ip_to_bytes(ip_str, big_endian=True):
    """Convert an IP address given as a dotted-quad string to a 4-byte sequence.

    :param ip_str: IP address string, e.g. ``"1.2.3.4"``
    :param big_endian: when True (default) the bytes are returned in network
        (big-endian) order; otherwise in little-endian (reversed) order.
    :return: the 4-byte representation of the address
    """
    code = '>L' if big_endian else '<L'
    # inet_aton yields the address in network (big-endian) order; unpack it to
    # an integer and repack with the requested endianness.
    value = struct.unpack('>L', socket.inet_aton(ip_str))[0]
    # BUG FIX: the previous code returned ``bytes(value)`` which, on Python 3,
    # creates a zero-filled buffer of length ``value`` rather than the address
    # bytes.  struct.pack produces the intended 4-byte sequence.
    return struct.pack(code, value)
def activate_scene(self, scene_uuid, duration=1.0):
    """Activate a scene.

    See http://api.developer.lifx.com/docs/activate-scene

    scene_uuid: required String
        The UUID for the scene you wish to activate
    duration: Double
        The time in seconds to spend performing the scene transition.
        default: 1.0
    """
    return self.client.perform_request(
        method='put',
        endpoint='scenes/scene_id:{}/activate',
        endpoint_args=[scene_uuid],
        argument_tuples=[("duration", duration)],
    )
def write(self, buf):
    """Insert a string buffer as a record.

    Examples
    >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
    >>> for i in range(5):
    ...     record.write('record_%d' % i)
    >>> record.close()

    Parameters
    buf : string (python2), bytes (python3)
        Buffer to write.
    """
    assert self.writable
    # Guard against accidental use of the handle after a fork.
    self._check_pid(allow_reset=False)
    check_call(
        _LIB.MXRecordIOWriterWriteRecord(
            self.handle,
            ctypes.c_char_p(buf),
            ctypes.c_size_t(len(buf)),
        )
    )
def to_text(self, tree, force_root=False):
    """Extract text from tags.

    Skip any selectors specified and include attributes if specified.
    Ignored tags will not have their attributes scanned either.
    """
    self.extract_tag_metadata(tree)

    text = []
    attributes = []
    comments = []
    blocks = []

    ignored = self.ignores.match(tree) if self.ignores else None
    if not ignored:
        # The root of the document is the BeautifulSoup object.
        capture = self.captures.match(tree) if self.captures is not None else None
        # Check attributes for normal tags.
        if capture:
            for attr in self.attributes:
                value = tree.attrs.get(attr, '').strip()
                if value:
                    sel = self.construct_selector(tree, attr=attr)
                    attributes.append((value, sel))
        # Walk children.
        for child in tree.children:
            string = str(child).strip()
            is_comment = isinstance(child, bs4.Comment)
            if isinstance(child, bs4.element.Tag):
                # Recurse into child tags, merging their results.
                t, b, a, c = self.to_text(child)
                text.extend(t)
                attributes.extend(a)
                comments.extend(c)
                blocks.extend(b)
            # Get content if not the root and not a comment (unless we want comments).
            elif not isinstance(child, NON_CONTENT) and (not is_comment or self.comments):
                string = str(child).strip()
                if string:
                    if is_comment:
                        sel = self.construct_selector(tree) + '<!--comment-->'
                        comments.append((string, sel))
                    elif capture:
                        text.append(string)
                        text.append(' ')
    elif self.comments:
        # Even in ignored tags, optionally harvest comments from descendants.
        for child in tree.descendants:
            if isinstance(child, bs4.Comment):
                string = str(child).strip()
                if string:
                    sel = self.construct_selector(tree) + '<!--comment-->'
                    comments.append((string, sel))

    text = self.store_blocks(tree, blocks, text, force_root)

    if tree.parent is None or force_root:
        return blocks, attributes, comments
    return text, blocks, attributes, comments
def nearest_material(name, complete=False):
    r'''Return the nearest hit to a given name from dictionaries of building,
    insulating, or refractory material from tables in [1]_, [2]_, and [3]_.
    Picks the closest match based on a fuzzy search.  If `complete` is True,
    only hits with all three of density, heat capacity, and thermal
    conductivity available are returned.

    Parameters
    name : str
        Search keywords to be used by difflib function
    complete : bool, optional
        If True, returns only hits with all parameters available

    Returns
    ID : str
        A key to one of the dictionaries mentioned above

    Examples
    >>> nearest_material('stainless steel')
    'Metals, stainless steel'

    References
    .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
       Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
    .. [2] DIN EN 12524 (2000-07) Building Materials and Products
       Hygrothermal Properties - Tabulated Design Values; English Version of
       DIN EN 12524.
    .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York: Springer, 2010.
    '''
    keys = materials_dict.keys()
    if complete:
        # Scan candidates best-match first until one has complete data.
        for hit in difflib.get_close_matches(name, keys, n=1000, cutoff=0):
            if materials_dict[hit] == 1 or materials_dict[hit] == 3 or (
                    ASHRAE[hit][0] and ASHRAE[hit][1]):
                return hit
    else:
        # Progressively relax the similarity cutoff until something matches.
        for cutoff in (0.6, 0.3, 0):
            ID = difflib.get_close_matches(name, keys, n=1, cutoff=cutoff)
            if ID:
                break
        return ID[0]
def get_params(self, ctx):
    """Sort order of options before displaying.

    :param click.core.Context ctx: Click context.
    :return: super() return value.
    """
    # Order the options with the custom comparator before Click renders them.
    self.params.sort(key=self.custom_sort)
    return super(ClickGroup, self).get_params(ctx)
def log_url(self, url_data, priority=None):
    """Log URL data in sitemap XML format."""
    self.xml_starttag(u'url')
    self.xml_tag(u'loc', url_data.url)
    if url_data.modified:
        # lastmod uses an ISO date/time with a "T" separator.
        self.xml_tag(u'lastmod', self.format_modified(url_data.modified, sep="T"))
    self.xml_tag(u'changefreq', self.frequency)
    # NOTE(review): priority=None would make "%.2f" raise a TypeError --
    # confirm that all callers always pass a numeric priority.
    self.xml_tag(u'priority', "%.2f" % priority)
    self.xml_endtag(u'url')
    self.flush()
def load_code(fp, magic_int, code_objects=None):
    """marshal.load() written in Python.

    When the Python bytecode magic loaded is the same magic as the running
    Python interpreter's, one could simply use the Python-supplied
    marshal.load().  This routine exists for when the versions differ, since
    the internal code structures are different.

    :param fp: binary file object positioned at a marshalled code object
    :param magic_int: magic number of the bytecode being read
    :param code_objects: optional cache of already-decoded code objects
    :raises TypeError: if the stream does not start with a code indicator
    """
    global internStrings, internObjects
    internStrings = []
    internObjects = []
    if code_objects is None:
        # BUG FIX: the previous signature used a mutable default ({}), which
        # silently shared one cache dict across all calls.
        code_objects = {}
    seek_pos = fp.tell()
    # Sanity check: the first object must be a code object ('c').  The high
    # bit may carry a marshal flag and is masked off first.
    b = ord(fp.read(1))
    if b & 0x80:
        b = b & 0x7f
    c = chr(b)
    if c != 'c':
        raise TypeError(
            "File %s doesn't smell like Python bytecode:\n"
            "expecting code indicator 'c'; got '%s'" % (fp.name, c)
        )
    fp.seek(seek_pos)
    return load_code_internal(fp, magic_int, code_objects=code_objects)
def _get_span_name(servicer_context):
    """Generate a span name based off of the gRPC server rpc_request_info."""
    # call_details.method looks like "/package.Service/Method"; drop the
    # leading slash and make it dotted.
    method_name = servicer_context._rpc_event.call_details.method[1:]
    if isinstance(method_name, bytes):
        method_name = method_name.decode('utf-8')
    dotted = method_name.replace('/', '.')
    return '{}.{}'.format(RECV_PREFIX, dotted)
def conv2d(self, x_in: Connection, w_in: Connection, receptive_field_size, filters_number, stride=1, padding=1, name=""):
    """Compute a 2-D convolution given 4-D input and filter tensors."""
    # im2col: unroll receptive fields into columns, then convolve via matmul.
    cols = self.tensor_3d_to_cols(x_in, receptive_field_size, stride=stride, padding=padding)
    product = self.transpose(self.matrix_multiply(cols, w_in), 0, 2, 1)
    # Output spatial size formula (kept for reference): (h - f + 2*p) / s + 1
    result = self.reshape(product, (-1, filters_number, receptive_field_size, receptive_field_size))
    result.name = name
    return result
def dismiss(self, member_ids):
    """Remove members from the team.  Be careful not to remove yourself.

    :param member_ids: a single member id, or a list/tuple of member ids
    :return: bool -- True when the server reports success
    """
    url = 'http://www.shanbay.com/api/v1/team/member/'
    data = {'action': 'dispel'}
    if isinstance(member_ids, (list, tuple)):
        data['ids'] = ','.join(map(str, member_ids))
    else:
        data['ids'] = member_ids
    response = self.request(url, 'put', data=data)
    try:
        return response.json()['msg'] == "SUCCESS"
    except Exception as e:
        logger.exception(e)
        return False
def by_id(self, id):
    """Get adapter data by its id."""
    # Build the endpoint path for this adapter and id, then fetch it.
    make_path = partial(_path, self.adapter)
    return self._get(make_path(id))
def __gen_token_anno_file(self, top_level_layer):
    """Create an etree representation of a <multiFeat> file that describes
    all the annotations that only span one token (e.g. POS, lemma etc.).

    Note: discoursegraphs will create one token annotation file for each
    top level layer (e.g. conano, tiger etc.).
    """
    base_paula_id = '{0}.{1}.tok'.format(self.corpus_name, self.name)
    paula_id = '{0}.{1}.{2}.tok_multiFeat'.format(
        top_level_layer, self.corpus_name, self.name)
    E, tree = gen_paula_etree(paula_id)
    mflist = E('multiFeatList', {XMLBASE: base_paula_id + '.xml'})

    for token_id in self.dg.tokens:
        mfeat = E('multiFeat', {XLINKHREF: '#{0}'.format(token_id)})
        token_dict = self.dg.node[token_id]
        # TODO: highly inefficient! refactor!
        for feature in token_dict:
            if feature not in IGNORED_TOKEN_ATTRIBS and feature.startswith(top_level_layer):
                mfeat.append(E('feat', {'name': feature, 'value': token_dict[feature]}))
        if self.human_readable:
            # Adds the token string as an XML <!-- comment -->.
            mfeat.append(Comment(token_dict[self.dg.ns + ':token']))
        mflist.append(mfeat)

    tree.append(mflist)
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.multifeat
    return paula_id
def Send(self, command_id, data=b'', size=0):
    """Send/buffer FileSync packets.

    Packets are buffered and only flushed when this connection is read from.
    All messages have a response from the device, so this will always get
    flushed.

    Args:
        command_id: Command to send.
        data: Optional data to send, must set data or size.
        size: Optionally override size from len(data).
    """
    if data:
        if not isinstance(data, bytes):
            data = data.encode('utf8')
        size = len(data)

    # Flush first when this packet would overflow the send buffer.
    if not self._CanAddToSendBuffer(len(data)):
        self._Flush()
    header = struct.pack(b'<2I', self.id_to_wire[command_id], size)
    packet = header + data
    self.send_buffer[self.send_idx:self.send_idx + len(packet)] = packet
    self.send_idx += len(packet)
def assets(self, asset_type=None):
    """Retrieve all of the assets of a given asset_type.

    Args:
        asset_type: (str) Either None, PHONE, HANDLER, or URL
    Returns:
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    if not asset_type:
        return self.tc_requests.adversary_assets(
            self.api_type, self.api_sub_type, self.unique_id)

    # Dispatch to the request helper matching the requested asset type.
    handlers = {
        'PHONE': self.tc_requests.adversary_phone_assets,
        'HANDLER': self.tc_requests.adversary_handle_assets,
        'URL': self.tc_requests.adversary_url_assets,
    }
    handler = handlers.get(asset_type)
    if handler is not None:
        return handler(self.api_type, self.api_sub_type, self.unique_id)

    self._tcex.handle_error(
        925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type])
    return None
def read_buf(self):
    """Read the database file and return its raw contents.

    :return: the file contents as bytes
    :raises KPError: if the file is smaller than the 124-byte header
    """
    # The previous version wrapped the read in a bare ``except: raise`` which
    # was a no-op; the context manager alone guarantees the file is closed.
    with open(self.filepath, 'rb') as handler:
        buf = handler.read()
    # A valid database starts with a header of at least 124 bytes.
    # (Also fixes the old message, whose string concatenation produced
    # "more orequal".)
    if len(buf) < 124:
        raise KPError('Unexpected file size. It should be more or '
                      'equal 124 bytes but it is {0}!'.format(len(buf)))
    return buf
def read_args(src, args=None):
    r"""Read all arguments from buffer.

    Advances the buffer until the end of the last valid argument.  There can
    be any number of whitespace characters between the command and the first
    argument; after that first argument, at most one successive line break is
    tolerated before the chain of arguments is considered finished.

    :param TexArgs args: existing arguments to extend
    :return: parsed arguments
    :rtype: TexArgs
    """
    args = args or TexArgs()
    # Unlimited whitespace before the first argument.
    candidate_index = src.num_forward_until(lambda s: not s.isspace())
    while src.peek().isspace():
        args.append(read_tex(src))
    # Restricted to only one line break after the first argument.
    line_breaks = 0
    while src.peek() in ARG_START_TOKENS or (src.peek().isspace() and line_breaks == 0):
        space_index = src.num_forward_until(lambda s: not s.isspace())
        if space_index > 0:
            line_breaks += 1
            # Accept the next argument only when at most one newline separates
            # it and an argument-start token follows the whitespace.
            if src.peek((0, space_index)).count("\n") <= 1 and src.peek(space_index) in ARG_START_TOKENS:
                args.append(read_tex(src))
        else:
            line_breaks = 0
            args.append(read_tex(src))
    if not args:
        # Nothing parsed: rewind past the whitespace we skipped over.
        src.backward(candidate_index)
    return args
def message(self, *args, **kwargs):
    """Build a message.  This method is useful if you want to compose
    actors.  See the actor composition documentation for details.

    Parameters:
      *args(tuple): Positional arguments to send to the actor.
      **kwargs(dict): Keyword arguments to send to the actor.

    Examples:
      >>> (add.message(1, 2) | add.message(3))
      pipeline([add(1, 2), add(3)])

    Returns:
      Message: A message that can be enqueued on a broker.
    """
    return self.message_with_options(args=args, kwargs=kwargs)
def genty_repeat(count):
    """Decorator factory: run the wrapped test ``count`` times.

    To use in conjunction with a TestClass wrapped with @genty:

        @genty_repeat(count)
        def test_some_function(self)

    Can also wrap a test already decorated with @genty_dataset:

        @genty_repeat(3)
        @genty_dataset(True, False)
        def test_some_other_function(self, bool_value):

    which runs 6 tests in total, 3 each of the True and False cases.

    :param count: The number of times to run the test.
    :type count: `int`
    """
    if count < 0:
        raise ValueError(
            "Really? Can't have {0} iterations. Please pick a value >= 0.".format(count)
        )

    def annotate(test_method):
        # The genty machinery reads this attribute later to repeat the test.
        test_method.genty_repeat_count = count
        return test_method
    return annotate
async def _run(self):
    """Background task: refresh the timestamp and reset the sequence number."""
    tick_gen = _task_idle_ticks(0.5 * self._shard_ttl)
    self._is_running = True
    self._ready_event.clear()

    # Acquire a shard lease, retrying on transient gRPC failures.
    while True:
        try:
            await self._lease_shard()
            break
        except grpc.RpcError as exc:
            nap = _rand_uniform(3, 15)
            logger.warn(f'failed in gRPC [{exc.code()}]: {exc.details()} ' f'. napping {nap:.0f} secs ...')
            if await self._continueAfterSleep(nap):
                continue
            else:
                return

    assert self._shard_id is not None
    try:
        # Keepalive loop: renew the timestamp and lease until stopped.
        while self._is_running:
            self._ready_event.clear()
            try:
                await self._renew_timestamp()
                await self._keepalive_shard()
            except grpc.RpcError as exc:
                # e.g. exc.code() == grpc.StatusCode.UNAVAILABLE
                nap = _rand_uniform(3, 15)
                logger.warn(f'failed in grpc[{exc.code()}]: {exc.details()}' f', napping {nap:.0f}secs ...')
                if await self._continueAfterSleep(nap):
                    continue
                else:
                    break
            self._ready_event.set()
            if await self._continueAfterSleep(next(tick_gen)):
                continue
            else:
                break
    except asyncio.CancelledError:
        pass
    except Exception:
        logger.error(f'Error in shard#{self._shard_id}:', exc_info=True)
    finally:
        self._ready_event.clear()
        # Revoke the lease on the way out.
        await self._lease.revoke()
        logger.debug(f'shard#{self._shard_id}, the lease revoked')
def vertical_path(size):
    """Create a generator for progressing vertically through an image.

    :param size: A tuple (width, height) of the image size
    :return: A generator that yields a set of columns through the image.
             Each column is a generator that yields pixel coordinates.
    """
    width, height = size
    for x in range(width):
        # Each column is itself lazy: coordinates are produced on demand.
        yield ((x, y) for y in range(height))
def DeleteAttributes(self, subject, attributes, start=None, end=None, sync=True):
    """Remove some attributes from a subject."""
    _ = sync  # Unused.
    if not attributes:
        return
    if isinstance(attributes, string_types):
        # A lone string would otherwise be iterated character by character.
        raise ValueError("String passed to DeleteAttributes (non string iterable expected).")
    for attribute in attributes:
        # NOTE(review): the timestamp is recomputed per attribute; hoisting it
        # out of the loop would change behavior if _MakeTimestamp captures
        # "now" -- confirm before optimizing.
        timestamp = self._MakeTimestamp(start, end)
        queries = self._BuildDelete(subject, utils.SmartUnicode(attribute), timestamp)
        self._ExecuteQueries(queries)
def get_data(source, fields='*', env=None, first_row=0, count=-1, schema=None):
    """Get a subset of data from a Table, Query, Pandas dataframe or List.

    Args:
      source: the source of the data. Can be a Table, Pandas DataFrame, List of dictionaries or
          lists, or a string, in which case it is expected to be the name of a table in BQ.
      fields: a list of fields that we want to return as a list of strings, comma-separated string,
          or '*' for all.
      env: if the data source is a Query module, this is the set of variable overrides for
          parameterizing the Query.
      first_row: the index of the first row to return; default 0. Only used if count is
          non-negative.
      count: the number of rows to return. If negative (the default), return all rows.
      schema: the schema of the data. Optional; if supplied this can be used to help do
          type-coercion.

    Returns:
      A tuple consisting of a dictionary and a count; the dictionary has two entries: 'cols'
      which is a list of column metadata entries for Google Charts, and 'rows' which is a list of
      lists of values. The count is the total number of rows in the source (independent of the
      first_row/count parameters).

    Raises:
      Exception if the request could not be fulfilled.
    """
    ipy = IPython.get_ipython()
    if env is None:
        env = {}
    env.update(ipy.user_ns)

    # A string is first resolved against the notebook namespace; if it is
    # still a string after that, treat it as a BigQuery table name.
    if isinstance(source, basestring):
        source = datalab.utils.get_item(ipy.user_ns, source, source)
        if isinstance(source, basestring):
            source = datalab.bigquery.Table(source)

    if isinstance(source, types.ModuleType) or isinstance(source, datalab.data.SqlStatement):
        source = datalab.bigquery.Query(source, values=env)

    if isinstance(source, list):
        if len(source) == 0:
            return _get_data_from_empty_list(source, fields, first_row, count, schema)
        elif isinstance(source[0], dict):
            return _get_data_from_list_of_dicts(source, fields, first_row, count, schema)
        elif isinstance(source[0], list):
            return _get_data_from_list_of_lists(source, fields, first_row, count, schema)
        else:
            raise Exception("To get tabular data from a list it must contain dictionaries or lists.")
    elif isinstance(source, pandas.DataFrame):
        return _get_data_from_dataframe(source, fields, first_row, count, schema)
    elif (isinstance(source, google.datalab.bigquery.Query) or
          isinstance(source, google.datalab.bigquery.Table)):
        # google.datalab types are handled by their own implementation.
        return google.datalab.utils.commands._utils.get_data(
            source, fields, env, first_row, count, schema)
    elif isinstance(source, datalab.bigquery.Query):
        return _get_data_from_table(source.results(), fields, first_row, count, schema)
    elif isinstance(source, datalab.bigquery.Table):
        return _get_data_from_table(source, fields, first_row, count, schema)
    else:
        raise Exception("Cannot chart %s; unsupported object type" % source)
def from_array(cls, content_type, extensions=None, encoding=None, system=None, is_obsolete=False, docs=None, url=None, is_registered=False):
    """Create a MIME::Type from an array of the form:

        [type-name, [extensions], encoding, system]

    ``extensions``, ``encoding``, and ``system`` are optional.

        Type.from_array("application/x-ruby", ['rb'], '8bit')

    is equivalent to:

        type = Type('application/x-ruby')
        type.extensions = ['rb']
        type.encoding = '8bit'

    :param extensions: list of file extensions; defaults to an empty list.
        (BUG FIX: the old ``extensions=[]`` default was a shared mutable
        list -- mutating one instance's extensions leaked into every
        instance built with the default.)
    """
    mt = cls(content_type)
    mt.extensions = [] if extensions is None else extensions
    mt.encoding = encoding
    mt.system = system
    mt.is_obsolete = is_obsolete
    mt.docs = docs
    mt.url = url
    mt.registered = is_registered
    return mt
def get_support(variables, polynomial):
    """Get the support of a polynomial.

    The support is a list of exponent vectors, one per monomial, aligned
    with the ordering of ``variables``.
    """
    support = []
    if is_number_type(polynomial):
        # A constant contributes a single all-zero exponent vector.
        support.append([0] * len(variables))
        return support

    for monomial in polynomial.expand().as_coefficients_dict():
        exponents = [0] * len(variables)
        mon, _ = __separate_scalar_factor(monomial)
        for factor in flatten(split_commutative_parts(mon)):
            if isinstance(factor, Pow):
                base = factor.base
                # Adjoint variables are indexed by their non-adjoint form.
                if is_adjoint(base):
                    base = base.adjoint()
                exponents[variables.index(base)] = factor.exp
            elif is_adjoint(factor):
                exponents[variables.index(factor.adjoint())] = 1
            elif isinstance(factor, (Operator, Symbol)):
                exponents[variables.index(factor)] = 1
        support.append(exponents)
    return support
def timescales_(self):
    """Implied relaxation timescales of the model.

    The relaxation of any initial distribution towards equilibrium is given,
    according to this model, by a sum of terms -- each corresponding to the
    relaxation along a specific direction (eigenvector) in state space --
    which decay exponentially in time.  See equation 19 from [1].

    Returns
    timescales : array-like, shape = (n_timescales,)
        The longest implied relaxation timescales of the model, expressed in
        units of time-step between indices in the source data supplied to
        ``fit()``.

    References
    .. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
       Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
    """
    eigenvalues, _, _ = self._get_eigensystem()
    # Leave off the first eigenvalue (equilibrium distribution); suppress the
    # warnings log() would emit for eigenvalues <= 0 or == 1.
    with np.errstate(invalid='ignore', divide='ignore'):
        return -self.lag_time / np.log(eigenvalues[1:])
def p_expr_LT_expr(p):
    """expr : expr LT expr"""
    # NOTE: the docstring above is the ply grammar rule -- do not edit it.
    # Build a binary '<' node; the lambda lets constant operands be folded.
    p[0] = make_binary(p.lineno(2), 'LT', p[1], p[3], lambda x, y: x < y)
def activate(self, branches, exclusive=False):
    """Activate branches.

    Parameters
    branches : str or list
        branch or list of branches to activate
    exclusive : bool, optional (default=False)
        if True deactivate the remaining branches
    """
    if exclusive:
        # Turn everything off first; only the requested branches come back on.
        self.SetBranchStatus('*', 0)
    if isinstance(branches, string_types):
        branches = [branches]
    for pattern in branches:
        if '*' in pattern:
            # Wildcard: enable every branch the glob matches.
            for matched in self.glob(pattern):
                self.SetBranchStatus(matched, 1)
        elif self.has_branch(pattern):
            self.SetBranchStatus(pattern, 1)
def _iter_chunk_offsets ( self ) :
"""Generate a ( chunk _ type , chunk _ offset ) 2 - tuple for each of the chunks
in the PNG image stream . Iteration stops after the IEND chunk is
returned ."""
|
chunk_offset = 8
while True :
chunk_data_len = self . _stream_rdr . read_long ( chunk_offset )
chunk_type = self . _stream_rdr . read_str ( 4 , chunk_offset , 4 )
data_offset = chunk_offset + 8
yield chunk_type , data_offset
if chunk_type == 'IEND' :
break
# incr offset for chunk len long , chunk type , chunk data , and CRC
chunk_offset += ( 4 + 4 + chunk_data_len + 4 )
|
def checkout(self, transparent=False, **kwargs):
    """Create a pagseguro checkout and return the parsed response.

    :param transparent: use the transparent-checkout endpoint when True.
    :param kwargs: extra checkout parameters forwarded to the builder.
    """
    self.data['currency'] = self.config.CURRENCY
    self.build_checkout_params(**kwargs)
    # Transparent checkout uses a different API endpoint.
    url = (self.config.TRANSPARENT_CHECKOUT_URL if transparent
           else self.config.CHECKOUT_URL)
    response = self.post(url=url)
    return PagSeguroCheckoutResponse(response.content, config=self.config)
|
def order_by(self, key):
    """Return a new Enumerable sorted in ascending order by the given key.

    :param key: key to sort by, as a lambda expression
    :return: new SortedEnumerable over this Enumerable's data
    :raises NullArgumentError: if ``key`` is None
    """
    if key is None:
        raise NullArgumentError(u"No key for sorting given")
    direction = OrderingDirection(key, reverse=False)
    return SortedEnumerable(key_funcs=[direction], data=self._data)
|
def addAnalyses(self, analyses):
    """Add a collection of analyses to the Worksheet as one batch."""
    # Queue deferred actions so they are processed once at the end
    # instead of after every single addAnalysis call.
    pool = ActionHandlerPool.get_instance()
    pool.queue_pool()
    for an in analyses:
        self.addAnalysis(api.get_object(an))
    pool.resume()
|
def json_decoder_to_deserializer(decoder_cls: Union[Type[JSONDecoder], Callable[[], Type[JSONDecoder]]]) -> Type[Deserializer]:
    """Convert a `JSONDecoder` class into an equivalent `Deserializer` class.

    :param decoder_cls: the decoder class itself, or a zero-argument callable
        returning the class
    :return: the equivalent `Deserializer` class
    """
    if isinstance(decoder_cls, type):
        new_name = decoder_cls.__name__
    else:
        # Factory callable: derive a unique name from its identity.
        new_name = "%sLambdaTypeReturn" % id(decoder_cls)

    def _decoder_type(self):
        # Resolve lazily: a factory callable is invoked on each access.
        return decoder_cls if isinstance(decoder_cls, type) else decoder_cls()

    return type("%sAsDeserializer" % new_name, (_JSONDecoderAsDeserializer,),
                {"decoder_type": property(_decoder_type)})
|
def get_by_user(cls, user_id, with_deleted=False):
    """Return a query over the communities owned by the given user.

    :param user_id: id of the owning user
    :param with_deleted: include soft-deleted communities when True
    :return: query ordered by community title, ascending
    """
    q = cls.query.filter_by(id_user=user_id)
    if not with_deleted:
        # Exclude soft-deleted rows (deleted_at is set).
        q = q.filter(cls.deleted_at.is_(None))
    return q.order_by(db.asc(Community.title))
|
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """Insert the specified phenotype annotation set into this repository."""
    datasetId = phenotypeAssociationSet.getParentContainer().getId()
    attributes = json.dumps(phenotypeAssociationSet.getAttributes())
    try:
        models.Phenotypeassociationset.create(
            id=phenotypeAssociationSet.getId(),
            name=phenotypeAssociationSet.getLocalId(),
            datasetid=datasetId,
            dataurl=phenotypeAssociationSet._dataUrl,
            attributes=attributes,
        )
    except Exception:
        # Any insert failure (typically a uniqueness violation) is surfaced
        # as a duplicate-name error for the parent dataset.
        raise exceptions.DuplicateNameException(
            phenotypeAssociationSet.getParentContainer().getId())
|
def list_file_jobs(cls, offset=None, limit=None, api=None):
    """Query (list) asynchronous file jobs.

    :param offset: pagination offset
    :param limit: pagination limit
    :param api: Api instance; falls back to the class default when omitted
    :return: Collection object
    """
    return super(AsyncJob, cls)._query(
        api=api or cls._API,
        url=cls._URL['list_file_jobs'],
        offset=offset,
        limit=limit,
    )
|
def parse(self, limit=None):
    """Override Source.parse().

    Parses version and interaction information from the CTD download files
    (chemical-disease interactions, gene-pathway, gene-disease, plus the
    curated chemical-disease file).

    Args:
        :param limit: (int, optional) limit the number of rows processed
    Returns:
        :return None
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows", limit)
    LOG.info("Parsing files...")
    # pub_map = dict()
    # file_path = '/'.join((self.rawdir,
    #                       self.static_files['publications']['file']))
    # if os.path.exists(file_path) is True:
    #     pub_map = self._parse_publication_file(
    #         self.static_files['publications']['file']
    if self.test_only:
        self.test_mode = True
    # Helper writers share this source's graph.
    self.geno = Genotype(self.graph)
    self.pathway = Pathway(self.graph)
    self._parse_ctd_file(limit, self.files['chemical_disease_interactions']['file'])
    self._parse_ctd_file(limit, self.files['gene_pathway']['file'])
    self._parse_ctd_file(limit, self.files['gene_disease']['file'])
    self._parse_curated_chem_disease(limit)
    LOG.info("Done parsing files.")
    return
|
def unixtime(mm=False):
    """Return the current unix timestamp, at second resolution by default.

    - Pass ``mm=True`` for a millisecond-style value, computed as
      ``(seconds + random()) * 1000`` to reduce the chance of collisions
      when the value is used as a quasi-unique identifier.
    - Supported time range: 2001/9/9 9:46:40 ~ 2286/11/21 1:46:3, i.e. the
      second value is always 10 digits long and the ms value 13 digits.

    Examples::

        len(str(unixtime()))      # -> 10
        len(str(unixtime(True)))  # -> 13
    """
    seconds = time.mktime(datetime.datetime.now().timetuple())
    if not mm:
        return int(seconds)
    # Millisecond variant: mix in a random fraction before scaling.
    return int((seconds + random.random()) * 1000)
|
def _basic_login(self):
    """Obtain a new access token from the vendor.

    First, try using the refresh_token, if one is available; otherwise
    authenticate using the stored user credentials.
    """
    _LOGGER.debug("No/Expired/Invalid access_token, re-authenticating...")
    # Invalidate any cached token state before requesting a fresh one.
    self.access_token = self.access_token_expires = None
    if self.refresh_token:
        _LOGGER.debug("Trying refresh_token...")
        credentials = {'grant_type': "refresh_token", 'scope': "EMEA-V1-Basic EMEA-V1-Anonymous", 'refresh_token': self.refresh_token}
        try:
            self._obtain_access_token(credentials)
        except (requests.HTTPError, KeyError, ValueError):
            # Refresh token rejected or response malformed: drop it so
            # the user-credential flow below runs instead.
            _LOGGER.warning("Invalid refresh_token, will try user credentials.")
            self.refresh_token = None
    if not self.refresh_token:
        _LOGGER.debug("Trying user credentials...")
        credentials = {'grant_type': "password", 'scope': "EMEA-V1-Basic EMEA-V1-Anonymous " "EMEA-V1-Get-Current-User-Account", 'Username': self.username, 'Password': self.password}
        # No fallback here: a credential failure propagates to the caller.
        self._obtain_access_token(credentials)
    _LOGGER.debug("refresh_token = %s", self.refresh_token)
    _LOGGER.debug("access_token = %s", self.access_token)
    _LOGGER.debug("access_token_expires = %s", self.access_token_expires.strftime("%Y-%m-%d %H:%M:%S"))
|
def _make_win ( n , mono = False ) :
"""Generate a window for a given length .
: param n : an integer for the length of the window .
: param mono : True for a mono window , False for a stereo window .
: return : an numpy array containing the window value ."""
|
if mono :
win = np . hanning ( n ) + 0.00001
else :
win = np . array ( [ np . hanning ( n ) + 0.00001 , np . hanning ( n ) + 0.00001 ] )
win = np . transpose ( win )
return win
|
def get_stddevs(self, mag, imt, stddev_types, num_sites):
    """Return the total standard deviation for each requested type."""
    result = []
    for requested in stddev_types:
        # Only std-dev types this model declares support for are allowed.
        assert requested in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if requested == const.StdDev.TOTAL:
            # Broadcast the scalar sigma across all sites.
            sigma = self._get_total_sigma(imt, mag)
            result.append(sigma + np.zeros(num_sites))
    return result
|
def dragEnterEvent(self, event):
    """Reimplements the :meth:`QTabWidget.dragEnterEvent` method.

    :param event: QEvent.
    :type event: QEvent
    """
    message = "> '{0}' widget drag enter event accepted!".format(self.__class__.__name__)
    LOGGER.debug(message)
    # Accept the drag so a subsequent drop event will be delivered.
    event.accept()
|
def forward(self, **kwargs):
    """Proxy the current HTTP request to the remote API endpoint.

    Accepts *ONLY* GET and POST; any other method aborts with 405.
    The upstream JSON response body is parsed and re-serialized for the
    client.

    :return: flask JSON response mirroring the upstream payload
    """
    # Rewrite the URL path: strip this blueprint's prefix and graft the
    # remainder onto the remote root.
    prefix = self.url_prefix
    path = "" if request.path == "/" else request.path
    path = path[len(prefix):]
    url = '%s%s' % (self.url_root[:-1], path)
    if request.method == 'GET':
        resp = requests.get(url, params=request.args)
    elif request.method == 'POST':
        if request.headers['Content-Type'].startswith('application/json'):
            # JSON payload: forward the parsed body as JSON.
            resp = requests.post(url, json=request.json)
        else:
            # Form payload: forward the form fields as JSON.
            resp = requests.post(url, json=request.form)
    else:
        # Method not allowed: aborts by raising, so control never
        # reaches the json.loads below.
        abort(405)
    # Relay whatever JSON the upstream returned. (The original code also
    # assigned request.json/request.form to `data` in the POST branches,
    # but those values were immediately overwritten here - dead stores.)
    data = json.loads(resp.content)
    return jsonify(data)
|
def predict(self, u=0):
    """Predict next position (information-filter form).

    Parameters
    ----------
    u : ndarray
        Optional control vector. If non-zero, it is multiplied by B
        to create the control input into the system.
    """
    # x = Fx + Bu
    # Propagated information matrix: A = F^-T . P_inv . F^-1.
    A = dot(self._F_inv.T, self.P_inv).dot(self._F_inv)
    # pylint: disable=bare-except
    try:
        AI = self.inv(A)
        invertable = True
        if self._no_information:
            # Information has returned: recover the state estimate from the
            # information state; fall back to a zero vector if P_inv is
            # singular as well.
            try:
                self.x = dot(self.inv(self.P_inv), self.x)
            except:
                self.x = dot(0, self.x)
            self._no_information = False
    except:
        # A is singular: no information is available, so use the alternate
        # propagation in the else-branch below.
        invertable = False
        self._no_information = True
    if invertable:
        # Standard propagation: x = Fx + Bu and P_inv = (A^-1 + Q)^-1.
        self.x = dot(self._F, self.x) + dot(self.B, u)
        self.P_inv = self.inv(AI + self.Q)
        # save priors
        self.P_inv_prior = np.copy(self.P_inv)
        self.x_prior = np.copy(self.x)
    else:
        # Singular case: propagate the information state directly.
        # NOTE(review): control input u is not applied on this path -
        # presumably intentional for the no-information case; confirm.
        I_PF = self._I - dot(self.P_inv, self._F_inv)
        FTI = self.inv(self._F.T)
        FTIX = dot(FTI, self.x)
        AQI = self.inv(A + self.Q)
        self.x = dot(FTI, dot(I_PF, AQI).dot(FTIX))
        # save priors
        self.x_prior = np.copy(self.x)
        self.P_inv_prior = np.copy(AQI)
|
def run(self, wrappers=("", "")):
    '''Run the lilypond script on the hierarchy class.

    :param wrappers: pair of strings placed before and after the generated
        lilypond markup. Useful for testing: e.g. wrap a note in
        ``\\relative c {}`` so lilypond handles it without raising an error.
    :return: nothing; as a side effect writes ``self.lyfile`` and invokes
        lilypond so a PDF should be created in ``self.folder``.
    '''
    lilystring = self.piece_obj.toLily()
    # Use a context manager so the file handle is closed even if writing
    # fails (the original open()/close() pair leaked on exceptions).
    with open(self.lyfile, 'w') as ly_file:
        ly_file.write(wrappers[0] + "\\version \"2.18.2\" \n" + lilystring + wrappers[1])
    # subprocess.Popen(['sudo', self.lily_script, "--output=" +
    #                   self.folder, self.lyfile])
    os.system(self.lily_script + " --loglevel=WARNING --output=" + self.folder + " " + self.lyfile)
|
def _truncate ( self , x , k ) :
'''given a vector x , leave its top - k absolute - value entries alone , and set the rest to 0'''
|
not_F = np . argsort ( np . abs ( x ) ) [ : - k ]
x [ not_F ] = 0
return x
|
def create_key(kwargs=None, call=None):
    '''Upload a public key.'''
    # Salt-cloud convention: this action must be invoked as a function.
    if call != 'function':
        log.error('The create_key function must be called with -f or --function.')
        return False
    try:
        result = query(
            method='account',
            command='keys',
            args={'name': kwargs['name'], 'public_key': kwargs['public_key']},
            http_method='post',
        )
    except KeyError:
        # Either kwargs lacks 'name'/'public_key' (or is not a dict at all).
        log.info('`name` and `public_key` arguments must be specified')
        return False
    return result
|
def _set_ping_mpls(self, v, load=False):
    """Setter method for ping_mpls, mapped from YANG variable
    /brocade_mpls_rpc/ping_mpls (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ping_mpls is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_ping_mpls() directly.
    """
    # Auto-generated pyangbind setter: coerce the value, validate it
    # against the YANG type, then store it.
    if hasattr(v, "_utype"):
        # Unwrap a typed value into its native representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=ping_mpls.ping_mpls, is_leaf=True, yang_name="ping-mpls", rest_name="ping-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'mplsPing'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated error payload describing the
        # expected YANG type.
        raise ValueError({'error-string': """ping_mpls must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=ping_mpls.ping_mpls, is_leaf=True, yang_name="ping-mpls", rest_name="ping-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'mplsPing'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", })
    self.__ping_mpls = t
    if hasattr(self, '_set'):
        self._set()
|
def do_flipper(parser, token):
    """Template tag handler: the flipper tag takes two arguments, the user
    to look up and the feature to compare against."""
    # Collect everything up to {% endflipper %} as the tag's body.
    nodelist = parser.parse(('endflipper',))
    _tag_name, user_key, feature = token.split_contents()
    # Consume the endflipper token itself.
    parser.delete_first_token()
    return FlipperNode(nodelist, user_key, feature)
|
def create_image_summary(name, val):
    """Encode a batch of images as a TF image summary.

    Args:
        name (str): summary tag; per-image suffix ``/k`` is added when n > 1.
        val (np.ndarray): 4D tensor of NHWC. Assume RGB if C == 3.
            Can be either float or uint8. Range has to be [0, 255].
    Returns:
        tf.Summary:
    """
    assert isinstance(name, six.string_types), type(name)
    n, h, w, c = val.shape
    val = val.astype('uint8')
    summary = tf.Summary()
    png_params = [cv2.IMWRITE_PNG_COMPRESSION, 9]
    for idx in range(n):
        frame = val[idx]
        # CV2 will only write correctly in BGR channel order.
        if c == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        elif c == 4:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGRA)
        ok, encoded = cv2.imencode('.png', frame, png_params)
        if not ok:
            # PNG encoding failed: skip this image.
            continue
        image = tf.Summary.Image()
        image.height = h
        image.width = w
        # 1 - grayscale, 3 - RGB, 4 - RGBA
        image.colorspace = c
        image.encoded_image_string = encoded.tostring()
        tag = name if n == 1 else '{}/{}'.format(name, idx)
        summary.value.add(tag=tag, image=image)
    return summary
|
def datasets_status(self, owner_slug, dataset_slug, **kwargs):  # noqa: E501
    """Get dataset creation status  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.datasets_status(owner_slug, dataset_slug, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str owner_slug: Dataset owner (required)
    :param str dataset_slug: Dataset name (required)
    :return: Result
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this wrapper always want just the payload, not the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.datasets_status_with_http_info(owner_slug, dataset_slug, **kwargs)  # noqa: E501
    return self.datasets_status_with_http_info(owner_slug, dataset_slug, **kwargs)  # noqa: E501
|
def golden_section_search(fn, a, b, tolerance=1e-5):
    """Golden-section search for the minimum of ``fn`` on [a, b].

    ``fn`` must be strictly unimodal on [a, b]. Each iteration now reuses
    the surviving interior evaluation (one new call to ``fn`` per step)
    instead of re-evaluating both interior points every loop, as the
    previously commented-out lines intended.

    example:
        >>> f = lambda x: (x - 2) ** 2
        >>> golden_section_search(f, 1, 5)
        2.000009644875678
    """
    c = b - GOLDEN * (b - a)
    d = a + GOLDEN * (b - a)
    fc, fd = fn(c), fn(d)
    while abs(c - d) > tolerance:
        if fc < fd:
            # Minimum lies in [a, d]: d inherits c and its cached value.
            b = d
            d, fd = c, fc
            c = b - GOLDEN * (b - a)
            fc = fn(c)
        else:
            # Minimum lies in [c, b]: c inherits d and its cached value.
            a = c
            c, fc = d, fd
            d = a + GOLDEN * (b - a)
            fd = fn(d)
    return (b + a) / 2
|
def profile_detail(request, username, template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE, extra_context=None, **kwargs):
    """Detailed view of a user.

    :param username:
        String of the username of which the profile should be viewed.
    :param template_name:
        String representing the template name that should be used to display
        the profile.
    :param extra_context:
        Dictionary of variables which should be supplied to the template. The
        ``profile`` key is always the current profile.

    **Context**

    ``profile``
        Instance of the currently viewed ``Profile``.
    """
    user = get_object_or_404(get_user_model(), username__iexact=username)
    profile_model = get_profile_model()
    try:
        profile = user.get_profile()
    except profile_model.DoesNotExist:
        # Lazily create a profile for users that don't have one yet.
        profile = profile_model(user=user)
        profile.save()
    if not profile.can_view_profile(request.user):
        return HttpResponseForbidden(_("You don't have permission to view this profile."))
    if not extra_context:
        extra_context = dict()
    # Reuse the profile fetched/created above instead of calling
    # user.get_profile() a second time - the repeated call hit the
    # database again and could fail for a freshly created profile.
    extra_context['profile'] = profile
    return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
|
def stats_mouse(events, table):
    """Returns statistics, heatmap positions and rescaled events for mouse events.

    :param events: list of event dicts with "dt" and "x"/"y" (plus "wheel" or
                   "button" depending on table); coordinates are rescaled
                   IN PLACE from screen space to heatmap space.
    :param table:  one of "moves", "scrolls" or "clicks"; selects which
                   statistics are computed.
    :return: (stats, positions, events) - stats is a list of (label, value)
             pairs, positions a list of heatmap cell dicts with counts.
    """
    if not events:
        return [], [], []
    distance, last, deltas = 0, None, []
    HS = conf.MouseHeatmapSize
    # SC maps screen pixels to heatmap cells per axis (default screen size
    # until a recorded screen size matching the event time is found).
    SC = dict(("xy"[i], conf.DefaultScreenSize[i] / float(HS[i])) for i in [0, 1])
    xymap = collections.defaultdict(int)
    sizes = db.fetch("screen_sizes", order=("dt",))
    sizeidx, sizelen = -1, len(sizes)
    # Scale by desktop size at event time
    for e in events:
        if last:
            # Inter-event delay and Euclidean distance in screen pixels
            # (last still holds unscaled coordinates - see copy below).
            deltas.append(e["dt"] - last["dt"])
            distance += math.sqrt(sum(abs(e[k] - last[k]) ** 2 for k in "xy"))
        last = dict(e)
        # Copy, as we modify coordinates
        if sizeidx < 0:  # Find latest size from before event
            for i, size in reversed(list(enumerate(sizes))):
                if e["dt"] >= size["dt"]:
                    SC = dict((k, size[k] / float(HS["y" == k])) for k in "xy")
                    sizeidx = i
                    break
            # for i, size
        else:  # Find next size from before event
            while sizeidx < sizelen - 2 and e["dt"] >= sizes[sizeidx + 1]["dt"]:
                sizeidx += 1
            if sizeidx < sizelen - 1 and e["dt"] >= sizes[sizeidx]["dt"]:
                SC = dict((k, sizes[sizeidx][k] / float(HS["y" == k])) for k in "xy")
        # Rescale into heatmap space, clamping to the heatmap bounds.
        e["x"], e["y"] = tuple(min(int(e[k] / SC[k]), HS["y" == k]) for k in "xy")
        xymap[(e["x"], e["y"])] += 1
    stats, positions = [], [dict(x=x, y=y, count=v) for (x, y), v in xymap.items()]
    if "moves" == table:
        # Thousands-separate the pixel count for display.
        px = re.sub(r"(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % math.ceil(distance))
        seconds = timedelta_seconds(events[-1]["dt"] - events[0]["dt"])
        stats = [("Total distance", "%s pixels " % px), ("", "%.1f meters (if pixel is %smm)" % (distance * conf.PixelLength, conf.PixelLength * 1000)), ("Average speed", "%.1f pixels per second" % (distance / (seconds or 1))), ("", "%.4f meters per second" % (distance * conf.PixelLength / (seconds or 1))), ]
    elif "scrolls" == table:
        # wheel is +1/-1 per scroll step.
        counts = collections.Counter(e["wheel"] for e in events)
        stats = [("Scrolls per hour", int(len(events) / (timedelta_seconds(events[-1]["dt"] - events[0]["dt"]) / 3600 or 1))), ("Average interval", sum(deltas, datetime.timedelta()) / (len(deltas) or 1)), ("Scrolls down", counts[-1]), ("Scrolls up", counts[1]), ]
    elif "clicks" == table:
        counts = collections.Counter(e["button"] for e in events)
        NAMES = {1: "Left", 2: "Right", 3: "Middle"}
        stats = [("Clicks per hour", int(len(events) / (timedelta_seconds(events[-1]["dt"] - events[0]["dt"]) / 3600 or 1))), ("Average interval between clicks", sum(deltas, datetime.timedelta()) / (len(deltas) or 1)), ("Average distance between clicks", "%.1f pixels" % (distance / (len(events) or 1))), ]
        for k, v in sorted(counts.items()):
            stats += [("%s button clicks" % NAMES.get(k, "%s." % k), v)]
    return stats, positions, events
|
def lobstr(args):
    """%prog lobstr lobstr_index1 lobstr_index2 ...

    Run lobSTR on a big BAM file. There can be multiple lobSTR indices. In
    addition, bamfile can be S3 location and --lobstr_home can be S3 location
    (e.g. s3://hli-mv-data-science/htang/str-build/lobSTR/)
    """
    p = OptionParser(lobstr.__doc__)
    p.add_option("--haploid", default="chrY,chrM", help="Use haploid model for these chromosomes")
    p.add_option("--chr", help="Run only this chromosome")
    p.add_option("--simulation", default=False, action="store_true", help="Simulation mode")
    p.set_home("lobstr", default="s3://hli-mv-data-science/htang/str-build/lobSTR/")
    p.set_cpus()
    p.set_aws_opts(store="hli-mv-data-science/htang/str-data")
    opts, args = p.parse_args(args)
    bamfile = opts.input_bam_path
    if len(args) < 1 or bamfile is None:
        sys.exit(not p.print_help())
    lbindices = args
    if opts.simulation:  # Simulation mode
        # One fixed run against the TREDs index on chr4; keep the VCF in
        # lobstr_results, discard the allelotype stats file.
        cmd, vcf_file = allelotype_on_chr(bamfile, "chr4", "/mnt/software/lobSTR/", "TREDs", haploid=opts.haploid)
        stats_file = vcf_file.rsplit(".", 1)[0] + ".allelotype.stats"
        results_dir = "lobstr_results"
        mkdir(results_dir)
        sh(cmd)
        sh("mv {} {}/ && rm {}".format(vcf_file, results_dir, stats_file))
        return
    s3mode = bamfile.startswith("s3")
    store = opts.output_path
    cleanup = not opts.nocleanup
    workdir = opts.workdir
    mkdir(workdir)
    os.chdir(workdir)
    lhome = opts.lobstr_home
    if lhome.startswith("s3://"):
        # Mirror the lobSTR installation locally before running.
        lhome = pull_from_s3(lhome, overwrite=False)
    exec_id, sample_id = opts.workflow_execution_id, opts.sample_id
    prefix = [x for x in (exec_id, sample_id) if x]
    if prefix:
        pf = "_".join(prefix)
    else:
        # Fall back to the BAM file's basename as the output prefix.
        pf = bamfile.split("/")[-1].split(".")[0]
    if s3mode:
        # Skip the whole computation if the final gzipped VCF (for the
        # last index) already exists in the S3 store.
        gzfile = pf + ".{0}.vcf.gz".format(lbindices[-1])
        remotegzfile = "{0}/{1}".format(store, gzfile)
        if check_exists_s3(remotegzfile):
            logging.debug("Object `{0}` exists. Computation skipped.".format(remotegzfile))
            return
        localbamfile = pf + ".bam"
        localbaifile = localbamfile + ".bai"
        if op.exists(localbamfile):
            logging.debug("BAM file already downloaded.")
        else:
            pull_from_s3(bamfile, localbamfile)
        if op.exists(localbaifile):
            logging.debug("BAM index file already downloaded.")
        else:
            # Look for the index as <name>.bam.bai first, then
            # <name>.bai; re-index locally as a last resort.
            remotebaifile = bamfile + ".bai"
            if check_exists_s3(remotebaifile):
                pull_from_s3(remotebaifile, localbaifile)
            else:
                remotebaifile = bamfile.rsplit(".")[0] + ".bai"
                if check_exists_s3(remotebaifile):
                    pull_from_s3(remotebaifile, localbaifile)
                else:
                    logging.debug("BAM index cannot be found in S3!")
                    sh("samtools index {0}".format(localbamfile))
        bamfile = localbamfile
    # NOTE(review): `range(1, 23) + [...]` only works on Python 2; on
    # Python 3 this would need list(range(1, 23)).
    chrs = [opts.chr] if opts.chr else (range(1, 23) + ["X", "Y"])
    for lbidx in lbindices:
        # One makefile per index: allelotype each chromosome, filter each
        # per-chromosome VCF, then concat/sort/bgzip into one .vcf.gz.
        makefile = "makefile.{0}".format(lbidx)
        mm = MakeManager(filename=makefile)
        vcffiles = []
        for chr in chrs:
            cmd, vcffile = allelotype_on_chr(bamfile, chr, lhome, lbidx, haploid=opts.haploid)
            mm.add(bamfile, vcffile, cmd)
            filteredvcffile = vcffile.replace(".vcf", ".filtered.vcf")
            cmd = "python -m jcvi.variation.str filtervcf {}".format(vcffile)
            cmd += " --lobstr_home {}".format(lhome)
            mm.add(vcffile, filteredvcffile, cmd)
            vcffiles.append(filteredvcffile)
        gzfile = bamfile.split(".")[0] + ".{0}.vcf.gz".format(lbidx)
        cmd = "vcf-concat {0} | vcf-sort".format(" ".join(vcffiles))
        cmd += " | bgzip -c > {0}".format(gzfile)
        mm.add(vcffiles, gzfile, cmd)
        mm.run(cpus=opts.cpus)
        if s3mode:
            push_to_s3(store, gzfile)
    if cleanup:
        # Remove intermediate makefile outputs, the local BAM, indices
        # and stats files once results are stored.
        mm.clean()
        sh("rm -f {} {} *.bai *.stats".format(bamfile, mm.makefile))
|
def print_mso_auto_shape_type_spec():
    """Print the spec dictionary for msoAutoShapeType."""
    # Load the collection sorted by constant name so the rendered spec
    # is deterministic.
    auto_shape_types = MsoAutoShapeTypeCollection.load(sort='const_name')
    out = render_mso_auto_shape_type_spec(auto_shape_types)
    # NOTE: Python 2 print statement; this module targets Python 2.
    print out
|
def transaction(self):
    """A context manager for executing a transaction on this Database.

    Yields an open connection with a transaction started. Commits on
    clean exit, rolls back on error, and always closes the connection.
    """
    conn = self.get_connection()
    tx = conn.transaction()
    try:
        yield conn
        tx.commit()
    except BaseException:
        tx.rollback()
        # Re-raise: the original bare-except rolled back but swallowed the
        # exception, silently hiding every failure inside the with-block.
        raise
    finally:
        conn.close()
|
def get_identities(self, item):
    """Yield every identity present in a Gerrit changeset item."""
    data = item['data']
    # Changeset owner
    yield self.get_sh_identity(data['owner'])
    # Patchset uploader, author and approval reviewers
    for patchset in data.get('patchSets', []):
        yield self.get_sh_identity(patchset['uploader'])
        if 'author' in patchset:
            yield self.get_sh_identity(patchset['author'])
        for approval in patchset.get('approvals', []):
            # Approvals by
            yield self.get_sh_identity(approval['by'])
    # Comments reviewers
    for comment in data.get('comments', []):
        yield self.get_sh_identity(comment['reviewer'])
|
def _determine_username(self, ip):
    """SSH in as root and parse the real username from the login banner."""
    proc = subprocess.Popen(
        ["ssh", "-o", "UserKnownHostsFile=/dev/null",
         "-o", "StrictHostKeyChecking=no", "root@%s" % ip],
        stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    banner = proc.stdout.readline()
    proc.kill()
    proc.wait()
    if not banner:
        return None
    # Cloud images typically refuse root logins with this advisory message
    # naming the account that should be used instead.
    match = re.search(r"Please login as the user \"(\w+)\" rather than "
                      r"the user \"root\".", banner.decode('utf-8'))
    return match.groups()[0] if match else None
|
def handle_set_citation(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle a ``SET Citation = {"X", "Y", "Z", ...}`` statement.

    Validates the citation type and field count, then stores the parsed
    citation entries on ``self.citation``.

    :param line: the source line being parsed
    :param position: position of the statement within the line
    :param tokens: pyparsing results whose 'values' key holds the entries
    :raises CitationTooShortException: fewer than 2 entries
    :raises InvalidCitationType: unrecognized citation type
    :raises InvalidPubMedIdentifierWarning: non-integer PubMed reference
    :raises CitationTooLongException: more than 6 entries
    """
    self.clear_citation()
    values = tokens['values']
    if len(values) < 2:
        raise CitationTooShortException(self.get_line_number(), line, position)
    citation_type = values[0]
    if citation_type not in CITATION_TYPES:
        raise InvalidCitationType(self.get_line_number(), line, position, citation_type)
    if 2 == len(values):
        # Short form: {type, reference} handled separately.
        return self.handle_set_citation_double(line, position, tokens)
    citation_reference = values[2]
    if citation_type == CITATION_TYPE_PUBMED and not is_int(citation_reference):
        raise InvalidPubMedIdentifierWarning(self.get_line_number(), line, position, citation_reference)
    if 4 <= len(values) and not valid_date(values[3]):
        # Unparseable date: keep only the first three entries.
        log.debug('Invalid date: %s. Truncating entry.', values[3])
        self.citation = dict(zip(CITATION_ENTRIES, values[:3]))
        return tokens
    if 5 <= len(values):
        # Authors field: split the pipe-delimited author list.
        values[4] = [value.strip() for value in values[4].split('|')]
    if 6 < len(values):
        raise CitationTooLongException(self.get_line_number(), line, position)
    self.citation = dict(zip(CITATION_ENTRIES, values))
    return tokens
|
def _bsecurate_cli_compare_basis_files(args):
    '''Handles the compare-basis-files subcommand.'''
    same = curate.compare_basis_files(args.file1, args.file2,
                                      args.readfmt1, args.readfmt2,
                                      args.uncontract_general)
    # compare_basis_files returns truthy when the two files match.
    return "No difference found" if same else "DIFFERENCES FOUND. SEE ABOVE"
|
def real_pathspec(self):
    """Returns a pathspec for an aff4 object even if there is none stored.

    Walks up the URN hierarchy until an ancestor with a stored pathspec is
    found, then re-appends the stripped path components to reconstruct this
    object's full pathspec.

    Raises:
        IOError: if no ancestor has a pathspec either.
    """
    pathspec = self.Get(self.Schema.PATHSPEC)
    stripped_components = []
    parent = self
    # TODO(user): this code is potentially slow due to multiple separate
    # aff4.FACTORY.Open() calls. OTOH the loop below is executed very rarely -
    # only when we deal with deep files that got fetched alone and then
    # one of the directories in their path gets updated.
    while not pathspec and len(parent.urn.Split()) > 1:
        # We try to recurse up the tree to get a real pathspec.
        # These directories are created automatically without pathspecs when a
        # deep directory is listed without listing the parents.
        # Note /fs/os or /fs/tsk won't be updateable so we will raise IOError
        # if we try.
        stripped_components.append(parent.urn.Basename())
        pathspec = parent.Get(parent.Schema.PATHSPEC)
        parent = FACTORY.Open(parent.urn.Dirname(), token=self.token)
    if pathspec:
        if stripped_components:
            # We stripped pieces of the URL, time to add them back.
            # The last stripped component belongs to the node whose pathspec
            # was found, so it is excluded from the re-append ([:-1]).
            new_path = utils.JoinPath(*reversed(stripped_components[:-1]))
            pathspec.Append(rdf_paths.PathSpec(path=new_path, pathtype=pathspec.last.pathtype))
    else:
        raise IOError("Item has no pathspec.")
    return pathspec
|
def Render(self):
    """Generates a sequence of points suitable for plotting.

    An empirical CDF is a step function; linear interpolation can be
    misleading, so each probability level is emitted twice to form
    horizontal steps.

    Returns:
        tuple of (xs, ps)
    """
    xs = [self.xs[0]]
    ps = [0.0]
    last = len(self.xs) - 1
    for i, p in enumerate(self.ps):
        # Horizontal segment at height p, from xs[i] to xs[i+1]
        # (the final value has no right-hand endpoint).
        xs.append(self.xs[i])
        ps.append(p)
        if i < last:
            xs.append(self.xs[i + 1])
            ps.append(p)
    return xs, ps
|
def _remove_finder(importer, finder):
    """Remove an existing finder from pkg_resources.

    Only acts when the currently registered finder for ``importer`` is a
    ChainedFinder containing ``finder``; after removal the registration is
    collapsed or cleared depending on how many finders remain.
    """
    existing_finder = _get_finder(importer)
    if not existing_finder:
        # Nothing registered for this importer type; nothing to remove.
        return
    if isinstance(existing_finder, ChainedFinder):
        try:
            existing_finder.finders.remove(finder)
        except ValueError:
            # The finder was not part of the chain; leave it untouched.
            return
        if len(existing_finder.finders) == 1:
            # Collapse a single-element chain back to the bare finder.
            pkg_resources.register_finder(importer, existing_finder.finders[0])
        elif len(existing_finder.finders) == 0:
            pkg_resources.register_finder(importer, pkg_resources.find_nothing)
        else:
            # NOTE(review): with 2+ finders remaining this registers
            # find_nothing, discarding the surviving finders - that looks
            # wrong; confirm whether the (mutated) ChainedFinder should be
            # left/re-registered instead.
            pkg_resources.register_finder(importer, pkg_resources.find_nothing)
|
def configure_versioned_classes(self):
    """Configure all versioned classes that were collected during the
    instrumentation process."""
    # Build an audit table for every pending model class, honouring any
    # per-class column exclusions.
    for versioned_cls in self.pending_classes:
        excluded = versioned_cls.__versioned__.get('exclude')
        self.audit_table(versioned_cls.__table__, excluded)
    assign_actor(self.base, self.transaction_cls, self.actor_cls)
|
def get_date(self, p_tag):
    """Given a date tag, return a date object.

    Returns None when the tag is absent/empty or its value cannot be
    parsed as a date.
    """
    raw = self.tag_value(p_tag)
    if not raw:
        return None
    try:
        return date_string_to_date(raw)
    except ValueError:
        # Malformed date string: treat as missing.
        return None
|
def login_token(api, username, password):
    """Login using the pre-RouterOS-6.43 challenge/response method."""
    # First /login call returns a challenge token under 'ret'.
    challenge = tuple(api('/login'))[0]['ret']
    hashed = encode_password(challenge, password)
    # Second call answers the challenge; tuple() forces the reply
    # sentences to be consumed.
    tuple(api('/login', **{'name': username, 'response': hashed}))
|
def get_kwargs_index(target) -> int:
    """Return the index of the "**kwargs" parameter if such a parameter
    exists in the function arguments, or -1 otherwise.

    :param target:
        The target function for which the kwargs index should be determined
    :return:
        The keyword arguments index if it exists or -1 if not
    """
    code = target.__code__
    if not (code.co_flags & inspect.CO_VARKEYWORDS):
        return -1
    # **kwargs comes after the positional args, keyword-only args and,
    # when present, the *args parameter.
    star_args = 1 if code.co_flags & inspect.CO_VARARGS else 0
    return code.co_argcount + code.co_kwonlyargcount + star_args
|
def _parse_response_for_dict ( response ) :
'''Extracts name - values from response header . Filter out the standard
http headers .'''
|
if response is None :
return None
http_headers = [ 'server' , 'date' , 'location' , 'host' , 'via' , 'proxy-connection' , 'connection' ]
return_dict = _HeaderDict ( )
if response . headers :
for name , value in response . headers :
if not name . lower ( ) in http_headers :
return_dict [ name ] = value
return return_dict
|
def show_category(self):
    """doc: http://open.youku.com/docs/doc?id=93"""
    url = 'https://openapi.youku.com/v2/schemas/show/category.json'
    response = requests.get(url)
    # Raise a structured error if the API reports a failure.
    check_error(response)
    return response.json()
|
def path_upper(self, object_id, limit_depth=1000000, db_session=None, *args, **kwargs):
    """This returns you path to root node starting from object_id
    currently only for postgresql

    :param object_id:
    :param limit_depth:
    :param db_session:
    :return:
    """
    # Delegate straight to the service layer; *args precede keywords for
    # readability (call semantics are identical either way).
    return self.service.path_upper(
        *args,
        object_id=object_id,
        limit_depth=limit_depth,
        db_session=db_session,
        **kwargs)
|
def write_cfg(path, value) -> None:
    """Serialize *value* as JSON and write it to the resolved config path.

    :param path: example: "/.rwmeta/developer_settings.json"
    :param value: dict
    """
    target = __build_path(path)
    with open(target, 'w') as handle:
        json.dump(value, handle)
|
def TerminateFlow(client_id, flow_id, reason=None, flow_state=rdf_flow_objects.Flow.FlowState.ERROR):
    """Terminates a flow and all of its children.

    Args:
      client_id: Client ID of a flow to terminate.
      flow_id: Flow ID of a flow to terminate.
      reason: String with a termination reason.
      flow_state: Flow state to be assigned to a flow after termination.
        Defaults to FlowState.ERROR.
    """
    # Breadth-first walk: terminate each level, then collect its children.
    pending = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)]
    while pending:
        children = []
        for flow_obj in pending:
            _TerminateFlow(flow_obj, reason=reason, flow_state=flow_state)
            children.extend(
                data_store.REL_DB.ReadChildFlowObjects(flow_obj.client_id, flow_obj.flow_id))
        pending = children
|
def post_message(self, msg):
    '''Process a newly received MAVLink message.

    Records timestamps, caches the latest message per type, tracks
    per-source sequence numbers for packet-loss accounting, updates
    flightmode/param/GPS bookkeeping, and invokes registered hooks.
    '''
    if '_posted' in msg.__dict__:
        # Already processed once; avoid double-counting on re-post.
        return
    msg._posted = True
    msg._timestamp = time.time()
    # NOTE(review): 'type' shadows the builtin; kept as-is.
    type = msg.get_type()
    # Cache the latest message per type, but don't let HEARTBEATs from a
    # GCS or gimbal component overwrite the vehicle's heartbeat.
    if type != 'HEARTBEAT' or (msg.type != mavlink.MAV_TYPE_GCS and msg.type != mavlink.MAV_TYPE_GIMBAL):
        self.messages[type] = msg
    # Track vehicle uptime from whichever time field this message carries.
    if 'usec' in msg.__dict__:
        self.uptime = msg.usec * 1.0e-6
    if 'time_boot_ms' in msg.__dict__:
        self.uptime = msg.time_boot_ms * 1.0e-3
    if self._timestamp is not None:
        if self.notimestamps:
            msg._timestamp = self.uptime
        else:
            msg._timestamp = self._timestamp
    src_system = msg.get_srcSystem()
    src_component = msg.get_srcComponent()
    src_tuple = (src_system, src_component)
    # ('3', 'D') is the source tuple used by 3DR radios, whose injected
    # packets sit outside the vehicle's sequence numbering.
    radio_tuple = (ord('3'), ord('D'))
    if not (src_tuple == radio_tuple or msg.get_type() == 'BAD_DATA'):
        # Sequence-number bookkeeping for per-source packet-loss stats.
        if not src_tuple in self.last_seq:
            last_seq = -1
        else:
            last_seq = self.last_seq[src_tuple]
        seq = (last_seq + 1) % 256
        seq2 = msg.get_seq()
        if seq != seq2 and last_seq != -1:
            # Gap between expected and actual sequence number = lost packets.
            diff = (seq2 - seq) % 256
            self.mav_loss += diff
            # print("lost %u seq=%u seq2=%u last_seq=%u src_system=%u %s" % (diff, seq, seq2, last_seq, src_system, msg.get_type()))
        self.last_seq[src_tuple] = seq2
        self.mav_count += 1
        self.timestamp = msg._timestamp
    if type == 'HEARTBEAT' and msg.get_srcComponent() != mavlink.MAV_COMP_ID_GIMBAL:
        # Adopt the heartbeat's source as the current target vehicle.
        self.target_system = msg.get_srcSystem()
        self.target_component = msg.get_srcComponent()
        if float(mavlink.WIRE_PROTOCOL_VERSION) >= 1 and msg.type != mavlink.MAV_TYPE_GCS:
            self.flightmode = mode_string_v10(msg)
            self.mav_type = msg.type
            self.base_mode = msg.base_mode
    elif type == 'PARAM_VALUE':
        # NOTE(review): 's' is unused — kept byte-identical; safe to remove.
        s = str(msg.param_id)
        self.params[str(msg.param_id)] = msg.param_value
        if msg.param_index + 1 == msg.param_count:
            # Last parameter of the fetch arrived.
            self.param_fetch_in_progress = False
            self.param_fetch_complete = True
    elif type == 'SYS_STATUS' and mavlink.WIRE_PROTOCOL_VERSION == '0.9':
        self.flightmode = mode_string_v09(msg)
    elif type == 'GPS_RAW':
        # Remember the first 2D-or-better fix as HOME.
        if self.messages['HOME'].fix_type < 2:
            self.messages['HOME'] = msg
    elif type == 'GPS_RAW_INT':
        # Remember the first 3D fix as HOME.
        if self.messages['HOME'].fix_type < 3:
            self.messages['HOME'] = msg
    for hook in self.message_hooks:
        hook(self, msg)
    if (msg.get_signed() and self.mav.signing.link_id == 0 and msg.get_link_id() != 0 and self.target_system == msg.get_srcSystem() and self.target_component == msg.get_srcComponent()):  # change to link_id from incoming packet
        self.mav.signing.link_id = msg.get_link_id()
|
def statcast_single_game(game_pk, team=None):
    """Pulls statcast play-level data from Baseball Savant for a single game,
    identified by its MLB game ID (game_pk in statcast data)

    INPUTS:
    game_pk : 6-digit integer MLB game ID to retrieve
    """
    raw = single_game_request(game_pk)
    return postprocessing(raw, team)
|
def get_conf_update(self):
    """Get updated config from URL, fallback to local file if download fails."""
    dyn_conf = self.get_collection_rules()
    if not dyn_conf:
        # Download failed or returned nothing: fall back to the local file.
        return self.get_conf_file()
    if dyn_conf.get('version') is None:
        raise ValueError("ERROR: Could not find version in json")
    dyn_conf['file'] = self.collection_rules_file
    logger.debug("Success reading config")
    digest = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest()
    logger.debug('sha1 of config: %s', digest)
    return dyn_conf
|
def steem_instance(self):
    '''Return the cached Steem instance if one exists; otherwise pick a
    good node and instantiate the Steem class, retrying on failure.

    Returns False when every retry attempt fails.
    '''
    if self.s:
        return self.s
    for attempt in range(default.max_retry):
        node = self.util.goodnode(self.nodes)
        try:
            self.s = Steem(keys=self.keys, nodes=[node])
        except Exception as e:
            # Log/back off, clear the partial instance, and try again.
            self.util.retry("COULD NOT GET STEEM INSTANCE", e, attempt, default.wait_time)
            self.s = None
        else:
            return self.s
    return False
|
def trimmed_split(s, seps=(";", ",")):
    """Split *s* on the first separator from *seps* found in it, trimming
    whitespace around each piece; return [s] unchanged if none matches.
    """
    for sep in seps:
        if sep in s:
            return [piece.strip() for piece in s.strip().split(sep)]
    return [s]
|
def scalars_impl(self, run, tag_regex_string):
    """Given a tag regex and single run, return ScalarEvents.

    Args:
      run: A run string.
      tag_regex_string: A regular expression that captures portions of tags.

    Raises:
      ValueError: if the scalars plugin is not registered.

    Returns:
      A dictionary that is the JSON-able response.
    """
    if not tag_regex_string:  # The user provided no regex.
        return {_REGEX_VALID_PROPERTY: False, _TAG_TO_EVENTS_PROPERTY: {}, }
    # Construct the regex.
    try:
        regex = re.compile(tag_regex_string)
    except re.error:
        # Malformed regex: report "invalid" instead of raising.
        return {_REGEX_VALID_PROPERTY: False, _TAG_TO_EVENTS_PROPERTY: {}, }
    # Fetch the tags for the run. Filter for tags that match the regex.
    run_to_data = self._multiplexer.PluginRunToTagToContent(scalars_metadata.PLUGIN_NAME)
    tag_to_data = None
    try:
        tag_to_data = run_to_data[run]
    except KeyError:  # The run could not be found. Perhaps a configuration specified a run that
        # TensorBoard has not read from disk yet.
        # NOTE(review): 'payload' is only bound here or inside the if-branch
        # below; if 'run' exists but maps to an empty dict, the final return
        # would hit a NameError. Appears unreachable in practice — confirm.
        payload = {}
    if tag_to_data:
        scalars_plugin_instance = self._get_scalars_plugin()
        if not scalars_plugin_instance:
            raise ValueError(('Failed to respond to request for /scalars. ' 'The scalars plugin is oddly not registered.'))
        form = scalars_plugin.OutputFormat.JSON
        # Collect scalar events for every tag matching the regex.
        payload = {tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0] for tag in tag_to_data.keys() if regex.match(tag)}
    return {_REGEX_VALID_PROPERTY: True, _TAG_TO_EVENTS_PROPERTY: payload, }
|
def fit_isochrone(orbit, m0=2E11, b0=1., minimize_kwargs=None):
    r"""Fit the toy Isochrone potential to the sum of the energy residuals
    relative to the mean energy by minimizing the function

    .. math::

        f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2

    TODO: This should fail if the Hamiltonian associated with the orbit has
    a frame other than StaticFrame

    Parameters
    ----------
    orbit : `~gala.dynamics.Orbit`
    m0 : numeric (optional)
        Initial mass guess.
    b0 : numeric (optional)
        Initial b guess.
    minimize_kwargs : dict (optional)
        Keyword arguments to pass through to `scipy.optimize.minimize`.

    Returns
    -------
    potential : `IsochronePotential`
        The best-fit Isochrone potential, carrying the best-fit scale
        mass ``m`` and core radius ``b``.
    """
    pot = orbit.hamiltonian.potential
    if pot is None:
        raise ValueError("The orbit object must have an associated potential")
    w = np.squeeze(orbit.w(pot.units))
    if w.ndim > 2:
        raise ValueError("Input orbit object must be a single orbit.")

    def f(p, w):
        # Objective: sum of squared energy residuals about the mean energy,
        # with (m, b) optimized in log-space.
        logm, logb = p
        potential = IsochronePotential(m=np.exp(logm), b=np.exp(logb), units=pot.units)
        H = (potential.value(w[:3]).decompose(pot.units).value + 0.5 * np.sum(w[3:] ** 2, axis=0))
        return np.sum(np.squeeze(H - np.mean(H)) ** 2)

    logm0 = np.log(m0)
    logb0 = np.log(b0)
    if minimize_kwargs is None:
        minimize_kwargs = dict()
    minimize_kwargs['x0'] = np.array([logm0, logb0])
    # Derivative-free Nelder-Mead by default; caller may override.
    minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
    res = minimize(f, args=(w,), **minimize_kwargs)
    if not res.success:
        raise ValueError("Failed to fit toy potential to orbit.")
    # NOTE(review): abs() of the log-parameters silently sign-flips any
    # negative best-fit log (i.e. m < 1 or b < 1 in these units) — looks
    # intentional upstream but worth confirming.
    logm, logb = np.abs(res.x)
    m = np.exp(logm)
    b = np.exp(logb)
    return IsochronePotential(m=m, b=b, units=pot.units)
|
def get_pre_auth_url_m(self, redirect_uri):
    """Build a pre-auth URL that can be sent directly inside WeChat so the
    user authorizes in one step (mobile bindcomponent flow)."""
    base = "https://mp.weixin.qq.com/safe/bindcomponent?action=bindcomponent&auth_type=3&no_scan=1&"
    encoded_uri = quote(redirect_uri, safe='')
    pre_auth_code = self.create_preauthcode()['pre_auth_code']
    return "{0}component_appid={1}&pre_auth_code={2}&redirect_uri={3}".format(
        base, self.component_appid, pre_auth_code, encoded_uri)
|
def do_examine(self, arg):
    """Opens a unit test case's .out.compare file to examine the verbose
    comparison report across values.

    ``arg`` is expected to be "<testcase> <output>" separated by
    whitespace; raises ValueError if it does not split into two parts.
    """
    # We use their default editor (if it has been set); otherwise we can't do much of
    # anything and issue a warning.
    from os import getenv, path, system
    testcase, output = arg.split()
    target = path.join(self.tests[self.active].stagedir, "tests", testcase, "{}.compare".format(output))
    if getenv("EDITOR") is not None:
        # NOTE(review): the backticks make the shell execute $EDITOR's
        # *output* as a command rather than just launching the editor —
        # presumably intentional here, but verify; the conventional form
        # would be system("$EDITOR {}").
        system("`$EDITOR {}`".format(target))
    else:
        msg.warn("$EDITOR not set in environment. Can't open {}".format(target))
|
def sign(self, identity, blob):
    """Sign given blob and return the signature (as bytes).

    The device returns a 65-byte signature whose first byte is 0x00;
    only the remaining 64 bytes are returned to the caller.
    """
    curve_name = identity.get_curve_name(ecdh=False)
    log.debug('"%s" signing %r (%s) on %s', identity.to_string(), blob, curve_name, self)
    try:
        result = self._defs.sign_identity(
            self.conn,
            identity=self._identity_proto(identity),
            challenge_hidden=blob,
            challenge_visual='',
            ecdsa_curve_name=curve_name)
        log.debug('result: %s', result)
        signature = result.signature
        assert len(signature) == 65
        assert signature[:1] == b'\x00'
        return bytes(signature[1:])
    except self._defs.TrezorFailure as e:
        # Wrap device-level failures in the generic interface error.
        msg = '{} error: {}'.format(self, e)
        log.debug(msg, exc_info=True)
        raise interface.DeviceError(msg)
|
def in_flight_request_count(self, node_id=None):
    """Get the number of in-flight requests for a node or all nodes.

    Arguments:
        node_id (int, optional): a specific node to check. If unspecified,
            return the total for all nodes

    Returns:
        int: pending in-flight requests for the node, or all nodes if None
    """
    if node_id is None:
        # Snapshot the connections before summing to tolerate concurrent
        # mutation of the dict.
        return sum(len(c.in_flight_requests) for c in list(self._conns.values()))
    conn = self._conns.get(node_id)
    return len(conn.in_flight_requests) if conn is not None else 0
|
def in_placement_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
    """Returns True if you can place something at a position. Remember,
    buildings usually use 2x2, 3x3 or 5x5 of these grid points.

    Caution: some x and y offset might be required, see ramp code:
    https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18
    """
    assert isinstance(pos, (Point2, Point3, Unit))
    grid_point = pos.position.to2.rounded
    return self._game_info.placement_grid[grid_point] != 0
|
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    .. warning::

        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    # Standard salt state return structure.
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # list of images to keep
    images = []
    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():  # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue
        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # resolve a docker tag to its image uuid before keeping it
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])
    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)
    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':  # NOTE: docker images are made of multiple layers, loop over them
            while image:
                image_uuid = image['manifest']['uuid']
                # imgadm.delete returns the uuids it removed; membership
                # confirms the delete succeeded.
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                # walk down to the parent layer, if any
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:  # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'
    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'
    return ret
|
def upgradeProcessor1to2(oldProcessor):
    """Batch processors stopped polling at version 2, so they no longer needed
    the idleInterval attribute.  They also gained a scheduled attribute which
    tracks their interaction with the scheduler.  Since they stopped polling,
    we also set them up as a timed event here to make sure that they don't
    silently disappear, never to be seen again: running them with the
    scheduler gives them a chance to figure out what's up and set up whatever
    other state they need to continue to run.

    Since this introduces a new dependency of all batch processors on a
    powerup for the IScheduler, install a Scheduler or a SubScheduler if one
    is not already present.
    """
    # Carry busyInterval forward; idleInterval is intentionally dropped.
    newProcessor = oldProcessor.upgradeVersion(oldProcessor.typeName, 1, 2, busyInterval=oldProcessor.busyInterval)
    newProcessor.scheduled = extime.Time()
    s = newProcessor.store
    # Adapt the store to IScheduler; None default means "no scheduler yet".
    sch = iaxiom.IScheduler(s, None)
    if sch is None:
        if s.parent is None:  # Only site stores have no parents.
            sch = Scheduler(store=s)
        else:  # Substores get subschedulers.
            sch = SubScheduler(store=s)
        installOn(sch, s)
    # And set it up to run.
    sch.schedule(newProcessor, newProcessor.scheduled)
    return newProcessor
|
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Commands for creating ACL.

    Builds one dynamic ACL for the ingress direction and one for the
    egress direction, each terminated by an "exit".
    """
    ingress_rules, egress_rules = self._format_rules_for_eos(sg_rules)
    cmds = ["ip access-list %s dynamic" % self._acl_name(sg_id, n_const.INGRESS_DIRECTION)]
    cmds.extend(ingress_rules)
    cmds.append("exit")
    cmds.append("ip access-list %s dynamic" % self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
    cmds.extend(egress_rules)
    cmds.append("exit")
    return cmds
|
def _validate_compute_resources(self, cr):
    """Checks contents of sub dictionary for managed clusters.

    :param cr: computeResources
    :type cr: dict
    :raises InvalidParameterValueException: on any invalid field
    :raises InternalFailure: for SPOT environments (not yet supported)
    """
    required = ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type')
    for key in required:
        if key not in cr:
            raise InvalidParameterValueException('computeResources must contain {0}'.format(key))
    if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None:
        raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole']))
    if cr['maxvCpus'] < 0:
        raise InvalidParameterValueException('maxVCpus must be positive')
    if cr['minvCpus'] < 0:
        raise InvalidParameterValueException('minVCpus must be positive')
    if cr['maxvCpus'] < cr['minvCpus']:
        raise InvalidParameterValueException('maxVCpus must be greater than minvCpus')
    if not cr['instanceTypes']:
        raise InvalidParameterValueException('At least 1 instance type must be provided')
    for instance_type in cr['instanceTypes']:
        # 'optimal' is a wildcard meaning "pick from the latest generation".
        if instance_type != 'optimal' and instance_type not in EC2_INSTANCE_TYPES:
            raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type))
    for group_id in cr['securityGroupIds']:
        if self.ec2_backend.get_security_group_from_id(group_id) is None:
            raise InvalidParameterValueException('security group {0} does not exist'.format(group_id))
    if not cr['securityGroupIds']:
        raise InvalidParameterValueException('At least 1 security group must be provided')
    for subnet_id in cr['subnets']:
        try:
            self.ec2_backend.get_subnet(subnet_id)
        except InvalidSubnetIdError:
            raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id))
    if not cr['subnets']:
        raise InvalidParameterValueException('At least 1 subnet must be provided')
    if cr['type'] not in ('EC2', 'SPOT'):
        raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT')
    if cr['type'] == 'SPOT':
        raise InternalFailure('SPOT NOT SUPPORTED YET')
|
def continuous_periods(self):
    """Return a list of continuous data periods by removing the data gaps
    from the overall record."""
    periods = []
    period_start = self.start_date
    for gap in self.pot_data_gaps:
        # Close the current period the day before the gap begins,
        # and resume the day after it ends.
        periods.append(PotPeriod(period_start, gap.start_date - timedelta(days=1)))
        period_start = gap.end_date + timedelta(days=1)
    # The final period runs to the end of the record.
    periods.append(PotPeriod(period_start, self.end_date))
    return periods
|
def clean_pod_template(pod_template):
    """Normalize pod template and check for type errors.

    :param pod_template: a ``kubernetes.client.V1Pod`` instance.
    :returns: a deep copy of *pod_template* with ``metadata``,
        ``metadata.labels`` and the first container's ``env`` guaranteed
        to be non-None, so later code can mutate them without checks.
    :raises TypeError: if *pod_template* is a string or dict — the caller
        should use ``KubeCluster.from_yaml`` / ``KubeCluster.from_dict``.
    """
    if isinstance(pod_template, str):
        # BUGFIX: implicit string concatenation previously produced
        # "got %sIf trying..." — add the missing separator.
        msg = ('Expected a kubernetes.client.V1Pod object, got %s. '
               'If trying to pass a yaml filename then use '
               'KubeCluster.from_yaml')
        raise TypeError(msg % pod_template)
    if isinstance(pod_template, dict):
        msg = ('Expected a kubernetes.client.V1Pod object, got %s. '
               'If trying to pass a dictionary specification then use '
               'KubeCluster.from_dict')
        raise TypeError(msg % str(pod_template))
    # Copy so the caller's template is never mutated.
    pod_template = copy.deepcopy(pod_template)
    # Make sure metadata / labels / env objects exist, so they can be modified
    # later without a lot of `is None` checks
    if pod_template.metadata is None:
        pod_template.metadata = client.V1ObjectMeta()
    if pod_template.metadata.labels is None:
        pod_template.metadata.labels = {}
    if pod_template.spec.containers[0].env is None:
        pod_template.spec.containers[0].env = []
    return pod_template
|
def add_file_handler_to_root(log_fn):
    """Adds a file handler to the root logging.

    :param log_fn: the name of the log file.
    :type log_fn: str
    """
    handler = logging.FileHandler(log_fn, mode="w")
    formatter = logging.Formatter(
        fmt="[%(asctime)s %(name)s %(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
|
# NOTE: the three lines below are dataset-page boilerplate accidentally
# captured during extraction; commented out so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.