signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_tree(profile, sha, recursive=True):
    """Fetch a tree from the remote repository.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        sha
            The SHA of the tree to fetch.
        recursive
            If ``True``, traverse all subtrees and their subtrees, all
            the way down, returning every object in the tree.

    Returns:
        A dict with data about the tree.
    """
    resource = "/trees/" + sha + ("?recursive=1" if recursive else "")
    data = api.get_request(profile, resource)
    return prepare(data)
|
def _get_addr(self, v):
    """Resolve *v* (a CFGNode or a plain address) to an address.

    :param v: Can be one of the following: a CFGNode, or an address.
    :return: The address.
    :rtype: int
    """
    if isinstance(v, CFGNode):
        return v.addr
    if type(v) is int:
        return v
    raise AngrBladeError('Unsupported SimRun argument type %s' % type(v))
|
def msgblock(key, text, side='|'):
    """Wrap *text* in a simple ASCII box labelled with *key*."""
    pieces = [' + --- ' + key + ' ---\n']
    for row in text.split('\n'):
        pieces.append(' ' + side + ' ' + row + '\n')
    pieces.append(' L ___ ' + key + ' ___\n')
    return ''.join(pieces)
|
def fput_object(self, bucket_name, object_name, file_path,
                content_type='application/octet-stream',
                metadata=None, sse=None, progress=None,
                part_size=DEFAULT_PART_SIZE):
    """Upload the file at *file_path* as a new object.

    Examples:
        minio.fput_object('foo', 'bar', 'filepath', 'text/plain')

    :param bucket_name: Bucket to write the object to.
    :param object_name: Name of the object.
    :param file_path: Local file path to be uploaded.
    :param content_type: Content type of the object.
    :param metadata: Any additional metadata to be uploaded along with
        the PUT request.
    :param sse: Server-side encryption settings.
    :param progress: A progress object.
    :param part_size: Multipart part size.
    :return: etag
    """
    file_size = os.stat(file_path).st_size
    # Open the file in binary 'read' mode for streaming upload.
    with open(file_path, 'rb') as file_data:
        return self.put_object(bucket_name, object_name, file_data,
                               file_size, content_type, metadata, sse,
                               progress, part_size)
|
def check_auth(username, pwd):
    """Check whether a username / password combination is valid.

    The configured credential ``dashboard_httpauth`` has the form
    ``"user:password"``.  The string is split only on the FIRST colon,
    so passwords containing ':' are compared correctly (the original
    ``split(":")[1]`` truncated such passwords, and crashed with
    IndexError when no colon was present).
    """
    expected_user, _, expected_pwd = \
        get_current_config()["dashboard_httpauth"].partition(":")
    return username == expected_user and pwd == expected_pwd
|
def validate(mcs, bases, attributes):
    """Run all attribute checks; the base ``object`` class itself is
    skipped."""
    if bases[0] is object:
        return None
    # Run every check in order; each raises on invalid attributes.
    for check in (mcs.check_model_cls,
                  mcs.check_include_exclude,
                  mcs.check_properties):
        check(attributes)
|
def assert_element_present(self, selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT):
    """Similar to wait_for_element_present(), but returns nothing.

    Waits for an element to appear in the HTML of a page.  The element
    does not need to be visible (it may be hidden).
    Returns True if successful.  Default timeout = SMALL_TIMEOUT.
    """
    # Scale the default timeout when a multiplier is configured;
    # explicitly passed timeouts are left untouched.
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    self.wait_for_element_present(selector, by=by, timeout=timeout)
    return True
|
async def getStickerSet(self, name):
    """Fetch a sticker set by name.

    See: https://core.telegram.org/bots/api#getstickerset
    """
    # locals() is captured before any other local is created, so the
    # request payload contains exactly the method's arguments.
    p = _strip(locals())
    return await self._api_request('getStickerSet', _rectify(p))
|
def _parse_relation(chunk, type="O"):
    """Return a string of the roles and relations parsed from the given
    <chunk> element.

    The chunk type (which is part of the relation string) can be given
    as a parameter.
    """
    # r1 = relation roles; r2 = relation ids (or the id of the chunk
    # the relation points at, when XML_ID is absent).
    r1 = chunk.get(XML_RELATION)
    r2 = chunk.get(XML_ID, chunk.get(XML_OF))
    # "-" entries mean "no value"; map them to None.
    r1 = [x != "-" and x or None for x in r1.split("|")] or [None]
    r2 = [x != "-" and x or None for x in r2.split("|")] or [None]
    # Keep only the part after the uid separator (sentence-local id).
    r2 = [x is not None and x.split(_UID_SEPARATOR)[-1] or x for x in r2]
    # Pad the shorter list by repetition so the two zip up pairwise.
    if len(r1) < len(r2):
        r1 = r1 + r1 * (len(r2) - len(r1))
        # [1] ["SBJ", "OBJ"] => "SBJ-1;OBJ-1"
    if len(r2) < len(r1):
        r2 = r2 + r2 * (len(r1) - len(r2))
        # [2,4] ["OBJ"] => "OBJ-2;OBJ-4"
    return ";".join(["-".join([x for x in (type, r1, r2) if x]) for r1, r2 in zip(r1, r2)])
|
def find_credential(self, url):
    """Return the credential of the first configured repository whose
    URL is a prefix of *url*, or None when no repository matches."""
    matches = (cred
               for repository, cred in self.creds_by_repository.items()
               if url.startswith(repository))
    return next(matches, None)
|
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Ignore ``usergroup`` on read, and rename the server's
    ``auth_source_ldap`` attribute to ``auth_source``."""
    if entity is None:
        # pylint:disable=no-member
        entity = type(self)(self._server_config, usergroup=self.usergroup)
    ignore = set() if ignore is None else ignore
    ignore.add('usergroup')
    attrs = self.read_json() if attrs is None else attrs
    attrs['auth_source'] = attrs.pop('auth_source_ldap')
    return super(ExternalUserGroup, self).read(entity, attrs, ignore, params)
|
def store(self, record, value):
    """Store *value* for this field on *record*, keyed by record id."""
    field_values = record._values[self.name]
    field_values[record.id] = value
|
def _compute_distance ( self , rup , dists , C ) :
"""equation 3 pag 1960:
` ` c31 * logR + c32 * ( R - Rref ) ` `"""
|
rref = 1.0
c31 = - 1.7
return ( c31 * np . log10 ( dists . rhypo ) + C [ 'c32' ] * ( dists . rhypo - rref ) )
|
def parse(item):
    r"""Parse a ``key=value`` tag item into a dict.

    Returns ``{'key': ..., 'value': ...}`` where ``value`` is ``None``
    when the item has no ``=`` (or the value is empty).  The escape
    sequences ``\:``, ``\s``, ``\n``, ``\r`` and ``\\`` in the value
    are unescaped, in that order.
    """
    key, _, raw = item.partition('=')
    # Order matters: '\\\\' must be handled last, matching the
    # original replacement sequence exactly.
    substitutions = (
        ('\\:', ';'),
        ('\\s', ' '),
        ('\\n', '\n'),
        ('\\r', '\r'),
        ('\\\\', '\\'),
    )
    for escape, plain in substitutions:
        raw = raw.replace(escape, plain)
    return {'key': key, 'value': raw or None}
|
def mainloop(self):
    """Handle events and call their handlers, forever.

    Loops until ``self.keep_going`` becomes falsy.  Each pass holds
    ``self.lock`` while firing the one-shot ``on_connect`` callback
    (then dropping it), re-checking ``keep_going``, and processing one
    batch of events.
    """
    while self.keep_going:
        with self.lock:
            # Fire the connect callback exactly once.
            # NOTE(review): assumes readable(2) polls readability with a
            # 2-second timeout -- confirm against its definition.
            if self.on_connect and not self.readable(2):
                self.on_connect()
                self.on_connect = None
            # keep_going may have been cleared by a handler above.
            if not self.keep_going:
                break
            self.process_once()
|
def msg_curse(self, args=None, max_width=None):
    """Return the list of lines to display in the curses interface for
    the process-list plugin."""
    # Init the return message
    ret = []
    # Only process if stats exist and display plugin enable...
    if not self.stats or args.disable_process:
        return ret
    # Compute the sort key
    process_sort_key = glances_processes.sort_key
    # Header
    self.__msg_curse_header(ret, process_sort_key, args)
    # Process list
    # Loop over processes (sorted by the sort key previously compute)
    first = True
    for p in self.__sort_stats(process_sort_key):
        ret.extend(self.get_process_curses_data(p, first, args))
        # End of extended stats: only the first process gets them
        first = False
    # Summary/min/max footers are only shown when a filter is active
    if glances_processes.process_filter is not None:
        if args.reset_minmax_tag:
            # One-shot flag: clear it and reset the stored min/max values
            args.reset_minmax_tag = not args.reset_minmax_tag
            self.__mmm_reset()
        self.__msg_curse_sum(ret, args=args)
        self.__msg_curse_sum(ret, mmm='min', args=args)
        self.__msg_curse_sum(ret, mmm='max', args=args)
    # Return the message with decoration
    return ret
|
def _parameter_constraints ( self , theta_E , gamma , q , phi_G , s_scale ) :
"""sets bounds to parameters due to numerical stability
: param theta _ E :
: param gamma :
: param q :
: param phi _ G :
: param s _ scale :
: return :"""
|
if theta_E < 0 :
theta_E = 0
if s_scale < 0.00000001 :
s_scale = 0.00000001
if gamma < 1.2 :
gamma = 1.2
theta_E = 0
if gamma > 2.9 :
gamma = 2.9
theta_E = 0
if q < 0.01 :
q = 0.01
theta_E = 0
if q > 1 :
q = 1.
theta_E = 0
return theta_E , gamma , q , phi_G , s_scale
|
def check_marginal_likelihoods(tree, feature):
    """Sanity check: the combined bottom-up and top-down likelihood of
    each node of the tree must be the same as its parent's.

    :param tree: ete3.Tree, the tree of interest
    :param feature: str, character for which the likelihood is calculated
    :return: void
    """
    lh_feature = get_personalized_feature_name(feature, LH)
    lh_sf_feature = get_personalized_feature_name(feature, LH_SF)

    def node_loglh(n):
        # log10 marginal likelihood, corrected by the stored scaling factor
        return np.log10(getattr(n, lh_feature).sum()) - getattr(n, lh_sf_feature)

    for node in tree.traverse():
        # Roots and zero-length leaves carry no independent constraint.
        if node.is_root() or (node.is_leaf() and node.dist == 0):
            continue
        assert (round(node_loglh(node), 2) == round(node_loglh(node.up), 2))
|
def bounds(sceneid):
    """Retrieve the WGS84 image bounds of a Sentinel-2 scene.

    Attributes
        sceneid : str
            Sentinel-2 sceneid.

    Returns
        out : dict
            Dictionary with the sceneid and its image bounds.
    """
    scene_params = _sentinel_parse_scene_id(sceneid)
    sentinel_address = "{}/{}".format(SENTINEL_BUCKET, scene_params["key"])
    preview = "{}/preview.jp2".format(sentinel_address)
    with rasterio.open(preview) as src:
        wgs_bounds = transform_bounds(*[src.crs, "epsg:4326"] + list(src.bounds),
                                      densify_pts=21)
    return {"sceneid": sceneid, "bounds": list(wgs_bounds)}
|
def humanize(t):
    """Render a number of seconds as an approximate human phrase:
    'now', 'in a minute', 'in 2 minutes', 'in an hour', 'in 2 hours'...

    Minutes and hours are rounded UP (61s -> 'in 2 minutes').
    """
    minutes, seconds = divmod(t, 60)
    if seconds:
        minutes += 1  # ceil minutes
    hours, minutes = divmod(minutes, 60)
    if minutes and hours:
        hours += 1  # ceil hours
    if hours == 1:
        return 'in an hour'
    if hours > 1:
        return 'in %d hours' % hours
    if minutes == 1:
        return 'in a minute'
    if minutes > 1:
        return 'in %d minutes' % minutes
    return 'now'
|
def p_type_def(self, p):
    '''type _ def : IDENT COLON OBJECT SEMI
    | IDENT COLON LCURLY enum _ list RCURLY SEMI'''
    # NOTE: the docstring above IS the PLY grammar rule -- its text is
    # parsed by yacc, so do not edit it without changing the grammar.
    # First production: p[0] = (type name, OBJECT token).
    # Second production: p[0] = (type name, enum list).
    if len(p) == 5:
        p[0] = (p[1], p[3])
    elif len(p) == 7:
        p[0] = (p[1], p[4])
|
def get_remainder_set(self, j):
    """Return the set of children with indices less than j of all
    ancestors of j.  The set C from (arXiv:1701.07072).

    :param int j: fermionic site index
    :return: children of j-ancestors, with indices less than j
    :rtype: list(FenwickNode)
    """
    # Runs in O(log(N) log(N)) where N is the number of qubits.
    return [child
            for ancestor in self.get_update_set(j)
            for child in ancestor.children
            if child.index < j]
|
def get_effective_rlzs(rlzs):
    """Group together realizations with the same unique identifier (uid)
    and yield the first representative of each group.

    The representative carries the summed weight of its whole group.
    Trivial realizations (every lt_uid path equal to '@') are skipped.
    """
    effective = []
    # NOTE(review): this groupby returns a dict (it is the project's
    # helper, not itertools.groupby) -- hence .items().
    for uid, group in groupby(rlzs, operator.attrgetter('uid')).items():
        rlz = group[0]
        if all(path == '@' for path in rlz.lt_uid):  # empty realization
            continue
        # First member represents the group, with the aggregate weight.
        effective.append(Realization(rlz.value, sum(r.weight for r in group),
                                     rlz.lt_path, rlz.ordinal, rlz.lt_uid))
    return effective
|
def get_streaming_playlist(cookie, path, video_type='M3U8_AUTO_480'):
    '''Get the playlist of a streaming resource (usually a video).

    By default an m3u8 playlist is returned, since it is the most
    widely supported format.

    path       - absolute path of the video
    video_type - video format; choose a different one depending on
                 bandwidth and the source file
    '''
    url = ''.join([
        const.PCS_URL,
        'file?method=streaming',
        '&path=', encoder.encode_uri_component(path),
        '&type=', video_type,
        '&app_id=250528',
    ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    return req.data if req else None
|
def query_log(self, filter=None, query=None, count=None, offset=None,
              sort=None, **kwargs):
    """Search the query and event log.

    Searches the query and event log to find query sessions that match
    the specified criteria.  Searching the **logs** endpoint uses the
    standard Discovery query syntax for the supported parameters.

    :param str filter: A cacheable query that excludes documents that
        don't mention the query content.  Filter searches are better
        for metadata-type searches and for assessing the concepts in
        the data set.
    :param str query: A query search returns all documents in your data
        set with full enrichments and full text, most relevant first.
        You cannot use **natural_language_query** and **query** at the
        same time.
    :param int count: Number of results to return.  The maximum for
        **count** and **offset** together in any one query is **10000**.
    :param int offset: Number of query results to skip at the
        beginning; subject to the same combined **10000** maximum.
    :param list[str] sort: Comma-separated list of fields to sort on,
        optionally prefixed with `-` (descending) or `+` (ascending,
        the default).
    :param dict headers: A `dict` containing the request headers.
    :return: A `DetailedResponse` containing the result, headers and
        HTTP status code.
    :rtype: DetailedResponse
    """
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('discovery', 'V1', 'query_log'))
    params = {
        'version': self.version,
        'filter': filter,
        'query': query,
        'count': count,
        'offset': offset,
        'sort': self._convert_list(sort),
    }
    return self.request(method='GET', url='/v1/logs', headers=headers,
                        params=params, accept_json=True)
|
def get_vocab(docs):
    """Build a DataFrame of every word in *docs* together with its POS
    tag, dependency label, entity information and sentiment."""
    if isinstance(docs, spacy.tokens.doc.Doc):
        # Allow passing a single parsed Doc instead of a sequence.
        return get_vocab([docs])
    vocab = {
        (tok.text, tok.pos_, tok.tag_, tok.dep_,
         tok.ent_type_, tok.ent_iob_, tok.sentiment)
        for doc in tqdm(docs)
        for tok in doc
    }
    # TODO: add ent type info and other flags, e.g. like_url, like_email, etc
    columns = 'word pos tag dep ent_type ent_iob sentiment'.split()
    return pd.DataFrame(sorted(vocab), columns=columns)
|
def all_from(cls, *args, **kwargs):
    """Query for items, passing PyQuery args through explicitly."""
    matched = cls._get_items(*args, **kwargs)
    return [cls(item=entry) for entry in matched.items()]
|
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_date_and_time_info(self, **kwargs):
    """Auto Generated Code.

    Build the XML tree ``logical_chassis_fwdl_status/output/
    cluster-fwdl-entries/fwdl-entries/date-and-time-info`` with the
    leaf text taken from the required ``date_and_time_info`` keyword,
    then pass it to the callback (``callback`` keyword, defaulting to
    ``self._callback``) and return its result.
    """
    # The original built ET.Element("config") and immediately overwrote
    # the variable; that dead assignment is removed.
    config = ET.Element("logical_chassis_fwdl_status")
    output = ET.SubElement(config, "output")
    cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
    fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
    date_and_time_info = ET.SubElement(fwdl_entries, "date-and-time-info")
    date_and_time_info.text = kwargs.pop('date_and_time_info')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def Metropolis(self, compute_target, mh_options):
    """Performs a certain number of Metropolis steps.

    Parameters
    ----------
    compute_target : function
        computes the target density for the proposed values
    mh_options : dict
        + 'type_prop': {'random_walk', 'independent'}
          type of proposal: either Gaussian random walk, or
          independent Gaussian
        + 'adaptive': bool
          If True, the covariance matrix of the random walk proposal
          is set to `rw_scale` times the weighted cov matrix of the
          particle sample (ignored if proposal is independent)
        + 'rw_scale': float (default=None)
          see above (ignored if proposal is independent)
        + 'indep_scale': float (default=1.1)
          for an independent proposal, the proposal distribution is
          Gaussian with mean set to the particle mean, cov set to
          `indep_scale` times particle covariance
        + 'nsteps': int (default: 0)
          number of steps; if 0, the number of steps is chosen
          adaptively as follows: we stop when the average distance
          between the starting points and the stopping points
          increases less than a certain fraction
        + 'delta_dist': float (default: 0.1)
          threshold for when nsteps = 0
    """
    opts = mh_options.copy()
    nsteps = opts.pop('nsteps', 0)
    delta_dist = opts.pop('delta_dist', 0.1)
    # The remaining options configure the proposal kernel.
    proposal = self.choose_proposal(**opts)
    xout = self.copy()
    # Scratch particle system holding the proposed values.
    xp = self.__class__(theta=np.empty_like(self.theta))
    step_ars = []  # acceptance rate of each MCMC step
    for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist):
        xp.arr[:, :], delta_lp = proposal.step(xout.arr)
        compute_target(xp)
        # MH log acceptance ratio; delta_lp corrects for proposal asymmetry.
        lp_acc = xp.lpost - xout.lpost + delta_lp
        accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc)
        # Copy accepted proposals into the running state.
        xout.copyto(xp, where=accept)
        step_ars.append(np.mean(accept))
    xout.acc_rates = self.acc_rates + [step_ars]
    return xout
|
def send_result(self, return_code, output, service_description='', time_stamp=0, specific_servers=None):
    '''Send a check result to the Shinken WS servers.

    Posts the result to every requested (and available) configured
    server.  On connection failure a server is flagged unavailable;
    results for unavailable servers with caching enabled are appended
    to their CSV cache instead.
    NOTE: Python 2 code (``except ..., error`` syntax).
    '''
    if time_stamp == 0:
        # Default to "now" when no timestamp was supplied.
        time_stamp = int(time.time())
    if specific_servers == None:
        specific_servers = self.servers
    else:
        # Only target servers that are both requested and configured.
        specific_servers = set(self.servers).intersection(specific_servers)
    for server in specific_servers:
        post_data = {}
        post_data['time_stamp'] = time_stamp
        post_data['host_name'] = self.servers[server]['custom_fqdn']
        post_data['service_description'] = service_description
        post_data['return_code'] = return_code
        post_data['output'] = output
        if self.servers[server]['availability']:
            url = '%s://%s:%s%s' % (self.servers[server]['protocol'], self.servers[server]['host'], self.servers[server]['port'], self.servers[server]['uri'])
            auth = (self.servers[server]['username'], self.servers[server]['password'])
            try:
                response = requests.post(url, auth=auth, headers=self.http_headers, verify=self.servers[server]['verify'], timeout=self.servers[server]['timeout'], data=post_data)
                if response.status_code == 400:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - The content of the WebService call is incorrect", server, response.status_code)
                elif response.status_code == 401:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - You must provide an username and password", server, response.status_code)
                elif response.status_code == 403:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - The username or password is wrong", server, response.status_code)
                elif response.status_code != 200:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s", server, response.status_code)
            except (requests.ConnectionError, requests.Timeout), error:
                # Mark the server down so subsequent results go to the cache.
                self.servers[server]['availability'] = False
                LOG.error(error)
        else:
            LOG.error("[ws_shinken][%s]: Data not sent, server is unavailable", server)
        if self.servers[server]['availability'] == False and self.servers[server]['cache'] == True:
            self.servers[server]['csv'].writerow(post_data)
            LOG.info("[ws_shinken][%s]: Data cached", server)
|
def get_validation_data(doc):
    """Validate the docstring.

    Parameters
    ----------
    doc : Docstring
        A Docstring object with the given function name.

    Returns
    -------
    tuple
        errors : list of tuple
            Errors occurred during validation.
        warnings : list of tuple
            Warnings occurred during validation.
        examples_errs : str
            Examples usage displayed along the error, otherwise empty
            string.

    Notes
    -----
    The error codes are defined as:
    - First two characters: Section where the error happens:
       * GL: Global (no section, like section ordering errors)
       * SS: Short summary
       * ES: Extended summary
       * PR: Parameters
       * RT: Returns
       * YD: Yields
       * RS: Raises
       * WN: Warns
       * SA: See Also
       * NT: Notes
       * RF: References
       * EX: Examples
    - Last two characters: Numeric error code inside the section

    For example, EX02 is the second codified error in the Examples
    section (which in this case is assigned to examples that do not
    pass the tests).  The error codes, their corresponding error
    messages, and the details on how they are validated, are not
    documented more than in the source code of this function.
    """
    errs = []
    wrns = []
    # GL08: no docstring at all -- nothing else can be checked.
    if not doc.raw_doc:
        errs.append(error('GL08'))
        return errs, wrns, ''
    # Global whitespace layout checks (GL01-GL03).
    if doc.start_blank_lines != 1:
        errs.append(error('GL01'))
    if doc.end_blank_lines != 1:
        errs.append(error('GL02'))
    if doc.double_blank_lines:
        errs.append(error('GL03'))
    mentioned_errs = doc.mentioned_private_classes
    if mentioned_errs:
        errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs)))
    # GL05: tabs are not allowed in docstring indentation.
    for line in doc.raw_doc.splitlines():
        if re.match("^ *\t", line):
            errs.append(error('GL05', line_with_tabs=line.lstrip()))
    # GL06/GL07: unknown section names and wrong section order.
    unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS]
    for section in unexpected_sections:
        errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS)))
    correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles]
    if correct_order != doc.section_titles:
        errs.append(error('GL07', correct_sections=', '.join(correct_order)))
    if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. deprecated:: ')):
        errs.append(error('GL09'))
    # Short summary checks (SS01-SS06).
    if not doc.summary:
        errs.append(error('SS01'))
    else:
        if not doc.summary[0].isupper():
            errs.append(error('SS02'))
        if doc.summary[-1] != '.':
            errs.append(error('SS03'))
        if doc.summary != doc.summary.lstrip():
            errs.append(error('SS04'))
        # SS05: summary should start with an infinitive, not third person
        # (heuristic: first word ending in 's').
        elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'):
            errs.append(error('SS05'))
        if doc.num_summary_lines > 1:
            errs.append(error('SS06'))
    if not doc.extended_summary:
        wrns.append(('ES01', 'No extended summary found'))
    # PR01: Parameters not documented
    # PR02: Unknown parameters
    # PR03: Wrong parameters order
    errs += doc.parameter_mismatches
    for param in doc.doc_parameters:
        if not param.startswith("*"):  # Check can ignore var / kwargs
            if not doc.parameter_type(param):
                if ':' in param:
                    errs.append(error('PR10', param_name=param.split(':')[0]))
                else:
                    errs.append(error('PR04', param_name=param))
            else:
                if doc.parameter_type(param)[-1] == '.':
                    errs.append(error('PR05', param_name=param))
                # PR06: prefer the abbreviated type names in docs.
                common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')]
                for wrong_type, right_type in common_type_errors:
                    if wrong_type in doc.parameter_type(param):
                        errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type))
            # PR07-PR09: parameter description present / capitalized / ends
            # with a period.
            # NOTE(review): indentation reconstructed (source was
            # whitespace-mangled); placed inside the startswith("*") guard
            # to match the upstream layout -- confirm.
            if not doc.parameter_desc(param):
                errs.append(error('PR07', param_name=param))
            else:
                if not doc.parameter_desc(param)[0].isupper():
                    errs.append(error('PR08', param_name=param))
                if doc.parameter_desc(param)[-1] != '.':
                    errs.append(error('PR09', param_name=param))
    # Returns / Yields checks (RT01-RT05, YD01) for callables only.
    if doc.is_function_or_method:
        if not doc.returns:
            if doc.method_returns_something:
                errs.append(error('RT01'))
        else:
            if len(doc.returns) == 1 and doc.returns[0].name:
                errs.append(error('RT02'))
            for name_or_type, type_, desc in doc.returns:
                if not desc:
                    errs.append(error('RT03'))
                else:
                    desc = ' '.join(desc)
                    if not desc[0].isupper():
                        errs.append(error('RT04'))
                    if not desc.endswith('.'):
                        errs.append(error('RT05'))
        if not doc.yields and 'yield' in doc.method_source:
            errs.append(error('YD01'))
    # See Also checks (SA01-SA05).
    if not doc.see_also:
        wrns.append(error('SA01'))
    else:
        for rel_name, rel_desc in doc.see_also.items():
            if rel_desc:
                if not rel_desc.endswith('.'):
                    errs.append(error('SA02', reference_name=rel_name))
                if not rel_desc[0].isupper():
                    errs.append(error('SA03', reference_name=rel_name))
            else:
                errs.append(error('SA04', reference_name=rel_name))
            if rel_name.startswith('pandas.'):
                errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):]))
    # Examples checks (EX01-EX04): doctests, pep8, and banned imports.
    examples_errs = ''
    if not doc.examples:
        wrns.append(error('EX01'))
    else:
        examples_errs = doc.examples_errors
        if examples_errs:
            errs.append(error('EX02', doctest_log=examples_errs))
        for err in doc.validate_pep8():
            errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else ''))
        examples_source_code = ''.join(doc.examples_source_code)
        for wrong_import in ('numpy', 'pandas'):
            if 'import {}'.format(wrong_import) in examples_source_code:
                errs.append(error('EX04', imported_library=wrong_import))
    return errs, wrns, examples_errs
|
def size(self, width=None, height=None):
    u'''Set or get the console window size.

    With both *width* and *height* given, resizes the screen buffer
    (never below the current visible window size) and returns None.
    With no arguments, returns the current buffer size as
    ``(columns, rows)``.  Returns None when the console query fails.
    '''
    info = CONSOLE_SCREEN_BUFFER_INFO()
    status = self.GetConsoleScreenBufferInfo(self.hout, byref(info))
    if not status:
        return None
    if width is not None and height is not None:
        # The buffer may never be smaller than the visible window.
        wmin = info.srWindow.Right - info.srWindow.Left + 1
        hmin = info.srWindow.Bottom - info.srWindow.Top + 1
        # print wmin, hmin
        width = max(width, wmin)
        height = max(height, hmin)
        # print width, height
        self.SetConsoleScreenBufferSize(self.hout, self.fixcoord(width, height))
    else:
        return (info.dwSize.X, info.dwSize.Y)
|
def dump(self, *args, **kwargs):
    """Dump a representation of the model's XML tree to standard
    output.  Extra arguments are forwarded to ``lxml.etree.dump``.
    """
    lxml.etree.dump(self._obj, *args, **kwargs)
|
def send_response_only(self, code, message=None):
    """Buffer the response status line only (no headers, no body)."""
    if message is None:
        # Fall back to the standard reason phrase for this code.
        message = self.responses[code][0] if code in self.responses else ''
    if self.request_version != 'HTTP/0.9':
        # HTTP/0.9 has no status line at all; otherwise append the
        # encoded status line to the (lazily created) header buffer.
        buffered = getattr(self, '_headers_buffer', None)
        if buffered is None:
            buffered = self._headers_buffer = []
        status_line = "%s %d %s\r\n" % (self.protocol_version, code, message)
        buffered.append(status_line.encode('latin-1', 'strict'))
|
def _add(self, replace, section, name, *args):
    """Add records.

    The first argument is the replace mode.  If false, RRs are added
    to an existing RRset; if true, the RRset is replaced with the
    specified contents.  The second argument is the section to add to.
    The third argument is always a name.  The other arguments can be:

        - rdataset...
        - ttl, rdata...
        - ttl, rdtype, string...
    """
    if isinstance(name, (str, unicode)):  # Python 2: accept both string types
        name = dns.name.from_text(name, None)
    if isinstance(args[0], dns.rdataset.Rdataset):
        # Form 1: one or more ready-made rdatasets.
        for rds in args:
            if replace:
                self.delete(name, rds.rdtype)
            for rd in rds:
                self._add_rr(name, rds.ttl, rd, section=section)
    else:
        args = list(args)
        ttl = int(args.pop(0))
        if isinstance(args[0], dns.rdata.Rdata):
            # Form 2: a ttl followed by rdata objects.
            if replace:
                self.delete(name, args[0].rdtype)
            for rd in args:
                self._add_rr(name, ttl, rd, section=section)
        else:
            # Form 3: ttl, rdtype, then rdata in text form.
            rdtype = args.pop(0)
            if isinstance(rdtype, str):
                rdtype = dns.rdatatype.from_text(rdtype)
            if replace:
                self.delete(name, rdtype)
            for s in args:
                rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s, self.origin)
                self._add_rr(name, ttl, rd, section=section)
|
def create_route(self, route_table_id, destination_cidr_block, gateway_id=None, instance_id=None):
    """Create a new route in a VPC route table.

    The route's target can be either a gateway attached to the VPC or
    a NAT instance in the VPC.

    :type route_table_id: str
    :param route_table_id: The ID of the route table for the route.
    :type destination_cidr_block: str
    :param destination_cidr_block: The CIDR address block used for the
        destination match.
    :type gateway_id: str
    :param gateway_id: The ID of the gateway attached to your VPC.
    :type instance_id: str
    :param instance_id: The ID of a NAT instance in your VPC.
    :rtype: bool
    :return: True if successful
    """
    params = {
        'RouteTableId': route_table_id,
        'DestinationCidrBlock': destination_cidr_block,
    }
    # A gateway target takes precedence over an instance target.
    if gateway_id is not None:
        params['GatewayId'] = gateway_id
    elif instance_id is not None:
        params['InstanceId'] = instance_id
    return self.get_status('CreateRoute', params)
|
def transform_single(self, data, center, i=0):
    """Return the rows of *data* inside the hypercube centered at
    *center*.

    Parameters
    ----------
    data : array-like
        Data to find entries in the cube.  Warning: the first column
        must be the index column.
    center : array-like
        Center point for the cube.  The cube spans
        ``[center - self.radius_, center + self.radius_]`` along the
        data columns selected by ``self.di_``.
    i : int, default 0
        Optional counter to aid in verbose debugging.
    """
    lowerbounds, upperbounds = center - self.radius_, center + self.radius_
    # Slice the hypercube: keep rows whose selected columns all lie
    # inside the bounds.  (Replaces the original
    # np.invert(np.any(entries == False, ...)) double negation with
    # the equivalent, clearer np.all.)
    entries = (data[:, self.di_] >= lowerbounds) & (data[:, self.di_] <= upperbounds)
    hypercube = data[np.all(entries, axis=1)]
    if self.verbose > 1:
        print("There are %s points in cube %s/%s" % (hypercube.shape[0], i + 1, len(self.centers_)))
    return hypercube
|
def has_nested_keys(mapping, k1, *more):
    """Return ``True`` if ``mapping[k1][k2]...[kN]`` is a valid chain
    of keys.

    When a single key is passed, this is just another way of writing
    ``k1 in mapping``.

    Example::

        >>> has_nested_keys({'a': {'x': 0}}, 'a', 'x')
        True
        >>> has_nested_keys({'a': {'x': 0}}, 'a', 'q')
        False
    """
    if k1 not in mapping:
        return False
    # Recurse into the nested mapping for the remaining keys.
    return has_nested_keys(mapping[k1], *more) if more else True
|
def length(self):
    """The total discretized length of every entity.

    Returns
    -------
    length : float
        Summed length of every entity.
    """
    total = sum(entity.length(self.vertices) for entity in self.entities)
    return float(total)
|
def rst_to_json ( text ) :
    """I convert Restructured Text with field lists into Dictionaries!

    Returns a JSON string: an array of records, one per RST title found,
    with each ``:field: value`` line stored as a key on the current record.
    TODO: Convert to text node approach."""
    records = [ ]          # all finished records, in document order
    last_type = None       # type tag of the most recently parsed field value
    key = None             # name of the field currently being accumulated
    data = { }             # the record currently being built
    directive = False      # True while inside an ".." RST directive body
    lines = text . splitlines ( )
    for index , line in enumerate ( lines ) : # check for directives
        if len ( line ) and line . strip ( ) . startswith ( ".." ) :
            directive = True
            continue
        # set the title
        if len ( line ) and ( line [ 0 ] in string . ascii_letters or line [ 0 ] . isdigit ( ) ) :
            directive = False
            try :
                # Only treat this as a title if the next line is an RST
                # underline made of divider characters.
                if lines [ index + 1 ] [ 0 ] not in DIVIDERS :
                    continue
            except IndexError :
                continue
            # Flush the previous record's pending field, then start fresh.
            data = text_cleanup ( data , key , last_type )
            data = { "title" : line . strip ( ) }
            records . append ( data )
            continue
        # Grab standard fields ( int , string , float )
        if len ( line ) and line [ 0 ] . startswith ( ":" ) :
            data = text_cleanup ( data , key , last_type )
            # NOTE(review): `index` here shadows the loop counter; safe only
            # because the counter is not read again within this iteration.
            index = line . index ( ":" , 1 )
            key = line [ 1 : index ]
            value = line [ index + 1 : ] . strip ( )
            data [ key ] , last_type = type_converter ( value )
            directive = False
            continue
        # Work on multi - line strings
        if len ( line ) and line [ 0 ] . startswith ( " " ) and directive == False :
            if not isinstance ( data [ key ] , str ) : # Not a string so continue on
                continue
            value = line . strip ( )
            if not len ( value ) : # empty string , continue on
                continue
            # add next line
            data [ key ] += "\n{}" . format ( value )
            continue
        # A blank line inside a multi-line string preserves a paragraph break.
        if last_type == STRING_TYPE and not len ( line ) :
            if key in data . keys ( ) :
                data [ key ] += "\n"
    return json . dumps ( records )
|
def _CreateImage ( media_service , opener , url ) :
"""Creates an image and uploads it to the server .
Args :
media _ service : a SudsServiceProxy instance for AdWords ' s MediaService .
opener : an OpenerDirector instance .
url : a str URL used to load image data .
Returns :
The image that was successfully uploaded ."""
|
# Note : The utf - 8 decode is for 2to3 Python 3 compatibility .
image_data = opener . open ( url ) . read ( ) . decode ( 'utf-8' )
image = { 'type' : 'IMAGE' , 'data' : image_data , 'xsi_type' : 'Image' }
return media_service . upload ( image ) [ 0 ]
|
def run_subprocess ( command ) :
    """Run *command* in a shell and return its output.

    :param command: the command to run, as a string (executed via the shell)
    :return: ``(stdout, stderr)`` decoded as utf-8 strings
    :raises subprocess.CalledProcessError: if the command exits non-zero;
        stderr and stdout are printed first to aid debugging
    """
    # subprocess.run supersedes the manual Popen/communicate dance; keep the
    # explicit utf-8 decode so the behavior does not depend on the locale.
    proc = subprocess . run ( command , shell = True , stdout = subprocess . PIPE , stderr = subprocess . PIPE )
    out = proc . stdout . decode ( 'utf-8' )
    err = proc . stderr . decode ( 'utf-8' )
    if proc . returncode != 0 :
        print ( 'STDERR from called program: {}' . format ( err ) )
        print ( 'STDOUT from called program: {}' . format ( out ) )
        raise subprocess . CalledProcessError ( proc . returncode , command )
    return out , err
|
def initialize_form ( self , instance , model , data = None , extra = None ) :
    """Takes a "finalized" query and generate it's form data"""
    model_fields = self . get_fields_from_model ( model , self . _filter_fields )
    initial = [ ]
    if instance :
        initial = [ AdvancedFilterQueryForm . _parse_query_dict ( field_data , model )
                    for field_data in instance . list_fields ( ) ]
    # Without `extra`, use the formset variant that adds no blank extra rows.
    formset_class = AFQFormSet if extra else AFQFormSetNoExtra
    self . fields_formset = formset_class ( data = data , initial = initial or None , model_fields = model_fields )
|
def from_xyz_string ( xyz_string ) :
    """Build a SymmOp from a symmetry-operation string.

    Args:
        xyz_string: string of the form 'x, y, z', '-x, -y, z',
            '-2y+1/2, 3x+1/2, z-y+1/2', etc.

    Returns:
        SymmOp
    """
    rot_matrix = np . zeros ( ( 3 , 3 ) )
    trans = np . zeros ( 3 )
    tokens = xyz_string . strip ( ) . replace ( " " , "" ) . lower ( ) . split ( "," )
    re_rot = re . compile ( r"([+-]?)([\d\.]*)/?([\d\.]*)([x-z])" )
    re_trans = re . compile ( r"([+-]?)([\d\.]+)/?([\d\.]*)(?![x-z])" )
    for i , tok in enumerate ( tokens ) :
        # build the rotation matrix
        for match in re_rot . finditer ( tok ) :
            factor = - 1 if match . group ( 1 ) == "-" else 1
            if match . group ( 2 ) != "" :
                if match . group ( 3 ) != "" :
                    factor *= float ( match . group ( 2 ) ) / float ( match . group ( 3 ) )
                else :
                    factor *= float ( match . group ( 2 ) )
            col = ord ( match . group ( 4 ) ) - 120  # 'x' -> 0, 'y' -> 1, 'z' -> 2
            rot_matrix [ i , col ] = factor
        # build the translation vector
        for match in re_trans . finditer ( tok ) :
            sign = - 1 if match . group ( 1 ) == "-" else 1
            if match . group ( 3 ) != "" :
                num = float ( match . group ( 2 ) ) / float ( match . group ( 3 ) )
            else :
                num = float ( match . group ( 2 ) )
            trans [ i ] = num * sign
    return SymmOp . from_rotation_and_translation ( rot_matrix , trans )
|
def peek ( self , size = - 1 ) :
    """Return buffered data without advancing the file position.

    Always returns at least one byte of data, unless at EOF.
    The exact number of bytes returned is unspecified.
    """
    self . _check_can_read ( )
    # Preserve the short-circuit: never try to fill the buffer once at EOF.
    if self . _mode == _MODE_READ_EOF :
        return b""
    if not self . _fill_buffer ( ) :
        return b""
    return self . _buffer
|
def get_port_stats ( port ) :
    """Iterate over connections and count states for specified port.

    :param port: port for which stats are collected
    :return: mapping of lowercase connection state name -> count
    """
    counts = defaultdict ( int )
    for conn in psutil . net_connections ( ) :
        # laddr is an (address, port) tuple; skip other ports.
        if conn . laddr [ 1 ] != port :
            continue
        counts [ conn . status . lower ( ) ] += 1
    return counts
|
def encode ( cls , value ) :
    """Convert a boolean value into something we can persist to redis.

    An empty byte string is the representation for False.

    :param value: bool
    :return: bytes
    :raises InvalidValue: if *value* is not (equal to) True or False
    """
    # Membership (equality) test deliberately kept: 1/0 compare equal to
    # True/False and are accepted, matching the original behavior.
    if value not in ( True , False ) :
        raise InvalidValue ( 'not a boolean' )
    if value :
        return b'1'
    return b''
|
def stop ( self ) :
    """Stop execution of all current and future payloads"""
    # If the runner never reached the running state (give startup a short
    # grace period), there is nothing to shut down.
    if not self . running . wait ( 0.2 ) :
        return
    self . _logger . debug ( 'runner disabled: %s' , self )
    with self . _lock :
        # Clear the running flag first, then block until the worker
        # acknowledges shutdown by setting the _stopped event.
        self . running . clear ( )
        self . _stopped . wait ( )
|
def _worker_fn ( samples , batchify_fn , dataset = None ) :
    """Function for processing data in worker process."""
    # pylint : disable = unused - argument
    # it is required that each worker process has to fork a new MXIndexedRecordIO handle
    # preserving dataset as global variable can save tons of overhead and is safe in new process
    global _worker_dataset
    batch = batchify_fn ( [ _worker_dataset [ idx ] for idx in samples ] )
    buffer = io . BytesIO ( )
    ForkingPickler ( buffer , pickle . HIGHEST_PROTOCOL ) . dump ( batch )
    return buffer . getvalue ( )
|
def delete_individual ( self , ind_obj ) :
    """Delete an individual from the database.

    Args:
        ind_obj (puzzle.models.Individual): initialized individual model

    Returns:
        The deleted individual model.
    """
    logger . info ( "Deleting individual {0} from database" . format ( ind_obj . ind_id ) )
    self . session . delete ( ind_obj )
    # Commit the deletion immediately.
    self . save ( )
    return ind_obj
|
def update_edge_todo ( self , elev_fn , dem_proc ) :
    """Update every edge's 'todo' data for *elev_fn* from the DEM processor."""
    for edge in self . edges [ elev_fn ] . values ( ) :
        edge . set_data ( 'todo' , data = dem_proc . edge_todo )
|
def getVariances ( self ) :
    """Return the per-term variances (diagonals of the covariance matrices)."""
    diagonals = [ self . Cr . K ( ) . diagonal ( ) ]
    if self . bgRE :
        # Background random effect contributes an extra term.
        diagonals . append ( self . Cg . K ( ) . diagonal ( ) )
    diagonals . append ( self . Cn . K ( ) . diagonal ( ) )
    return sp . array ( diagonals )
|
def path ( self , which = None ) :
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    bulk_resume
        /foreman_tasks/api/tasks/bulk_resume
    bulk_search
        /foreman_tasks/api/tasks/bulk_search
    summary
        /foreman_tasks/api/tasks/summary

    Otherwise, call ``super``.
    """
    if which in ( 'bulk_resume' , 'bulk_search' , 'summary' ) :
        base = super ( ForemanTask , self ) . path ( 'base' )
        return '{0}/{1}' . format ( base , which )
    return super ( ForemanTask , self ) . path ( which )
|
def action_setattr ( ) :
    """Creates a setter that will set the action attribute with context's key
    for name to the current value."""
    def action_setattr ( value , context , ** _params ) :
        # Assign `value` onto the action object under the attribute named by
        # the context's current key.
        setattr ( context [ "action" ] , context [ "key" ] , value )
        # NOTE(review): `_attr` is not defined anywhere in this block;
        # presumably it is a module-level helper defined elsewhere in the
        # file -- confirm it exists, otherwise calling this setter raises
        # NameError after the setattr has already taken effect.
        return _attr ( )
    return action_setattr
|
def watch ( self , resource , namespace = None , name = None , label_selector = None , field_selector = None , resource_version = None , timeout = None ) :
    """Stream watch events for *resource* from the Kubernetes API.

    :param resource: The API resource object that will be used to query the API
    :param namespace: The namespace to query
    :param name: The name of the resource instance to query
    :param label_selector: The label selector with which to filter results
    :param field_selector: The field selector with which to filter results
    :param resource_version: The version with which to filter results. Only
        events with a resource_version greater than this value will be returned
    :param timeout: The amount of time in seconds to wait before terminating
        the stream
    :return: yields event dicts with keys 'type' (e.g. "ADDED", "DELETED"),
        'raw_object' (a dict of the watched object) and 'object'
        (a ResourceInstance wrapping raw_object).

    Example:
        client = DynamicClient(k8s_client)
        v1_pods = client.resources.get(api_version='v1', kind='Pod')
        for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5):
            print(e['type'])
            print(e['object'].metadata)
    """
    for event in watch . Watch ( ) . stream (
            resource . get ,
            namespace = namespace ,
            name = name ,
            field_selector = field_selector ,
            label_selector = label_selector ,
            resource_version = resource_version ,
            serialize = False ,
            timeout_seconds = timeout ) :
        # Wrap the raw dict so callers get attribute-style access.
        event [ 'object' ] = ResourceInstance ( resource , event [ 'object' ] )
        yield event
|
def delete_collection_custom_resource_definition ( self , ** kwargs ) : # noqa : E501
    """delete collection of CustomResourceDefinition  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously, in which case the request thread is returned:

        >>> thread = api.delete_collection_custom_resource_definition(async_req=True)
        >>> result = thread.get()

    Accepted keyword parameters mirror the Kubernetes list/delete-collection
    API: ``pretty``, ``include_uninitialized``, ``_continue``,
    ``field_selector``, ``label_selector``, ``limit``, ``resource_version``,
    ``timeout_seconds`` and ``watch``.

    :return: V1Status (or the request thread when ``async_req`` is True)
    """
    # Callers of this wrapper always want just the payload, not the
    # (data, status, headers) tuple.
    kwargs [ '_return_http_data_only' ] = True
    if kwargs . get ( 'async_req' ) :
        return self . delete_collection_custom_resource_definition_with_http_info ( ** kwargs )  # noqa : E501
    return self . delete_collection_custom_resource_definition_with_http_info ( ** kwargs )  # noqa : E501
|
def rect ( self ) :
    """A dictionary with the size and location of the element."""
    if self . _w3c :
        # W3C-compliant drivers expose the rect endpoint directly.
        return self . _execute ( Command . GET_ELEMENT_RECT ) [ 'value' ]
    # Legacy drivers: merge the separate size and location dicts.
    combined = self . size . copy ( )
    combined . update ( self . location )
    return combined
|
def get_advanced_search_form ( self , data ) :
    """Hook to dynamically change the advanced search form"""
    if self . get_advanced_search_form_class ( ) :
        # Instantiate the form class with the submitted data and cache it.
        self . _advanced_search_form = self . get_advanced_search_form_class ( ) ( data = data )
        return self . _advanced_search_form
|
def repr_args ( args ) :
    """formats a list of function arguments prettily but as working code

    (kwargs are tuples (argname, argvalue))
    """
    parts = [ ]
    for arg in args :
        if isinstance ( arg , tuple ) and len ( arg ) == 2 :
            name , val = arg
            # todo : exclude this key if value is its default
            parts . append ( "%s=%s" % ( name , repr_arg ( val ) ) )
        else :
            parts . append ( repr_arg ( arg ) )
    return ', ' . join ( parts )
|
def check_venv ( self ) :
    """Ensure we're inside a virtualenv."""
    if self . zappa :
        venv = self . zappa . get_current_venv ( )
    else :
        # Just for ` init ` , when we don ' t have settings yet .
        venv = Zappa . get_current_venv ( )
    if venv :
        return
    raise ClickException (
        click . style ( "Zappa" , bold = True ) + " requires an " +
        click . style ( "active virtual environment" , bold = True , fg = "red" ) + "!\n" +
        "Learn more about virtual environments here: " +
        click . style ( "http://docs.python-guide.org/en/latest/dev/virtualenvs/" , bold = False , fg = "cyan" ) )
|
def create_from_yaml ( self , name , yamlfile ) :
    """Create new environment using conda-env via a yaml specification file.

    Unlike other methods, this calls conda-env, requires a named environment
    and uses channels as defined in rcfiles.

    Parameters
    ----------
    name : string
        Environment name
    yamlfile : string
        Path to yaml file with package spec (as created by conda env export)
    """
    logger . debug ( str ( ( name , yamlfile ) ) )
    return self . _call_and_parse ( [ 'env' , 'create' , '-n' , name , '-f' , yamlfile , '--json' ] )
|
def safe_dump ( data , abspath , pk_protocol = py23 . pk_protocol , enable_verbose = True ) :
    """Atomic variant of :func:`dump` that silently overwrites existing files.

    If the program is interrupted while :func:`dump` writes, the destination
    is left incomplete and the original content is lost. To guarantee an
    atomic write (all-or-nothing), the pickle is first written to a temporary
    file which is then renamed over the destination; an interruption leaves
    at worst an unfinished temporary file and the original untouched.
    """
    target = lower_ext ( str ( abspath ) )
    tmp_path = "%s.tmp" % target
    dump ( data , tmp_path , pk_protocol = pk_protocol , enable_verbose = enable_verbose )
    # Rename over the destination; on POSIX this replace is atomic.
    shutil . move ( tmp_path , target )
|
def revert_snapshot ( name , vm_snapshot = None , cleanup = False , ** kwargs ) :
    '''Revert snapshot to the previous from current (if available) or to the specific.

    :param name: domain name
    :param vm_snapshot: name of the snapshot to revert
    :param cleanup: Remove all newer than reverted snapshots. Values: True or False (default False).
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.revert <domain>
        salt '*' virt.revert <domain> <snapshot>
    '''
    ret = dict ( )
    conn = __get_conn ( ** kwargs )
    domain = _get_domain ( conn , name )
    snapshots = domain . listAllSnapshots ( )
    # Sort snapshots newest-first by creation timestamp.
    _snapshots = list ( )
    for snap_obj in snapshots :
        _snapshots . append ( { 'idx' : _parse_snapshot_description ( snap_obj , unix_time = True ) [ 'created' ] , 'ptr' : snap_obj } )
    snapshots = [ w_ptr [ 'ptr' ] for w_ptr in sorted ( _snapshots , key = lambda item : item [ 'idx' ] , reverse = True ) ]
    del _snapshots
    if not snapshots :
        conn . close ( )
        raise CommandExecutionError ( 'No snapshots found' )
    elif len ( snapshots ) == 1 :
        conn . close ( )
        raise CommandExecutionError ( 'Cannot revert to itself: only one snapshot is available.' )
    snap = None
    for p_snap in snapshots :
        if not vm_snapshot :
            # No explicit target: pick the snapshot immediately older than
            # the current one (list is newest-first).
            if p_snap . isCurrent ( ) and snapshots [ snapshots . index ( p_snap ) + 1 : ] :
                snap = snapshots [ snapshots . index ( p_snap ) + 1 : ] [ 0 ]
                break
        elif p_snap . getName ( ) == vm_snapshot :
            snap = p_snap
            break
    if not snap :
        conn . close ( )
        # BUG FIX: the original referenced the undefined name `snapshot`
        # here, raising NameError instead of the intended error message.
        raise CommandExecutionError (
            'Snapshot "{0}" not found' . format ( vm_snapshot ) if vm_snapshot
            else 'No more previous snapshots available' )
    elif snap . isCurrent ( ) :
        conn . close ( )
        raise CommandExecutionError ( 'Cannot revert to the currently running snapshot.' )
    domain . revertToSnapshot ( snap )
    ret [ 'reverted' ] = snap . getName ( )
    if cleanup :
        # Delete every snapshot newer than the one we reverted to.
        delete = list ( )
        for p_snap in snapshots :
            if p_snap . getName ( ) != snap . getName ( ) :
                delete . append ( p_snap . getName ( ) )
                p_snap . delete ( )
            else :
                break
        ret [ 'deleted' ] = delete
    else :
        ret [ 'deleted' ] = 'N/A'
    conn . close ( )
    return ret
|
def move_into ( self , destination_folder ) : # type : ( Folder ) - > None
    """Move this Folder into *destination_folder*, making it a child folder.

    Raises:
        AuthError: Raised if Outlook returns a 401, generally caused by an
            invalid or expired access token.

    Args:
        destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>`
            that should become the parent

    Returns:
        A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now
        inside of the destination_folder.
    """
    endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self . id + '/move'
    payload = '{ "DestinationId": "' + destination_folder . id + '"}'
    response = requests . post ( endpoint , headers = self . headers , data = payload )
    if check_response ( response ) :
        return self . _json_to_folder ( self . account , response . json ( ) )
|
def from_bboxes ( bboxes ) :
    """Compute a BoundingBox enclosing all specified bboxes.

    :param bboxes: a list of BoundingBoxes
    :return: BoundingBox
    """
    # Generator expressions avoid building four throwaway lists.
    return BoundingBox (
        north = max ( b . north for b in bboxes ) ,
        west = min ( b . west for b in bboxes ) ,
        south = min ( b . south for b in bboxes ) ,
        east = max ( b . east for b in bboxes ) ,
    )
|
def update_gradients_diag ( self , dL_dKdiag , X ) :
    """Derivative of the diagonal of the covariance matrix w.r.t. the parameters."""
    # The diagonal does not depend on period or lengthscale.
    self . period . gradient = 0
    self . lengthscale . gradient = 0
    # Only the variance contributes.
    self . variance . gradient = np . sum ( dL_dKdiag )
|
def acc_difference ( points ) :
"""Computes the accelaration difference between each adjacent point
Args :
points ( : obj : ` Point ` )
Returns :
: obj : ` list ` of int : Indexes of changepoints"""
|
data = [ 0 ]
for before , after in pairwise ( points ) :
data . append ( before . acc - after . acc )
return data
|
def cli ( env , identifier , details ) :
    """Invoices and all that mess"""
    manager = AccountManager ( env . client )
    top_items = manager . get_billing_items ( identifier )
    table = formatting . Table (
        [ "Item Id" , "Category" , "Description" , "Single" , "Monthly" , "Create Date" , "Location" ] ,
        title = "Invoice %s" % identifier )
    table . align [ 'category' ] = 'l'
    table . align [ 'description' ] = 'l'
    for billing_item in top_items :
        fqdn = "%s.%s" % ( billing_item . get ( 'hostName' , '' ) , billing_item . get ( 'domainName' , '' ) )
        # category id = 2046 , ram _ usage doesn ' t have a name . . .
        category = utils . lookup ( billing_item , 'category' , 'name' ) or billing_item . get ( 'categoryCode' )
        description = nice_string ( billing_item . get ( 'description' ) )
        if fqdn != '.' :
            description = "%s (%s)" % ( billing_item . get ( 'description' ) , fqdn )
        table . add_row ( [
            billing_item . get ( 'id' ) ,
            category ,
            nice_string ( description ) ,
            "$%.2f" % float ( billing_item . get ( 'oneTimeAfterTaxAmount' ) ) ,
            "$%.2f" % float ( billing_item . get ( 'recurringAfterTaxAmount' ) ) ,
            utils . clean_time ( billing_item . get ( 'createDate' ) , out_format = "%Y-%m-%d" ) ,
            utils . lookup ( billing_item , 'location' , 'name' ) ,
        ] )
        if details :
            # One indented row per child line item.
            for child in billing_item . get ( 'children' , [ ] ) :
                table . add_row ( [
                    '>>>' ,
                    utils . lookup ( child , 'category' , 'name' ) ,
                    nice_string ( child . get ( 'description' ) ) ,
                    "$%.2f" % float ( child . get ( 'oneTimeAfterTaxAmount' ) ) ,
                    "$%.2f" % float ( child . get ( 'recurringAfterTaxAmount' ) ) ,
                    '---' ,
                    '---' ,
                ] )
    env . fout ( table )
|
def geo_name ( self ) :
    """Return a name of the state or county, or, for other lower levels,
    the name of the level type in the county.

    :return: human-readable name for this geography
    """
    if self . level == 'county' :
        return str ( self . county_name )
    if self . level == 'state' :
        return self . state_name
    # Lower levels: qualify by county or state when available.
    if hasattr ( self , 'county' ) :
        return "{} in {}" . format ( self . level , str ( self . county_name ) )
    if hasattr ( self , 'state' ) :
        return "{} in {}" . format ( self . level , self . state_name )
    return "a {}" . format ( self . level )
|
def open_tunnel ( * args , ** kwargs ) :
    """Open an SSH Tunnel, wrapper for :class:`SSHTunnelForwarder`.

    Arguments:
        destination (Optional[tuple]):
            SSH server's IP address and port in the format
            (``ssh_address``, ``ssh_port``)

    Keyword Arguments:
        debug_level (Optional[int or str]):
            log level for :class:`logging.Logger` instance, i.e. ``DEBUG``
        skip_tunnel_checkup (boolean):
            Enable/disable the local side check and populate
            :attr:`~SSHTunnelForwarder.tunnel_is_up`. Default: True

    .. note::
        A value of ``debug_level`` set to 1 == ``TRACE`` enables tracing mode
    .. note::
        See :class:`SSHTunnelForwarder` for the remaining keyword arguments

    **Example**::

        from sshtunnel import open_tunnel
        with open_tunnel(SERVER,
                         ssh_username=SSH_USER,
                         ssh_port=22,
                         ssh_password=SSH_PASSWORD,
                         remote_bind_address=(REMOTE_HOST, REMOTE_PORT),
                         local_bind_address=('', LOCAL_PORT)) as server:
            def do_something(port):
                pass
            print("LOCAL PORTS:", server.local_bind_port)
            do_something(server.local_bind_port)
    """
    # Attach a console handler to the logger or create one if not passed
    kwargs [ 'logger' ] = create_logger ( logger = kwargs . get ( 'logger' , None ) ,
                                        loglevel = kwargs . pop ( 'debug_level' , None ) )
    address = kwargs . pop ( 'ssh_address_or_host' , None )
    # Check if deprecated arguments ssh _ address or ssh _ host were used
    for legacy_name in ( 'ssh_address' , 'ssh_host' ) :
        address = SSHTunnelForwarder . _process_deprecated ( address , legacy_name , kwargs )
    ssh_port = kwargs . pop ( 'ssh_port' , None )
    skip_tunnel_checkup = kwargs . pop ( 'skip_tunnel_checkup' , True )
    if not args :
        # Build the positional destination from the keyword forms.
        args = ( address , ) if isinstance ( address , tuple ) else ( ( address , ssh_port ) , )
    forwarder = SSHTunnelForwarder ( * args , ** kwargs )
    forwarder . skip_tunnel_checkup = skip_tunnel_checkup
    return forwarder
|
def cut_sequences_relative ( records , slices , record_id ) :
    """Cuts records to slices, indexed by non-gap positions in record_id"""
    with _record_buffer ( records ) as buffered :
        try :
            # Locate the reference record whose non-gap positions anchor the slices.
            reference = next ( rec for rec in buffered ( ) if rec . id == record_id )
        except StopIteration :
            raise ValueError ( "Record with id {0} not found." . format ( record_id ) )
        new_slices = _update_slices ( reference , slices )
        for cut_record in multi_cut_sequences ( buffered ( ) , new_slices ) :
            yield cut_record
|
def set_legend ( self , legend ) :
    """legend needs to be a list, tuple or None"""
    assert ( legend is None or isinstance ( legend , ( list , tuple ) ) )
    # URL-quote each entry; an empty/None legend clears the attribute.
    self . legend = [ quote ( entry ) for entry in legend ] if legend else None
|
def wait_for_port ( self , port , timeout = 10 , ** probe_kwargs ) :
    """Block until *port* starts accepting connections.

    Raises ProbeTimeout if the timeout is reached.

    :param port: int, port number
    :param timeout: int or float (seconds), time to wait for establishing
        the connection
    :param probe_kwargs: arguments passed to Probe constructor
    :return: None
    """
    probe = Probe ( timeout = timeout , fnc = functools . partial ( self . is_port_open , port ) , ** probe_kwargs )
    probe . run ( )
|
def get_new_T_PI_parameters ( self , event ) :
    """calcualte statisics when temperatures are selected"""
    # remember the last saved interpretation
    if "saved" in list ( self . pars . keys ( ) ) :
        if self . pars [ 'saved' ] :
            self . last_saved_pars = { }
            for key in list ( self . pars . keys ( ) ) :
                self . last_saved_pars [ key ] = self . pars [ key ]
        self . pars [ 'saved' ] = False
    # Read the user-selected lower/upper temperature bounds from the GUI.
    t1 = self . tmin_box . GetValue ( )
    t2 = self . tmax_box . GetValue ( )
    if ( t1 == "" or t2 == "" ) :
        print ( "empty interpretation bounds" )
        return
    if float ( t2 ) < float ( t1 ) :
        print ( "upper bound less than lower bound" )
        return
    index_1 = self . T_list . index ( t1 )
    index_2 = self . T_list . index ( t2 )
    # if ( index _ 2 - index _ 1 ) + 1 > = self . acceptance _ criteria [ ' specimen _ int _ n ' ] :
    # require at least 3 measurement steps inside the selected interval
    if ( index_2 - index_1 ) + 1 >= 3 :
        if self . Data [ self . s ] [ 'T_or_MW' ] != "MW" :
            # thermal experiment: the GUI shows Celsius but the analysis
            # routine expects Kelvin, hence the +273 offsets
            self . pars = thellier_gui_lib . get_PI_parameters ( self . Data , self . acceptance_criteria , self . preferences , self . s , float ( t1 ) + 273. , float ( t2 ) + 273. , self . GUI_log , THERMAL , MICROWAVE )
            self . Data [ self . s ] [ 'pars' ] = self . pars
        else :
            # microwave experiment: bounds are used as-is
            self . pars = thellier_gui_lib . get_PI_parameters ( self . Data , self . acceptance_criteria , self . preferences , self . s , float ( t1 ) , float ( t2 ) , self . GUI_log , THERMAL , MICROWAVE )
            self . Data [ self . s ] [ 'pars' ] = self . pars
        self . update_GUI_with_new_interpretation ( )
    self . Add_text ( self . s )
|
def stripQuotes ( value ) :
    """Strip single or double quotes off string; remove embedded quote pairs"""
    first = value [ : 1 ]
    if first == '"' :
        value = value [ 1 : ]
        if value . endswith ( '"' ) :
            value = value [ : - 1 ]
        # replace " " with "
        value = re . sub ( _re_doubleq2 , '"' , value )
    elif first == "'" :
        value = value [ 1 : ]
        if value . endswith ( "'" ) :
            value = value [ : - 1 ]
        # replace ' ' with '
        value = re . sub ( _re_singleq2 , "'" , value )
    return value
|
def prompt_for_bilateral_choice ( self , prompt , option1 , option2 ) :
    """Prompt the user until the response matches one of the two options.

    NOTE: The user input verification is case-insensitive, but will return
    the original case provided by the given options.
    """
    if prompt is None :
        prompt = ''
    prompt = prompt . rstrip ( ) + ' (' + option1 + '/' + option2 + ')'
    while True :
        answer = str ( self . __screen . input ( prompt ) ) . lower ( )
        if answer == option1 . lower ( ) :
            return option1
        if answer == option2 . lower ( ) :
            return option2
|
def get_forwarders ( resolv = "resolv.conf" ) :
    """Find the forwarders in /etc/resolv.conf, default to 8.8.8.8 and
    8.8.4.4

    :param resolv: path to a resolv.conf-style file to parse
    :return: list of nameserver address strings (never empty)
    """
    ns = [ ]
    if os . path . exists ( resolv ) :
        # Context manager fixes the original's leaked file handle.
        with open ( resolv ) as fh :
            for l in fh :
                if l . startswith ( "nameserver" ) :
                    address = l . strip ( ) . split ( " " , 2 ) [ 1 ]
                    # forwarding to ourselves would be bad
                    if not address . startswith ( "127" ) :
                        ns . append ( address )
    if not ns :
        ns = [ '8.8.8.8' , '8.8.4.4' ]
    return ns
|
def initialize ( self , max_batch_size : int , max_input_length : int , get_max_output_length_function : Callable ) :
    """Delayed construction of modules to ensure multiple Inference models can agree on computing a common
    maximum output length.

    :param max_batch_size: Maximum batch size.
    :param max_input_length: Maximum input length.
    :param get_max_output_length_function: Callable to compute maximum output length.
    """
    self . max_batch_size = max_batch_size
    self . max_input_length = max_input_length
    # Warn (but do not fail) when inference inputs may exceed what the
    # model saw during training.
    if self . max_input_length > self . training_max_seq_len_source :
        logger . warning ( "Model was only trained with sentences up to a length of %d, " "but a max_input_len of %d is used." , self . training_max_seq_len_source , self . max_input_length )
    self . get_max_output_length = get_max_output_length_function
    # check the maximum supported length of the encoder & decoder :
    if self . max_supported_seq_len_source is not None :
        utils . check_condition ( self . max_input_length <= self . max_supported_seq_len_source , "Encoder only supports a maximum length of %d" % self . max_supported_seq_len_source )
    if self . max_supported_seq_len_target is not None :
        decoder_max_len = self . get_max_output_length ( max_input_length )
        utils . check_condition ( decoder_max_len <= self . max_supported_seq_len_target , "Decoder only supports a maximum length of %d, but %d was requested. Note that the " "maximum output length depends on the input length and the source/target length " "ratio observed during training." % ( self . max_supported_seq_len_target , decoder_max_len ) )
    # Build encoder/decoder modules and bind them once at the maximum data
    # shapes so memory is allocated up front (inference only: no gradients).
    self . encoder_module , self . encoder_default_bucket_key = self . _get_encoder_module ( )
    self . decoder_module , self . decoder_default_bucket_key = self . _get_decoder_module ( )
    max_encoder_data_shapes = self . _get_encoder_data_shapes ( self . encoder_default_bucket_key , self . max_batch_size )
    # Decoder batch is expanded by the beam size (one row per hypothesis).
    max_decoder_data_shapes = self . _get_decoder_data_shapes ( self . decoder_default_bucket_key , self . max_batch_size * self . beam_size )
    self . encoder_module . bind ( data_shapes = max_encoder_data_shapes , for_training = False , grad_req = "null" )
    self . decoder_module . bind ( data_shapes = max_decoder_data_shapes , for_training = False , grad_req = "null" )
    # Load trained parameters into both modules.
    self . load_params_from_file ( self . params_fname )
    self . encoder_module . init_params ( arg_params = self . params , aux_params = self . aux_params , allow_missing = False )
    self . decoder_module . init_params ( arg_params = self . params , aux_params = self . aux_params , allow_missing = False )
    if self . cache_output_layer_w_b :
        if self . output_layer . weight_normalization : # precompute normalized output layer weight imperatively
            assert self . output_layer . weight_norm is not None
            weight = self . params [ self . output_layer . weight_norm . weight . name ] . as_in_context ( self . context )
            scale = self . params [ self . output_layer . weight_norm . scale . name ] . as_in_context ( self . context )
            self . output_layer_w = self . output_layer . weight_norm ( weight , scale )
        else :
            self . output_layer_w = self . params [ self . output_layer . w . name ] . as_in_context ( self . context )
        self . output_layer_b = self . params [ self . output_layer . b . name ] . as_in_context ( self . context )
|
def _lim_moment ( self , u , order = 1 ) :
"""This method calculates the kth order limiting moment of
the distribution . It is given by -
E ( u ) = Integral ( - inf to u ) [ ( x ^ k ) * pdf ( x ) dx ] + ( u ^ k ) ( 1 - cdf ( u ) )
where , pdf is the probability density function and cdf is the
cumulative density function of the distribution .
Reference
Klugman , S . A . , Panjer , H . H . and Willmot , G . E . ,
Loss Models , From Data to Decisions , Fourth Edition ,
Wiley , definition 3.5 and equation 3.8.
Parameters
u : float
The point at which the moment is to be calculated .
order : int
The order of the moment , default is first order ."""
|
def fun ( x ) :
return np . power ( x , order ) * self . factor . pdf ( x )
return ( integrate . quad ( fun , - np . inf , u ) [ 0 ] + np . power ( u , order ) * ( 1 - self . factor . cdf ( u ) ) )
|
def set_bounds(self, start, stop):
    """Apply the (start, stop) bounds to every instrument in the constellation."""
    bounds = (start, stop)
    for inst in self.instruments:
        inst.bounds = bounds
|
def expand_defaults(self, client=False, getenv=True, getshell=True):
    """Compile env, client_env, shell and client_shell commands."""
    default = self._default
    if isinstance(default, six.string_types):
        # String defaults may embed env/shell templates: expand them first,
        # then coerce to this parameter's declared type.  NOTE: this calls
        # the module-level ``expand_defaults`` helper, which this method
        # name shadows.
        self.expanded_default = coerce(
            self.type, expand_defaults(default, client, getenv, getshell))
    else:
        # Non-string defaults are taken verbatim.
        self.expanded_default = default
|
def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:
    """Start connecting to the seed list.

    Connects outbound to each seed (resolving hostnames to IPs first),
    starts the periodic peer/mempool/blockheight maintenance loops, and —
    if enabled in settings — brings up a TCP listen server for incoming
    peers via a one-shot Twisted factory.

    Args:
        seed_list: a list of host:port strings; if not supplied the list
            from `protocol.xxx.json` (settings.SEED_LIST) is used
        skip_seeds: skip connecting to the seed list
    """
    if not seed_list:
        seed_list = settings.SEED_LIST
    logger.debug("Starting up nodeleader")
    if not skip_seeds:
        logger.debug("Attempting to connect to seed list...")
        for bootstrap in seed_list:
            # Seeds may be given as hostnames; resolve to "ip:port" form.
            if not is_ip_address(bootstrap):
                host, port = bootstrap.split(':')
                bootstrap = f"{hostname_to_ip(host)}:{port}"
            addr = Address(bootstrap)
            self.KNOWN_ADDRS.append(addr)
            self.SetupConnection(addr)
    logger.debug("Starting up nodeleader: starting peer, mempool, and blockheight check loops")
    # check in on peers every 10 seconds
    self.start_peer_check_loop()
    self.start_memcheck_loop()
    self.start_blockheight_loop()
    if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:
        class OneShotFactory(Factory):
            # Twisted factory building one NeoNode protocol per incoming
            # connection; records the peer's address on the leader.
            def __init__(self, leader):
                self.leader = leader

            def buildProtocol(self, addr):
                print(f"building new protocol for addr: {addr}")
                self.leader.AddKnownAddress(Address(f"{addr.host}:{addr.port}"))
                p = NeoNode(incoming_client=True)
                p.factory = self
                return p

        def listen_err(err):
            # NOTE(review): failure to bind is only printed, not retried.
            print(f"Failed start listening server for reason: {err.value}")

        def listen_ok(value):
            # Mark the server as up so we don't bind twice on re-entry.
            self.incoming_server_running = True

        logger.debug(f"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}")
        server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)
        listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))
        listenport_deferred.addCallback(listen_ok)
        listenport_deferred.addErrback(listen_err)
|
def _update_route(dcidr, router_ip, old_router_ip, vpc_info, con, route_table_id, update_reason):
    """Update an existing route entry in the route table.

    Points the route for ``dcidr`` in ``route_table_id`` at the instance/ENI
    that currently holds ``router_ip``, records the new route in
    CURRENT_STATE, and always reports the outcome via _rt_state_update
    (with "(none)" placeholders if the instance lookup failed).
    """
    instance = eni = None
    try:
        instance, eni = find_instance_and_eni_by_ip(vpc_info, router_ip)
        logging.info("--- updating existing route in RT '%s' "
                     "%s -> %s (%s, %s) (old IP: %s, reason: %s)" %
                     (route_table_id, dcidr, router_ip,
                      instance.id, eni.id, old_router_ip, update_reason))
        try:
            con.replace_route(route_table_id=route_table_id,
                              destination_cidr_block=dcidr,
                              instance_id=instance.id,
                              interface_id=eni.id)
        except Exception as e:
            raise Exception("replace_route failed: %s" % str(e))
        CURRENT_STATE.routes[dcidr] = (router_ip, str(instance.id), str(eni.id))
    except Exception as e:
        # Fix: use str(e) instead of e.message — BaseException.message was
        # removed in Python 3, so the old code raised AttributeError here
        # and masked the real failure.
        msg = "*** failed to update route in RT '%s' %s -> %s (%s)" % \
              (route_table_id, dcidr, old_router_ip, str(e))
        update_reason += " [ERROR update route: %s]" % str(e)
        logging.error(msg)
    # Reported on both success and failure paths.
    _rt_state_update(route_table_id, dcidr, router_ip,
                     instance.id if instance else "(none)",
                     eni.id if eni else "(none)",
                     old_router_ip, update_reason)
|
def needs(self, reglist):
    """Return True if this instruction needs any of the registers in *reglist*."""
    if isinstance(reglist, str):
        reglist = [reglist]
    expanded = single_registers(reglist)
    return any(reg in expanded for reg in self.requires)
|
def _send_str(self, cmd, args):
    """Send a command with a UTF-8 string payload over the connection.

    Wire format:
        {Command}{args length (little endian)}{str}
    Lengths:
        {4}      {4}                          {str length}
    """
    logger.debug("{} {}".format(cmd, args))
    encoded_args = args.encode('utf-8')
    length_prefix = self._little_endian(len(encoded_args))
    frame = cmd.encode() + length_prefix + encoded_args
    logger.debug("Send string: {}".format(frame))
    self.connection.write(frame)
|
def filter_strings_by_length(strings: list, length: int) -> list:
    """Return the elements of *strings* that have exactly *length* characters.

    Args:
        strings (list): A list of strings.
        length (int): The desired length of strings.

    Returns:
        list: A list of strings from the input list that match the
        specified length.

    Examples:
        >>> filter_strings_by_length(['Python', 'list', 'exercises', 'practice', 'solution'], 8)
        ['practice', 'solution']
        >>> filter_strings_by_length(['Python', 'list', 'exercises', 'practice', 'solution'], 6)
        ['Python']
        >>> filter_strings_by_length(['Python', 'list', 'exercises', 'practice', 'solution'], 9)
        ['exercises']
    """
    return list(filter(lambda s: len(s) == length, strings))
|
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the (log-)likelihood of each datapoint belonging to each cluster.

    Parameters
    ----------
    X : array, [n_examples, n_features]
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers,]
    concentrations (kappa) : array, [n_centers,]
    posterior_type : str
        "soft" gives normalized responsibilities, "hard" gives a one-hot
        assignment per example.

    Returns
    -------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # The exact vMF log-density becomes numerically unstable beyond roughly
    # 50 dimensions; fall back to the asymptotic approximation there.
    vmf_logpdf = _vmf_log if n_features <= 50 else _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for k in range(n_clusters):
        f_log[k, :] = vmf_logpdf(X, concentrations[k], centers[k, :])
    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weighted = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log
        # Normalize each column in log-space for numerical stability.
        for j in range(n_examples):
            posterior[:, j] = np.exp(weighted[:, j] - logsumexp(weighted[:, j]))
    elif posterior_type == "hard":
        weighted = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log
        # One-hot assignment to the most likely cluster per example.
        for j in range(n_examples):
            posterior[np.argmax(weighted[:, j]), j] = 1.0
    return posterior
|
def __restore_mbi(self, hProcess, new_mbi, old_mbi, bSkipMappedFiles, bSkipOnError):
    """Used internally by L{restore_memory_snapshot}.

    Restores a single memory region to its snapshotted state in three
    steps: region state (free/reserved/commited), then protection flags,
    then the region contents.

    @param hProcess: handle to the target process.
    @param new_mbi: current state of the region (updated in place as the
        restore progresses).
    @param old_mbi: snapshotted state to restore.
    @param bSkipMappedFiles: if True, skip writing content over regions
        backed by mapped files.
    @param bSkipOnError: if True, warn and continue on errors instead of
        raising.
    """
##    print "Restoring %s-%s" % (
##        HexDump.address(old_mbi.BaseAddress, self.get_bits()),
##        HexDump.address(old_mbi.BaseAddress + old_mbi.RegionSize,
##                        self.get_bits()))
    try:
        # Restore the region state.
        if new_mbi.State != old_mbi.State:
            if new_mbi.is_free():
                if old_mbi.is_reserved():
                    # Free -> Reserved
                    address = win32.VirtualAllocEx(hProcess,
                                                   old_mbi.BaseAddress,
                                                   old_mbi.RegionSize,
                                                   win32.MEM_RESERVE,
                                                   old_mbi.Protect)
                    if address != old_mbi.BaseAddress:
                        self.free(address)
                        msg = "Error restoring region at address %s"
                        # Fix: HexDump.address(), not HexDump() — HexDump is
                        # a namespace class; calling it directly does not
                        # format the address (see commented code above).
                        msg = msg % HexDump.address(old_mbi.BaseAddress,
                                                    self.get_bits())
                        raise RuntimeError(msg)
                    # permissions already restored
                    new_mbi.Protect = old_mbi.Protect
                else:   # elif old_mbi.is_commited():
                    # Free -> Commited
                    address = win32.VirtualAllocEx(hProcess,
                                                   old_mbi.BaseAddress,
                                                   old_mbi.RegionSize,
                                                   win32.MEM_RESERVE | win32.MEM_COMMIT,
                                                   old_mbi.Protect)
                    if address != old_mbi.BaseAddress:
                        self.free(address)
                        msg = "Error restoring region at address %s"
                        msg = msg % HexDump.address(old_mbi.BaseAddress,
                                                    self.get_bits())
                        raise RuntimeError(msg)
                    # permissions already restored
                    new_mbi.Protect = old_mbi.Protect
            elif new_mbi.is_reserved():
                if old_mbi.is_commited():
                    # Reserved -> Commited
                    address = win32.VirtualAllocEx(hProcess,
                                                   old_mbi.BaseAddress,
                                                   old_mbi.RegionSize,
                                                   win32.MEM_COMMIT,
                                                   old_mbi.Protect)
                    if address != old_mbi.BaseAddress:
                        self.free(address)
                        msg = "Error restoring region at address %s"
                        msg = msg % HexDump.address(old_mbi.BaseAddress,
                                                    self.get_bits())
                        raise RuntimeError(msg)
                    # permissions already restored
                    new_mbi.Protect = old_mbi.Protect
                else:   # elif old_mbi.is_free():
                    # Reserved -> Free
                    win32.VirtualFreeEx(hProcess,
                                        old_mbi.BaseAddress,
                                        old_mbi.RegionSize,
                                        win32.MEM_RELEASE)
            else:   # elif new_mbi.is_commited():
                if old_mbi.is_reserved():
                    # Commited -> Reserved
                    win32.VirtualFreeEx(hProcess,
                                        old_mbi.BaseAddress,
                                        old_mbi.RegionSize,
                                        win32.MEM_DECOMMIT)
                else:   # elif old_mbi.is_free():
                    # Commited -> Free
                    win32.VirtualFreeEx(hProcess,
                                        old_mbi.BaseAddress,
                                        old_mbi.RegionSize,
                                        win32.MEM_DECOMMIT | win32.MEM_RELEASE)
        new_mbi.State = old_mbi.State

        # Restore the region permissions.
        if old_mbi.is_commited() and old_mbi.Protect != new_mbi.Protect:
            win32.VirtualProtectEx(hProcess,
                                   old_mbi.BaseAddress,
                                   old_mbi.RegionSize,
                                   old_mbi.Protect)
            new_mbi.Protect = old_mbi.Protect

        # Restore the region data.
        # Ignore write errors when the region belongs to a mapped file.
        if old_mbi.has_content():
            if old_mbi.Type != 0:
                if not bSkipMappedFiles:
                    # poke() tolerates partial writes (mapped files).
                    self.poke(old_mbi.BaseAddress, old_mbi.content)
            else:
                self.write(old_mbi.BaseAddress, old_mbi.content)
            new_mbi.content = old_mbi.content

    # On error, skip this region or raise an exception.
    except Exception:
        if not bSkipOnError:
            raise
        msg = "Error restoring region at address %s: %s"
        msg = msg % (HexDump.address(old_mbi.BaseAddress, self.get_bits()),
                     traceback.format_exc())
        warnings.warn(msg, RuntimeWarning)
|
def isense_parse(self, filepath, modulename):
    """Parse *filepath*, throttling re-parses to once per minute per module.

    A module never seen before is parsed immediately and its check time
    recorded; a known module is only re-parsed when more than 60 seconds
    have elapsed since the last check.
    """
    from datetime import datetime
    checks = self._last_isense_check
    if modulename not in checks:
        # First time we see this module: record the time and parse.
        checks[modulename] = datetime.utcnow()
        self.parse(filepath, True)
        return
    elapsed = (datetime.utcnow() - checks[modulename]).seconds
    if elapsed > 60:
        self.parse(filepath, True)
        # Timestamp taken after parsing, matching the throttle semantics.
        checks[modulename] = datetime.utcnow()
|
def max_width(self):
    """Get maximum width of progress bar

    The width spec (``self._width_str``) is a number followed by a unit:
    ``"c"`` for absolute columns or ``"%"`` for a percentage of the
    terminal width.

    :rtype: int
    :returns: Maximum column width of progress bar
    """
    spec = self._width_str
    value, unit = float(spec[:-1]), spec[-1]
    ensure(unit in ["c", "%"], ValueError,
           "Width unit must be either 'c' or '%'")
    if unit == "c":
        ensure(value <= self.columns, ValueError,
               "Terminal only has {} columns, cannot draw "
               "bar of size {}.".format(self.columns, value))
        width = value
    else:  # unit == "%"
        ensure(0 < value <= 100, ValueError,
               "value=={} does not satisfy 0 < value <= 100".format(value))
        width = (value / 100) * self.columns
    return floor(width)
|
def get_update_sql(self, rows):
    """Build SQL UPDATE for rows ``rows``.

    .. code-block:: sql

        UPDATE table_name
        SET
            field1 = new_values.field1
            field2 = new_values.field2
        FROM (
            VALUES
                (1, 'value1', 'value2'),
                (2, 'value1', 'value2')
        ) AS new_values (id, field1, field2)
        WHERE table_name.id = new_values.id;

    The first field is treated as the primary key; a non-pk column whose
    value is None in every row is set to NULL literally instead of
    referencing new_values.  Returns ``(sql, args)`` and also stores the
    SQL on ``self.sql``.
    """
    field_names = self.get_field_names()
    pk, update_field_names = field_names[0], field_names[1:]
    num_columns = len(rows[0])
    if num_columns < 2:
        raise Exception('At least 2 fields must be passed to get_update_sql')
    all_null_indices = [
        all(row[col] is None for row in rows)
        for col in range(1, num_columns)
    ]
    field_names_sql = '({0})'.format(', '.join(field_names))
    sql_args = []
    row_values = []
    for row in rows:
        sql_args.extend(row)
        row_values.append('({0})'.format(', '.join(['%s'] * len(row))))
    row_values_sql = ', '.join(row_values)
    # Build the SET clause, substituting a literal NULL for all-None columns.
    set_items = []
    for idx, field_name in enumerate(update_field_names):
        if all_null_indices[idx]:
            set_items.append('{0} = NULL'.format(field_name))
        else:
            set_items.append('{0} = new_values.{0}'.format(field_name))
    set_field_list_sql = ', '.join(set_items)
    self.sql = 'UPDATE {0} SET {1} FROM (VALUES {2}) AS new_values {3} WHERE {0}.{4} = new_values.{4}'.format(
        self.tables[0].get_identifier(),
        set_field_list_sql,
        row_values_sql,
        field_names_sql,
        pk,
    )
    return self.sql, sql_args
|
def query(self, tableClass, comparison=None, limit=None, offset=None, sort=None):
    """Return a query over instances of C{tableClass}, or tuples of
    instances if C{tableClass} is a tuple of classes.

    Examples::

        fastCars = s.query(Vehicle,
            axiom.attributes.AND(
                Vehicle.wheels == 4,
                Vehicle.maxKPH > 200),
            limit=100,
            sort=Vehicle.maxKPH.descending)

        quotesByClient = s.query((Client, Quote),
            axiom.attributes.AND(
                Client.active == True,
                Quote.client == Client.storeID,
                Quote.created >= someDate),
            limit=10,
            sort=(Client.name.ascending,
                  Quote.created.descending))

    @param tableClass: a subclass of Item to look for instances of,
        or a tuple of subclasses.

    @param comparison: a provider of L{IComparison}, or None, to match
        all items available in the store.  If tableClass is a tuple, then
        the comparison must refer to all Item subclasses in that tuple,
        and specify the relationships between them.

    @param limit: an int to limit the total length of the results, or None
        for all available results.

    @param offset: an int to specify a starting point within the available
        results, or None to start at 0.

    @param sort: an L{ISort}, something that comes from an SQLAttribute's
        'ascending' or 'descending' attribute.

    @return: an L{ItemQuery} object, which is an iterable of Items or
        tuples of Items, according to tableClass.
    """
    # Tuples of classes produce tuples of items; dispatch accordingly.
    queryClass = MultipleItemQuery if isinstance(tableClass, tuple) else ItemQuery
    return queryClass(self, tableClass, comparison, limit, offset, sort)
|
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """Computes the enrichment at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    enrichment : float
        The enrichment at the specified FPR (inf when no background
        value reaches the threshold).
    """
    fg = np.array(fg_vals)
    bg = np.array(bg_vals)
    # Score threshold above which a fraction ``fpr`` of the background lies.
    threshold = scoreatpercentile(bg, 100 - fpr * 100)
    bg_hits = float((bg >= threshold).sum())
    if bg_hits == 0:
        return float("inf")
    fg_hits = (fg >= threshold).sum()
    # Ratio of hit rates: (fg_hits / len(fg)) / (bg_hits / len(bg)).
    return fg_hits / bg_hits * len(bg) / float(len(fg))
|
def check_measurements_against_sensitivities(self, magnitude, phase=0, return_plot=False):
    """Check for all configurations if the sensitivities add up to a given
    homogeneous model.

    Parameters
    ----------
    magnitude : float
        magnitude used for the homogeneous model
    phase : float, optional, default=0
        phase value used for the homogeneous model
    return_plot : bool, optional, default=False
        create a plot analyzing the differences

    Returns
    -------
    results : Nx6 numpy.ndarray
        Results of the analysis.

        * magnitude measurement [Ohm]
        * sum of sensitivities [Volt]
        * relative deviation of sensitivity-sum from measurement [percent]
        * phase measurement [mrad]
        * sum of phase sensitivities
        * relative phase deviation [percent]
    fig : matplotlib.figure, optional
        figure object. Only returned if return_plot=True
    axes : list
        list of axes corresponding to the figure

    Examples
    --------
    >>> import crtomo.tdManager as CRtdMan
    >>> tdm = CRtdMan.tdMan(
    ...     elem_file='grid/elem.dat',
    ...     elec_file='grid/elec.dat',
    ...     config_file='config/config.dat',
    ... )
    >>> results, fig, axes = tdm.check_measurements_against_sensitivities(
    ...     magnitude=100,
    ...     phase=-10,
    ...     return_plot=True,
    ... )
    >>> fig.savefig('sensitivity_comparison.png', dpi=300)
    """
    # generate a temporary tdMan instance with a homogeneous forward model
    tdm = tdMan(grid=self.grid, configs=self.configs, )
    tdm.add_homogeneous_model(magnitude, phase)
    measurements = tdm.measurements()
    # complex impedances; phase column is in mrad, hence the /1000
    Z = measurements[:, 0] * np.exp(1j * measurements[:, 1] / 1000)
    results = []
    for nr in range(0, tdm.configs.nr_of_configs):
        sensitivities = tdm.get_sensitivity(nr)
        sens_re = sensitivities[0][0]
        sens_im = sensitivities[0][1]
        # project the complex sensitivity onto the magnitude direction
        sens_mag = 1.0 / measurements[nr, 0] * (
            np.real(Z[nr]) * sens_re + np.imag(Z[nr]) * sens_im)
        V_mag_from_sens = sens_mag.sum() / magnitude
        if phase != 0:
            # chain-rule terms for the phase sensitivity
            outer = 1 / (1 + (np.imag(Z[nr]) / np.real(Z[nr])) ** 2)
            inner1 = -sens_re / np.real(Z[nr]) ** 2 * np.imag(Z[nr])
            inner2 = sens_im * np.real(Z[nr])
            sens_pha = outer * (inner1 + inner2)
            V_pha_from_sens = sens_pha.sum() / phase
        else:
            V_pha_from_sens = None
        print('WARNING: We still do not know where the minus sign comes ' +
              'from!')
        V_mag_from_sens *= -1
        # NOTE(review): the last column uses V_mag_from_sens although it
        # computes the *phase* deviation — looks like it should be
        # V_pha_from_sens; confirm before relying on column 5.
        results.append((
            measurements[nr][0],
            V_mag_from_sens,
            (measurements[nr][0] - V_mag_from_sens) / measurements[nr][0] * 100,
            measurements[nr][1],
            V_pha_from_sens,
            (measurements[nr][1] - V_mag_from_sens) / measurements[nr][1] * 100,
        ))
    results = np.array(results)
    if return_plot:
        nr_x = 2
        if phase == 0:
            nr_x = 1
        fig, axes = plt.subplots(1, nr_x, figsize=(15 / 2.54, 7 / 2.54))
        fig.suptitle('Comparison sum of sensitivities to measurements')
        # plot phase first
        if phase != 0:
            ax = axes[1]
            ax.plot(results[:, 5], '.')
            ax.set_xlabel('configuration number')
            ax.set_ylabel(
                r'$\frac{V_i^{\mathrm{pha}} - ' +
                r' \sum s_{ij}^{\mathrm{pha}} \cdot ' +
                r'\phi_0}{V_i}~[\%]$')
            # set ax for magnitude plot
            ax = axes[0]
        else:
            ax = axes
        ax.plot(results[:, 2], '.')
        ax.set_xlabel('configuration number')
        # ax.set_ylabel('deviation from magnitude measurement [\%]')
        ax.set_ylabel(
            r'$\frac{V_i^{\mathrm{mag}} - ' +
            r'\sum s_{ij}^{\mathrm{mag}} \cdot ' +
            r'\sigma_0}{V_i}~[\%]$')
        fig.tight_layout()
        return results, fig, axes
    else:
        return results
|
def ensure_dir(directory: str) -> None:
    """Create *directory* if it doesn't exist."""
    if os.path.isdir(directory):
        return
    LOG.debug(f"Directory {directory} does not exist, creating it.")
    os.makedirs(directory)
|
def _recv_cf(self, data):
    """Process a received 'Consecutive Frame' frame.

    Appends the CF payload to the reassembly buffer, enforces the
    expected sequence number and frame length, sends a Flow Control
    frame when the negotiated block size is reached, and delivers the
    reassembled message once all bytes announced by the First Frame have
    arrived.

    Returns 0 when the frame was consumed (or ignored because we are not
    waiting for data), 1 when the frame was rejected.
    """
    if self.rx_state != ISOTP_WAIT_DATA:
        return 0
    self.rx_timer.cancel()
    # CFs are never longer than the FF
    if len(data) > self.rx_ll_dl:
        return 1
    # CFs have usually the LL_DL length
    if len(data) < self.rx_ll_dl:
        # this is only allowed for the last CF
        if self.rx_len - self.rx_idx > self.rx_ll_dl:
            # Fix: corrected "insuffifient" typo in the warning message.
            warning("Received a CF with insufficient length")
            return 1
    if six.indexbytes(data, 0) & 0x0f != self.rx_sn:
        # Wrong sequence number
        warning("RX state was reset because wrong sequence number was "
                "received")
        self.rx_state = ISOTP_IDLE
        return 1
    # Sequence numbers wrap around after 15.
    self.rx_sn = (self.rx_sn + 1) % 16
    self.rx_buf += data[1:]
    self.rx_idx = len(self.rx_buf)
    if self.rx_idx >= self.rx_len:
        # we are done: trim padding, deliver, and reset the state machine
        self.rx_buf = self.rx_buf[0:self.rx_len]
        self.rx_state = ISOTP_IDLE
        self.rx_queue.put(self.rx_buf)
        for cb in self.rx_callbacks:
            cb(self.rx_buf)
        self.call_release()
        self.rx_buf = None
        return 0
    # perform blocksize handling, if enabled
    if self.rxfc_bs != 0:
        self.rx_bs += 1
        # check if we reached the end of the block
        if self.rx_bs >= self.rxfc_bs and not self.listen_mode:
            # send our FC frame
            load = self.ea_hdr
            load += struct.pack("BBB", N_PCI_FC, self.rxfc_bs, self.rxfc_stmin)
            self.can_send(load)
    # wait for another CF
    self.rx_timer.set_timeout(self.cf_timeout, self._rx_timer_handler)
    return 0
|
def get_blocked(self):
    """Return a UserList of Redditors that the user has blocked."""
    blocked_url = self.reddit_session.config['blocked']
    return self.reddit_session.request_json(blocked_url)
|
def _to_rest_hide ( model , props ) :
"""Purge fields not allowed during a REST serialization
This is done on fields with ` to _ rest = False ` ."""
|
hide = model . get_fields_by_prop ( 'to_rest' , False )
for field in hide :
try :
del props [ field ]
except KeyError :
continue
|
def entropy_bits_nrange(minimum: Union[int, float], maximum: Union[int, float]) -> float:
    """Calculate the number of entropy bits in a range of numbers.

    Uses the approximation log10(range size) * log2(10) of the Shannon
    entropy of a uniform choice over the range:
        d = fabs(maximum - minimum)
        ent = -(1/d) * log(1/d, 2) * d
    """
    if not isinstance(minimum, (int, float)):
        raise TypeError('minimum can only be int or float')
    if not isinstance(maximum, (int, float)):
        raise TypeError('maximum can only be int or float')
    if minimum < 0:
        raise ValueError('minimum should be greater than 0')
    if maximum < 0:
        raise ValueError('maximum should be greater than 0')
    span = fabs(maximum - minimum)
    # An empty range carries no entropy.
    return 0.0 if span == 0 else log10(span) * 3.321928
|
def iMath(image, operation, *args):
    """Perform various (often mathematical) operations on the input image/s.

    Additional parameters should be specific for each operation.
    See the full iMath in ANTs, on which this function is based.

    ANTsR function: `iMath`

    Arguments
    ---------
    image : ANTsImage
        input object, usually antsImage
    operation
        a string e.g. "GetLargestComponent" ... the special case of
        "GetOperations" or "GetOperationsFull" will return a list of
        operations and brief description. Some operations may not be
        valid (WIP), but most are.
    *args : non-keyword arguments
        additional parameters specific to the operation

    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_ants_data('r16'))
    >>> img2 = ants.iMath(img, 'Canny', 1, 5, 12)
    """
    if operation not in _iMathOps:
        raise ValueError('Operation not recognized')
    # The result is written into a clone of the input image.
    result = image.clone()
    call_args = [image.dimension, result, operation, image] + list(args)
    processed_args = _int_antsProcessArguments(call_args)
    libfn = utils.get_lib_fn('iMath')
    libfn(processed_args)
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.