signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def to_dataframe(self):
    """Convert the confusion matrix to a tidy DataFrame.

    Returns:
        A DataFrame with "target", "predicted", and "count" columns,
        one row per (target, predicted) cell of the matrix.
    """
    rows = [
        (self._labels[t], self._labels[p], count)
        for t, target_row in enumerate(self._cm)
        for p, count in enumerate(target_row)
    ]
    return pd.DataFrame(rows, columns=['target', 'predicted', 'count'])
|
def result(self, wait=0):
    """Return the full list of results from the chain when it finishes.

    Blocks until the timeout expires.

    :param int wait: how many milliseconds to wait for a result
    :return: an unsorted list of results, or ``None`` if the chain has
        not been started yet
    """
    if not self.started:
        return None
    return result_group(self.group, wait=wait, count=self.length(), cached=self.cached)
|
async def execute(self, query, *, dc=None, near=None, limit=None, consistency=None):
    """Executes a prepared query.

    Parameters:
        query (ObjectID): Query ID
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
        near (str): Sort the resulting list in ascending order based on
            the estimated round trip time from that node.
        limit (int): Limit the list's size to the given number of nodes.
        consistency (Consistency): Force consistency.
    Returns:
        Object: a body like this::

            {
                "Service": "redis",
                "Nodes": [
                    {
                        "Node": {
                            "Node": "foobar",
                            "Address": "10.1.10.12",
                            "TaggedAddresses": {
                                "lan": "10.1.10.12",
                                "wan": "10.1.10.12"
                            }
                        },
                        "Service": {
                            "ID": "redis",
                            "Service": "redis",
                            "Tags": None,
                            "Port": 8000
                        },
                        "Checks": [
                            {
                                "Node": "foobar",
                                "CheckID": "service:redis",
                                "Name": "Service 'redis' check",
                                "Status": "passing",
                                "Notes": "",
                                "Output": "",
                                "ServiceID": "redis",
                                "ServiceName": "redis"
                            },
                            {
                                "Node": "foobar",
                                "CheckID": "serfHealth",
                                "Name": "Serf Health Status",
                                "Status": "passing",
                                "Notes": "",
                                "Output": "",
                                "ServiceID": "",
                                "ServiceName": ""
                            }
                        ]
                    }
                ],
                "DNS": {
                    "TTL": timedelta(seconds=10)
                },
                "Datacenter": "dc3",
                "Failovers": 2
            }

        The **Nodes** section contains the list of healthy nodes providing
        the given service, as specified by the constraints of the prepared
        query.

        **Service** has the service name that the query was selecting. This
        is useful for context in case an empty list of nodes is returned.

        **DNS** has information used when serving the results over DNS.
        This is just a copy of the structure given when the prepared query
        was created.

        **Datacenter** has the datacenter that ultimately provided the list
        of nodes and **Failovers** has the number of remote datacenters
        that were queried while executing the query. This provides some
        insight into where the data came from. This will be zero during
        non-failover operations where there were healthy nodes found in
        the local datacenter.
    Raises:
        NotFound: the query does not exist
    """
    # ``query`` may be a raw ID or an object carrying an "ID" attribute/key
    # (extract_attr handles both -- presumably; confirm against its docs).
    query_id = extract_attr(query, keys=["ID"])
    response = await self._api.get("/v1/query/%s/execute" % query_id, params={"dc": dc, "near": near, "limit": limit}, consistency=consistency)
    return response.body
|
def _ctype_key_value(keys, vals):
    """Returns ctype arrays for the key-value args, and whether string keys are used.

    For internal use only.

    ``keys`` may be a single int/str key or a (possibly nested) list/tuple
    of keys; ``vals`` holds the matching NDArray values (a single NDArray,
    or a list of NDArrays that all share one key).
    """
    if isinstance(keys, (tuple, list)):
        # Recursive case: flatten every (key, val) pair into ctype lists and
        # concatenate them, verifying that key types stay consistent.
        assert(len(keys) == len(vals))
        c_keys = []
        c_vals = []
        use_str_keys = None
        for key, val in zip(keys, vals):
            c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
            c_keys += c_key_i
            c_vals += c_val_i
            # Lock in the key type from the first pair; later pairs must match.
            use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
            assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
        c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys else c_array(ctypes.c_int, c_keys)
        c_vals_arr = c_array(ctypes.c_void_p, c_vals)
        return (c_keys_arr, c_vals_arr, use_str_keys)
    # Base case: a single int or string key.
    assert(isinstance(keys, (int,) + string_types)), "unexpected type for keys: " + str(type(keys))
    use_str_keys = isinstance(keys, string_types)
    if isinstance(vals, NDArray):
        # One key, one array.
        c_keys = c_str_array([keys]) if use_str_keys else c_array_buf(ctypes.c_int, array('i', [keys]))
        return (c_keys, c_handle_array([vals]), use_str_keys)
    else:
        # One key shared by a list of arrays: repeat the key once per value.
        for value in vals:
            assert(isinstance(value, NDArray))
        c_keys = c_str_array([keys] * len(vals)) if use_str_keys else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
        return (c_keys, c_handle_array(vals), use_str_keys)
|
def geom2shp(geom, out_fn, fields=False):
    """Write out a new shapefile for input geometry.

    :param geom: ogr.Geometry with a spatial reference attached
    :param out_fn: output shapefile path (an existing file is overwritten)
    :param fields: if True, also create and populate "name", "path", "date"
        and "decyear" attribute fields derived from ``out_fn``
    """
    from pygeotools.lib import timelib
    driverName = "ESRI Shapefile"
    drv = ogr.GetDriverByName(driverName)
    # Overwrite any existing output datasource.
    if os.path.exists(out_fn):
        drv.DeleteDataSource(out_fn)
    out_ds = drv.CreateDataSource(out_fn)
    # Layer name is the output file basename without extension.
    out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
    geom_srs = geom.GetSpatialReference()
    geom_type = geom.GetGeometryType()
    out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
    if fields:
        field_defn = ogr.FieldDefn("name", ogr.OFTString)
        field_defn.SetWidth(128)
        out_lyr.CreateField(field_defn)
        field_defn = ogr.FieldDefn("path", ogr.OFTString)
        field_defn.SetWidth(254)
        out_lyr.CreateField(field_defn)
        # field_defn = ogr.FieldDefn("date", ogr.OFTString)
        # This allows sorting by date (stored as integer YYYYMMDD).
        field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
        field_defn.SetWidth(32)
        out_lyr.CreateField(field_defn)
        field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
        field_defn.SetPrecision(8)
        field_defn.SetWidth(64)
        out_lyr.CreateField(field_defn)
    out_feat = ogr.Feature(out_lyr.GetLayerDefn())
    out_feat.SetGeometry(geom)
    if fields:
        # Hack to force output extesion to tif, since out_fn is shp
        out_path = os.path.splitext(out_fn)[0] + '.tif'
        out_feat.SetField("name", os.path.split(out_path)[-1])
        out_feat.SetField("path", out_path)
        # Try to extract a date from input raster fn
        out_feat_date = timelib.fn_getdatetime(out_fn)
        if out_feat_date is not None:
            datestamp = int(out_feat_date.strftime('%Y%m%d'))
            # out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
            out_feat.SetField("date", datestamp)
            decyear = timelib.dt2decyear(out_feat_date)
            out_feat.SetField("decyear", decyear)
    out_lyr.CreateFeature(out_feat)
    # Dereference the datasource to flush and close it (GDAL idiom).
    out_ds = None
|
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
    """Provision the new instance.

    See the openbrokerapi documentation.

    Returns:
        ProvisionedServiceSpec
    """
    plan_id = service_details.plan_id
    if plan_id != self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
        # Only the "existing Atlas cluster" plan is supported.
        raise ErrPlanUnsupported(plan_id)
    # Find the instance record, then create it on the backend if needed.
    instance = self._backend.find(instance_id)
    return self._backend.create(instance, service_details.parameters, existing=True)
|
def to_iso639_1(key):
    """Find the ISO 639-1 code for the language specified by *key*.

    >>> to_iso639_1("swe")
    u'sv'
    >>> to_iso639_1("English")
    u'en'
    """
    match = find(whatever=key)
    if match:
        return match[u'iso639_1']
    raise NonExistentLanguageError('Language does not exist.')
|
def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse:
    """Open a dialog with a user.

    Args:
        dialog (dict): A dictionary of dialog arguments, e.g. a mapping
            with "callback_id", "title", "submit_label", "state" and an
            "elements" list of input descriptors.
        trigger_id (str): The trigger id of a recent message interaction,
            e.g. '12345.98765.abcd2358fdea'
    """
    payload = dict(kwargs, dialog=dialog, trigger_id=trigger_id)
    return self.api_call("dialog.open", json=payload)
|
def _decode_response(response):
    """Strip off Gerrit's magic prefix and decode a response.

    :returns:
        Decoded JSON content as a dict, or raw text if the content could
        not be decoded as JSON.
    :raises:
        requests.HTTPError if the response contains an HTTP error status code.
    """
    content_type = response.headers.get('content-type', '')
    logger.debug(
        "status[%s] content_type[%s] encoding[%s]"
        % (response.status_code, content_type, response.encoding))
    response.raise_for_status()

    content = response.content.strip()
    if response.encoding:
        content = content.decode(response.encoding)

    if not content:
        logger.debug("no content in response")
        return content
    if content_type.split(';')[0] != 'application/json':
        # Non-JSON payload: hand back the raw text unchanged.
        return content
    if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
        content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
    try:
        return json.loads(content)
    except ValueError:
        logger.error('Invalid json content: %s', content)
        raise
|
def p_genvar(self, p):
    'genvar : ID'
    # NOTE: the string above is a PLY grammar rule, not ordinary
    # documentation -- it must not be edited.
    # The genvar is built with a fixed [31:0] width (32-bit integer), and
    # every generated node inherits the line number of the ID token.
    p[0] = Genvar(name=p[1], width=Width(msb=IntConst('31', lineno=p.lineno(1)), lsb=IntConst('0', lineno=p.lineno(1)), lineno=p.lineno(1)), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def process(self, input_data, topic=None):
    """Run *input_data* through each handler in sequence, then publish.

    Params:
        input_data: message received by stream
        topic: name of plugin or stream the message was received from,
            if applicable
    """
    data = input_data
    for handler in self.handlers:
        data = handler.handle(data)
    self.publish(data)
|
def setup ( self , redis_conn = None , host = 'localhost' , port = 6379 ) :
'''Set up the redis connection'''
|
if redis_conn is None :
if host is not None and port is not None :
self . redis_conn = redis . Redis ( host = host , port = port )
else :
raise Exception ( "Please specify some form of connection " "to Redis" )
else :
self . redis_conn = redis_conn
self . redis_conn . info ( )
|
def bump(self, level='patch', label=None):
    """Bump version following semantic versioning rules."""
    # Pre-release bumps go through the dedicated helper.
    if level == 'pre':
        self._bump_pre(level, label)
    else:
        self._bump(level, label)
|
def K2findCampaigns_byname_main(args=None):
    """Exposes K2findCampaigns to the command line.

    :param args: argv-style list of arguments; ``None`` means use sys.argv.
    Exits with status 1 if the target name cannot be resolved.
    """
    parser = argparse.ArgumentParser(description="Check if a target is " "(or was) observable by any past or future " "observing campaign of NASA's K2 mission.")
    parser.add_argument('name', nargs=1, type=str, help="Name of the object. This will be passed on " "to the CDS name resolver " "to retrieve coordinate information.")
    parser.add_argument('-p', '--plot', action='store_true', help="Produce a plot showing the target position " "with respect to all K2 campaigns.")
    args = parser.parse_args(args)
    targetname = args.name[0]
    try:
        # Resolve the name to campaigns and coordinates (may hit the network).
        campaigns, ra, dec = findCampaignsByName(targetname)
    except ValueError:
        print("Error: could not retrieve coordinates for {0}.".format(targetname))
        print("The target may be unknown or there may be a problem " "connecting to the coordinate server.")
        sys.exit(1)
    # Print the result
    if len(campaigns):
        print(Highlight.GREEN + "Success! {0} is on silicon ".format(targetname) + "during K2 campaigns {0}.".format(campaigns) + Highlight.END)
    else:
        print(Highlight.RED + "Sorry, {} is not on silicon " "during any K2 campaign.".format(targetname) + Highlight.END)
    # Print the pixel positions
    for c in campaigns:
        printChannelColRow(c, ra, dec)
    # Make a context plot if the user requested so
    if args.plot:
        save_context_plots(ra, dec, targetname=targetname)
|
def _verify_include_files_used(self, file_uses, included_files):
    """Find all #include files that are unnecessary and warn about them."""
    for include_file, use in file_uses.items():
        # A file whose declarations are used is a necessary include.
        if use & USES_DECLARATION:
            continue
        node, module = included_files[include_file]
        if module.ast_list is None:
            continue
        msg = "'{}' does not need to be #included".format(node.filename)
        if use & USES_REFERENCE:
            msg += '; use a forward declaration instead'
        self._add_warning(msg, node)
|
def set_state(self, light_id, **kwargs):
    '''Sets state on the light, can be used like this:

    .. code-block:: python

        set_state(1, xy=[1,2])

    :param light_id: id of the light to update
    :param kwargs: state settings to apply (e.g. ``on``, ``bri``, ``xy``)
    :return: True if every submitted setting was acknowledged as a success,
        False otherwise
    '''
    light = self.get_light(light_id)
    url = '/api/%s/lights/%s/state' % (self.username, light.light_id)
    response = self.make_request('PUT', url, kwargs)
    # The bridge returns one result entry per submitted setting; the call
    # succeeded only if every entry reports success.
    # (Fixed: len(kwargs.items()) -> len(kwargs); manual counting loop and
    # if/else on the comparison replaced with a direct boolean expression.)
    success_count = sum(1 for data in response if 'success' in data)
    return success_count == len(kwargs)
|
def _init_metadata(self):
    """stub"""
    # Metadata describing the boolean "published" element of this form.
    self._published_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'published'),
        'element_label': 'Published',
        'instructions': 'flags if item is published or not',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_published_values': [False],
        'syntax': 'BOOLEAN',
    }
|
def CreateSubsetFile(in_drainage_line, river_id, out_riv_bas_id_file, file_geodatabase=None):
    """Creates River Basin ID subset input CSV file for RAPID
    based on the Drainage Line shapefile with river ID and
    next downstream ID fields.

    Parameters
    ----------
    in_drainage_line : str
        Path to the stream network (i.e. Drainage Line) shapefile.
    river_id : str
        The name of the field with the river ID
        (Ex. 'HydroID', 'COMID', or 'LINKNO').
    out_riv_bas_id_file : str
        The path to the output river basin ID subset file.
    file_geodatabase : str, optional
        Path to the file geodatabase. If you use this option,
        in_drainage_line is the name of the stream network feature class
        (WARNING: Not always stable with GDAL).

    Example::

        from RAPIDpy.gis.network import CreateSubsetFile
        CreateSubsetFile(in_drainage_line='/path/to/drainageline.shp',
                         river_id='LINKNO',
                         out_riv_bas_id_file='/path/to/riv_bas_id.csv')
    """
    ogr_drainage_line_shapefile_lyr, ogr_drainage_line_shapefile = open_shapefile(in_drainage_line, file_geodatabase)
    ogr_drainage_line_definition = ogr_drainage_line_shapefile_lyr.GetLayerDefn()
    # Collect the layer's field names so fields can be located
    # case-insensitively below.
    orig_field_names = []
    for idx in xrange(ogr_drainage_line_definition.GetFieldCount()):
        orig_field_names.append(ogr_drainage_line_definition.GetFieldDefn(idx).GetName())
    upper_field_names = [field.upper() for field in orig_field_names]
    sort_field = None
    # Sort by HYDROSEQ order if the option exists
    if 'HYDROSEQ' in upper_field_names:
        # with this method, smaller is downstream
        sort_field = orig_field_names[upper_field_names.index('HYDROSEQ')]
        log("Sorting by {0}".format(sort_field))
    hydroseq_list = []
    hydroid_list = []
    # The script line below makes sure that rows in the subset file are
    # arranged in descending order of NextDownID of stream segements
    for drainage_line_feature in ogr_drainage_line_shapefile_lyr:
        hydroid_list.append(drainage_line_feature.GetField(river_id))
        if sort_field:
            hydroseq_list.append(drainage_line_feature.GetField(sort_field))
    # Release the datasource handle before post-processing (GDAL idiom).
    del ogr_drainage_line_shapefile
    hydroid_list = np.array(hydroid_list, dtype=np.int32)
    if hydroseq_list:
        # Descending HYDROSEQ (smaller is downstream, so largest-first).
        hydroseq_list = np.array(hydroseq_list, dtype=np.int32)
        sort_order = hydroseq_list.argsort()[::-1]
        hydroid_list = hydroid_list[sort_order]
    else:
        # No HYDROSEQ field: fall back to ascending river-ID order.
        hydroid_list = np.sort(hydroid_list)
    with open_csv(out_riv_bas_id_file, 'w') as csvfile:
        connectwriter = csv_writer(csvfile)
        for hydroid in hydroid_list:
            connectwriter.writerow([hydroid])
|
def print_subtree(self, fobj=sys.stdout, level=0):
    """Print this group node and the subtree rooted at it."""
    indent = " " * (level * 2)
    fobj.write("{}{!r}\n".format(indent, self))
    for child in self.get_children():
        child.print_subtree(fobj, level + 1)
|
def get_segment_length(linestring: LineString, p: Point, q: Optional[Point] = None) -> float:
    """Given a Shapely linestring and two Shapely points, project the
    points onto the linestring and return the distance along the
    linestring between the two projections.

    If ``q is None``, return the distance from the start of the
    linestring to the projection of ``p``.

    The distance is measured in the native coordinates of the linestring.
    """
    start_to_p = linestring.project(p)
    if q is None:
        return start_to_p
    return abs(start_to_p - linestring.project(q))
|
def _fake_closeenumeration ( self , namespace , ** params ) :
"""Implements WBEM server responder for
: meth : ` ~ pywbem . WBEMConnection . CloseEnumeration `
with data from the instance repository .
If the EnumerationContext is valid it removes it from the
context repository . Otherwise it returns an exception ."""
|
self . _validate_namespace ( namespace )
context_id = params [ 'EnumerationContext' ]
try :
context_data = self . enumeration_contexts [ context_id ]
except KeyError :
raise CIMError ( CIM_ERR_INVALID_ENUMERATION_CONTEXT , _format ( "EnumerationContext {0!A} not found in mock server " "enumeration contexts." , context_id ) )
# This is probably relatively useless because pywbem handles
# namespace internally but it could catch an error if user plays
# with the context .
if context_data [ 'namespace' ] != namespace :
raise CIMError ( CIM_ERR_INVALID_NAMESPACE , _format ( "Invalid namespace {0!A} for CloseEnumeration {1!A}" , namespace , context_id ) )
del self . enumeration_contexts [ context_id ]
|
def copy_from_scratch(file_mapping, dry_run=True):
    """Copy output files from scratch area.

    :param file_mapping: dict mapping destination path -> scratch source path
    :param dry_run: if True, only print what would be copied
    :return: the same ``file_mapping`` dict
    """
    for dest, src in file_mapping.items():
        if dry_run:
            print("copy %s %s" % (src, dest))
            continue
        try:
            # Make sure the destination directory exists.
            os.makedirs(os.path.dirname(dest))
        except OSError:
            # Directory already exists (or cannot be created); copyfile
            # will surface any real problem.
            pass
        print("copy %s %s" % (src, dest))
        copyfile(src, dest)
    return file_mapping
|
def set_input_focus(self, focus, revert_to, time, onerror=None):
    """Set input focus to *focus*, which should be a window,
    X.PointerRoot or X.NONE.

    *revert_to* specifies where the focus reverts to if the focused
    window becomes not visible, and should be X.RevertToParent,
    RevertToPointerRoot, or RevertToNone. See XSetInputFocus(3X11)
    for details.

    There is also a Window.set_input_focus().
    """
    request.SetInputFocus(display=self.display, onerror=onerror, revert_to=revert_to, focus=focus, time=time)
|
def getApplicationsErrorNameFromEnum(self, error):
    """Return the string name for an applications error enum value."""
    return self.function_table.getApplicationsErrorNameFromEnum(error)
|
def read_pod_security_policy(self, name, **kwargs):  # noqa: E501
    """Read the specified PodSecurityPolicy.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_pod_security_policy(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodSecurityPolicy (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: ExtensionsV1beta1PodSecurityPolicy
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: the delegate returns a thread-like handle.
        return self.read_pod_security_policy_with_http_info(name, **kwargs)  # noqa: E501
    response = self.read_pod_security_policy_with_http_info(name, **kwargs)  # noqa: E501
    return response
|
def WriteFlowResponses(self, responses):
    """Writes FlowMessages and updates corresponding requests.

    :param responses: iterable of flow response/status objects, each
        carrying ``client_id``, ``flow_id``, ``request_id`` and
        ``response_id``.

    Responses for unknown flows or unknown requests are logged and
    dropped. After storing the responses, any request that has received
    its full set of responses is marked as needing processing, and a
    FlowProcessingRequest is queued when the flow is waiting on that
    request.
    """
    status_available = set()
    requests_updated = set()
    task_ids_by_request = {}
    for response in responses:
        flow_key = (response.client_id, response.flow_id)
        if flow_key not in self.flows:
            logging.error("Received response for unknown flow %s, %s.", response.client_id, response.flow_id)
            continue
        request_dict = self.flow_requests.get(flow_key, {})
        if response.request_id not in request_dict:
            logging.error("Received response for unknown request %s, %s, %d.", response.client_id, response.flow_id, response.request_id)
            continue
        response_dict = self.flow_responses.setdefault(flow_key, {})
        # Store a timestamped copy so the caller's object is not mutated.
        clone = response.Copy()
        clone.timestamp = rdfvalue.RDFDatetime.Now()
        response_dict.setdefault(response.request_id, {})[response.response_id] = clone
        if isinstance(response, rdf_flow_objects.FlowStatus):
            status_available.add(response)
        request_key = (response.client_id, response.flow_id, response.request_id)
        requests_updated.add(request_key)
        try:
            task_ids_by_request[request_key] = response.task_id
        except AttributeError:
            # Not all response types carry a task_id.
            pass
    # Every time we get a status we store how many responses are expected.
    for status in status_available:
        request_dict = self.flow_requests[(status.client_id, status.flow_id)]
        request = request_dict[status.request_id]
        # The status message's response_id doubles as the total count.
        request.nr_responses_expected = status.response_id
    # And we check for all updated requests if we need to process them.
    needs_processing = []
    for client_id, flow_id, request_id in requests_updated:
        flow_key = (client_id, flow_id)
        request_dict = self.flow_requests[flow_key]
        request = request_dict[request_id]
        if request.nr_responses_expected and not request.needs_processing:
            response_dict = self.flow_responses.setdefault(flow_key, {})
            responses = response_dict.get(request_id, {})
            if len(responses) == request.nr_responses_expected:
                # Request is complete: mark it and drop the pending client
                # action request for it.
                request.needs_processing = True
                self._DeleteClientActionRequest(client_id, flow_id, request_id)
                flow = self.flows[flow_key]
                if flow.next_request_to_process == request_id:
                    needs_processing.append(rdf_flows.FlowProcessingRequest(client_id=client_id, flow_id=flow_id))
    if needs_processing:
        self.WriteFlowProcessingRequests(needs_processing)
|
def claim_new(self) -> Iterable[str]:
    """Check for messages in the ``new`` subdirectory, moving them to
    ``cur`` and yielding their keys."""
    new_subdir = self._paths['new']
    cur_subdir = self._paths['cur']
    for name in os.listdir(new_subdir):
        source = os.path.join(new_subdir, name)
        target = os.path.join(cur_subdir, name)
        try:
            os.rename(source, target)
        except FileNotFoundError:
            # Presumably already claimed by a concurrent consumer -- skip.
            continue
        yield name.rsplit(self.colon, 1)[0]
|
def overloaded(func):
    """Introduces a new overloaded function and registers its first implementation."""
    fn = unwrap(func)
    ensure_function(fn)

    def dispatcher(*args, **kwargs):
        # Resolve the matching implementation for the call's argument types,
        # caching the resolution keyed by those types.
        resolved = None
        if dispatcher.__complex_parameters:
            # Some parameters are "complex" (containers whose element types
            # participate in dispatch), so the cache key must also encode
            # element type information.
            cache_key_pos = []
            cache_key_kw = []
            # argset 0 walks positional args, argset 1 walks keyword args.
            for argset in (0, 1) if kwargs else (0,):
                if argset == 0:
                    arg_pairs = enumerate(args)
                    complexity_mapping = dispatcher.__complex_positions
                else:
                    arg_pairs = kwargs.items()
                    complexity_mapping = dispatcher.__complex_parameters
                for id, arg in arg_pairs:
                    type_ = type(arg)
                    element_type = None
                    if id in complexity_mapping:
                        try:
                            element = next(iter(arg))
                        except TypeError:
                            # Not iterable: no element type to record.
                            pass
                        except StopIteration:
                            # Empty container: record the sentinel.
                            element_type = _empty
                        else:
                            # Complexity is a bitmask; 8 and 4 appear to select
                            # tuple-wise and mapping-wise element typing --
                            # confirm against the registration logic.
                            complexity = complexity_mapping[id]
                            if complexity & 8 and isinstance(arg, tuple):
                                element_type = tuple(type(el) for el in arg)
                            elif complexity & 4 and hasattr(arg, 'keys'):
                                element_type = (type(element), type(arg[element]))
                            else:
                                element_type = type(element)
                    if argset == 0:
                        cache_key_pos.append((type_, element_type))
                    else:
                        cache_key_kw.append((id, type_, element_type))
        else:
            # Simple case: key on the top-level argument types only.
            cache_key_pos = (type(arg) for arg in args)
            cache_key_kw = ((name, type(arg)) for (name, arg) in kwargs.items()) if kwargs else None
        cache_key = (tuple(cache_key_pos), tuple(sorted(cache_key_kw)) if kwargs else None)
        try:
            resolved = dispatcher.__cache[cache_key]
        except KeyError:
            resolved = find(dispatcher, args, kwargs)
            if resolved:
                dispatcher.__cache[cache_key] = resolved
        if resolved:
            # Run optional before/after hooks around the resolved function.
            before = dispatcher.__hooks['before']
            after = dispatcher.__hooks['after']
            if before:
                before(*args, **kwargs)
            result = resolved(*args, **kwargs)
            if after:
                after(*args, **kwargs)
            return result
        else:
            return error(dispatcher.__name__)

    # Initialize the dispatcher's bookkeeping state.
    dispatcher.__dict__.update(__functions=[], __hooks={'before': None, 'after': None}, __cache={}, __complex_positions={}, __complex_parameters={}, __maxlen=0,)
    # Make the dispatcher impersonate the wrapped function.
    for attr in ('__module__', '__name__', '__qualname__', '__doc__'):
        setattr(dispatcher, attr, getattr(fn, attr, None))
    if is_void(fn):
        # A "void" first implementation apparently only supplies docs --
        # adopt its docstring without registering it as an overload.
        update_docstring(dispatcher, fn)
        return dispatcher
    else:
        update_docstring(dispatcher)
        return register(dispatcher, func)
|
def getRunningBatchJobIDs(self):
    """Returns map of running jobIDs and the time they have been running.

    Example pstat2 lines::

        r 5410186 benedictpaten worker 1247029663 localhost
        r 5410324 benedictpaten worker 1247030076 localhost
    """
    runningJobs = {}
    issuedJobs = self.getIssuedBatchJobIDs()
    for line in self._runParasol(['pstat2'])[1]:
        if not line:
            continue
        match = self.runningPattern.match(line)
        if match is None:
            continue
        jobID = int(match.group(1))
        startTime = int(match.group(2))
        # Only report jobs this batch system actually issued.
        if jobID in issuedJobs:
            runningJobs[jobID] = time.time() - startTime
    return runningJobs
|
def dictstr(arg):
    """Parse a ``key=value`` string as a ``(key, value)`` tuple that can be
    provided as an argument to ``dict()``.

    The value is coerced: "true"/"false" (any case) become booleans,
    integer-looking strings become int, float-looking strings become float,
    and anything else stays a string.

    :raises ValueError: if *arg* contains no "=".
    """
    # Split on the first "=" only, so values may themselves contain "=".
    key, value = arg.split("=", 1)
    lowered = value.lower()
    if lowered in ("true", "false"):
        # BUG FIX: the old code did bool(value), and bool of any non-empty
        # string (including "false") is True. Compare the literal instead.
        value = lowered == "true"
    elif INT_RE.match(value):
        value = int(value)
    elif FLOAT_RE.match(value):
        value = float(value)
    return (key, value)
|
def save(self, eopatch, use_tmp=True):
    """Method which does the saving.

    :param eopatch: EOPatch containing the data which will be saved
    :type eopatch: EOPatch
    :param use_tmp: If `True` data will be saved to temporary file, otherwise
        it will be saved to intended (i.e. final) location
    :type use_tmp: bool
    :raises ValueError: if ``self.file_format`` is not a supported format
    """
    filename = self.tmp_filename if use_tmp else self.final_filename
    if self.feature_name is None:
        data = eopatch[self.feature_type]
        if self.feature_type.has_dict():
            data = data.get_dict()
        if self.feature_type is FeatureType.BBOX:
            # Serialize the bounding box as (coords..., epsg-code-int).
            data = tuple(data) + (int(data.crs.value),)
    else:
        data = eopatch[self.feature_type][self.feature_name]
    file_dir = os.path.dirname(filename)
    os.makedirs(file_dir, exist_ok=True)
    if self.compress_level:
        file_handle = gzip.GzipFile(filename, 'w', self.compress_level)
    else:
        file_handle = open(filename, 'wb')
    with file_handle as outfile:
        LOGGER.debug("Saving (%s, %s) to %s", str(self.feature_type), str(self.feature_name), filename)
        if self.file_format is FileFormat.NPY:
            np.save(outfile, data)
        elif self.file_format is FileFormat.PICKLE:
            pickle.dump(data, outfile)
        else:
            # BUG FIX: the ValueError was constructed but never raised, so an
            # unsupported format silently produced an empty file.
            raise ValueError('File {} was not saved because saving in file format {} is currently not '
                             'supported'.format(filename, self.file_format))
|
def transform(self, X):
    """Apply the configured per-row reduction to the mask *X*.

    Parameters
    ----------
    X : array-like, shape [n x m]
        The mask in form of an n x m array.
    """
    if self.mode_ in ('target', 'majority'):
        reducer = self._target if self.mode_ == 'target' else self._majority
        flattened = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2]))
        return np.apply_along_axis(reducer, 1, flattened)
    print('Invalid mode! Set mode to majority or target. Returning input.')
    return X
|
def run(self):
    """Performs the actual FEFF run.

    Returns:
        (subprocess.Popen) Used for monitoring.
    """
    f_std = open(self.output_file, "w")
    # Use line buffering for stderr so errors land on disk promptly.
    f_err = open(self.stderr_file, "w", buffering=1)
    with f_std, f_err:
        # On TSCC, need to run shell command
        return subprocess.Popen(self.feff_cmd, stdout=f_std, stderr=f_err, shell=True)
|
def key_press_event(self, obj, event):
    """Listen for key press events and dispatch on the pressed key."""
    key = self.iren.GetKeySym()
    log.debug('Key %s pressed' % key)
    if key == 'q':
        self.q_pressed = True
        # Grab screenshot right before renderer closes
        self.last_image = self.screenshot(True, return_img=True)
        return
    if key == 'b':
        self.observer = self.iren.AddObserver('LeftButtonPressEvent', self.left_button_down)
        return
    if key == 'v':
        self.isometric_view_interactive()
|
def _default_node_children ( self , node , visitor , children ) :
"""Generates a key and list of children of the given : class : ` CTENode `
` node ` , intended to be used as an update to the dictionary
representation generated by the : meth : ` node _ as _ tree ` method . The key is
` ` children ` ` and the list consists of the children of the given node as
determined by the ` children ` callback .
Each child node is , in turn , visited through recursive calls to
: meth : ` node _ as _ child ` , and the ` visitor ` and ` children ` parameters are
passed along .
: param node : the : class : ` CTENode ` for which to generate the children
representation .
: param visitor : optional function responsible for generating the
dictionary representation of the node .
: param children : optional function responsible for generating a
children key and list for the node .
: return : a key and list representation of the structure of the children
of the given node ."""
|
return { self . model . _cte_node_children : [ self . node_as_tree ( child , visitor = visitor , children = children ) for child in node . children . all ( ) ] }
|
def get_cpds(self, node=None):
    """Return the CPD of *node*; if *node* is not specified, return all the
    CPDs that have been added till now to the graph.

    Parameter
    ---------
    node : any hashable python object (optional)
        The node whose CPD we want. If node not specified returns all the
        CPDs added to the model.

    Returns
    -------
    A list of TabularCPDs (or a single CPD / None when *node* is given).

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
    >>> cpd = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
    ...                               [0.9, 0.1, 0.8, 0.3]],
    ...                  ['intel', 'diff'], [2, 2])
    >>> student.add_cpds(cpd)
    >>> student.get_cpds()
    """
    if node is None:
        return self.cpds
    if node not in self.nodes():
        raise ValueError('Node not present in the Directed Graph')
    # Return the first CPD whose variable matches, or None if absent.
    return next((cpd for cpd in self.cpds if cpd.variable == node), None)
|
def newton_refine2(s_vals, curve1, curve2):
    """Image for :func:`.newton_refine` docstring.

    Plots both curves, scatters the Newton iterates ``s_vals`` (evaluated on
    ``curve1``) on top, and saves the figure as ``newton_refine2.png``.

    :param s_vals: parameter values (successive Newton iterates) on ``curve1``.
    :param curve1: curve providing ``plot`` / ``evaluate_multi`` — presumably
        a ``bezier.Curve``; confirm against the package.
    :param curve2: second curve, drawn for context only.
    """
    # Image generation can be globally disabled (e.g. during test runs).
    if NO_IMAGES:
        return
    ax = curve1.plot(256)
    # Push the curve lines behind the scatter points added below (zorder 2).
    ax.lines[-1].zorder = 1
    curve2.plot(256, ax=ax)
    ax.lines[-1].zorder = 1
    # evaluate_multi expects a Fortran-ordered array of parameters.
    points = curve1.evaluate_multi(np.asfortranarray(s_vals))
    # One color per iterate; assumes len(s_vals) == 5 — TODO confirm.
    colors = seaborn.dark_palette("blue", 5)
    ax.scatter(points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2)
    ax.axis("scaled")
    ax.set_xlim(0.0, 1.0)
    ax.set_ylim(0.0, 1.0)
    save_image(ax.figure, "newton_refine2.png")
|
def stop(self):
    """Stop serving. Always call this to clean up after yourself.

    Closes the listening socket and every client socket, joins the accept
    and per-client server threads (bounded wait), and removes the
    Unix-domain socket file if one was created.
    """
    self._stopped = True
    threads = [self._accept_thread]
    threads.extend(self._server_threads)
    # Closing the listening socket unblocks the accept loop.
    self._listening_sock.close()
    # Snapshot the set: handler threads may mutate it while we iterate.
    for sock in list(self._server_socks):
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass  # peer already gone; best-effort teardown
        try:
            sock.close()
        except socket.error:
            pass  # best-effort close
    # Release our lock while joining so threads blocked on it can exit.
    with self._unlock():
        for thread in threads:
            thread.join(10)  # bounded wait; never hang forever
    if self._uds_path:
        try:
            os.unlink(self._uds_path)  # remove the Unix-domain socket file
        except OSError:
            pass  # already removed
|
def insert(self, key, column_parent, column, consistency_level):
    """Insert a Column at the given column_parent.column_family and optional
    column_parent.super_column.

    Sends the request asynchronously and returns a Deferred that fires
    with the server's reply, keyed by a fresh sequence id.

    Parameters:
     - key
     - column_parent
     - column
     - consistency_level
    """
    # Allocate a new sequence id and register a Deferred for the reply.
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_insert(key, column_parent, column, consistency_level)
    return d
|
def list_all_files(self):
    """Utility method that yields all files on the device's file
    systems.

    Performs a pre-order walk: each directory's own files are yielded
    before descending into its subdirectories.
    """
    def _walk(directory):
        # Immediate files first, then recurse into each subdirectory.
        yield from directory.files
        for subdirectory in directory.directories:
            yield from _walk(subdirectory)

    return _walk(self.filesystem)
|
def _add_input_state ( self , node , input_state ) :
"""Add the input state to all successors of the given node .
: param node : The node whose successors ' input states will be touched .
: param input _ state : The state that will be added to successors of the node .
: return : None"""
|
successors = self . _graph_visitor . successors ( node )
for succ in successors :
if succ in self . _state_map :
self . _state_map [ succ ] = self . _merge_states ( succ , * ( [ self . _state_map [ succ ] , input_state ] ) )
else :
self . _state_map [ succ ] = input_state
|
def get_online_version():
    """Download update info and parse it.

    Fetches ``UPDATE_URL`` and scans it line by line for the version tag
    and the download-URL tag. The ``${version}`` placeholder is only
    substituted once the whole page has been scanned, so the result no
    longer depends on the order of the two lines — previously, a URL line
    appearing before the version line (or a missing version) crashed with
    ``TypeError`` from ``str.replace('${version}', None)``.

    :return: ``(version, url)`` tuple; either element may be ``None`` when
        the corresponding tag was not found.
    """
    session = requests.session()
    page = urlopen(UPDATE_URL, session)
    version, url = None, None
    for line in page.text.splitlines():
        if line.startswith(VERSION_TAG):
            version = line.split(':', 1)[1].strip()
        elif line.startswith(URL_TAG):
            url = line.split(':', 1)[1].strip()
    # Substitute after scanning: order-independent and only applied when
    # both pieces of information are actually present.
    if url is not None and version is not None:
        url = url.replace('${version}', version)
    return version, url
|
def gather_parsing_stats(self):
    """Times parsing if --verbose.

    Generator-based context manager body: on exit it prints the elapsed
    wall-clock time and, when packrat caching is enabled, the packrat
    cache hit/miss statistics. When not verbose it yields with no
    measurement overhead.
    """
    if self.verbose:
        # time.clock() was removed in Python 3.8; prefer perf_counter()
        # but fall back for interpreters that predate it (the `or` form
        # avoids touching time.clock eagerly, which would raise on 3.8+).
        timer = getattr(time, "perf_counter", None) or time.clock
        start_time = timer()
        try:
            yield
        finally:
            elapsed_time = timer() - start_time
            printerr("Time while parsing:", elapsed_time, "seconds")
            if packrat_cache:
                hits, misses = ParserElement.packrat_cache_stats
                printerr("Packrat parsing stats:", hits, "hits;", misses, "misses")
    else:
        yield
|
def post_run(self, outline=False, *args, **kwargs):
    """Any steps that need to be taken after running the action.

    Runs the configured ``post_destroy`` hooks, unless this is an
    outline-only run or no hooks are configured.
    """
    hooks = self.context.config.post_destroy
    if outline or not hooks:
        return
    util.handle_hooks(stage="post_destroy", hooks=hooks, provider=self.provider, context=self.context)
|
def generate_module_table_header(modules):
    """Generate header with module table entries for builtin modules.

    Writes C preprocessor text to stdout: one conditional ``MODULE_DEF_*``
    block per module, then the aggregate ``MICROPY_REGISTERED_MODULES``
    macro listing every definition.

    :param List[(module_name, obj_module, enabled_define)] modules: module defs
    :return: None
    """
    # Print header file for all external modules.
    print("// Automatically generated by makemoduledefs.py.\n")
    entry_template = (
        "#if ({enabled_define})\n"
        " extern const struct _mp_obj_module_t {obj_module};\n"
        " #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
        "#else\n"
        " #define {mod_def}\n"
        "#endif\n"
    )
    mod_defs = []
    for module_name, obj_module, enabled_define in modules:
        mod_def = "MODULE_DEF_{}".format(module_name.upper())
        mod_defs.append(mod_def)
        print(entry_template.format(module_name=module_name, obj_module=obj_module, enabled_define=enabled_define, mod_def=mod_def))
    print("\n#define MICROPY_REGISTERED_MODULES \\")
    for mod_def in mod_defs:
        print(" {mod_def} \\".format(mod_def=mod_def))
    print("// MICROPY_REGISTERED_MODULES")
|
def approved_funds(pronac, dt):
    """Check whether a project's total approved value is an outlier relative
    to projects in the same cultural segment.

    Dataframes: planilha_orcamentaria

    :param pronac: project identifier used to look the project up.
    :param dt: unused here — presumably kept for a uniform metric
        signature; confirm against the callers.
    :return: dict with ``is_outlier``, ``total_approved_funds`` and
        ``maximum_expected_funds``.
    """
    funds_df = data.approved_funds_by_projects
    # Single row for this PRONAC; assumes the project exists — otherwise
    # records()[0] raises IndexError. TODO confirm upstream guarantees.
    project = (funds_df.loc[funds_df['PRONAC'] == pronac])
    project = project.to_dict('records')[0]
    # Per-segment aggregate statistics keyed by segment id.
    info = (data.approved_funds_agg.to_dict(orient="index")[project['idSegmento']])
    # NOTE(review): relies on the dict preserving exactly (mean, std)
    # column order from the aggregation — fragile; verify the schema.
    mean, std = info.values()
    outlier = gaussian_outlier.is_outlier(project['VlTotalAprovado'], mean, std)
    maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std)
    return {'is_outlier': outlier, 'total_approved_funds': project['VlTotalAprovado'], 'maximum_expected_funds': maximum_expected_funds}
|
def chown(self, uid, gid):
    """Change the owner (C{uid}) and group (C{gid}) of this file. As with
    python's C{os.chown} function, you must pass both arguments, so if you
    only want to change one, use L{stat} first to retrieve the current
    owner and group.

    @param uid: new owner's uid
    @type uid: int
    @param gid: new group id
    @type gid: int
    """
    self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
    # Only uid/gid are populated; the server applies just those fields.
    attr = SFTPAttributes()
    attr.st_uid, attr.st_gid = uid, gid
    # FSETSTAT operates on the open file handle (not a path).
    self.sftp._request(CMD_FSETSTAT, self.handle, attr)
|
def start_kex(self):
    """Start the GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange.

    As a server, simply wait for the client's group request; as a client,
    send our acceptable modulus bit range and wait for the chosen group.
    """
    if self.transport.server_mode:
        # The client initiates the group request; as the server we just
        # wait for it to arrive.
        self.transport._expect_packet(MSG_KEXGSS_GROUPREQ)
        return
    # request a bit range: we accept (min_bits) to (max_bits), but prefer
    # (preferred_bits). according to the spec, we shouldn't pull the
    # minimum up above 1024.
    self.gss_host = self.transport.gss_host
    m = Message()
    m.add_byte(c_MSG_KEXGSS_GROUPREQ)
    m.add_int(self.min_bits)
    m.add_int(self.preferred_bits)
    m.add_int(self.max_bits)
    self.transport._send_message(m)
    # The server should answer with the chosen group parameters next.
    self.transport._expect_packet(MSG_KEXGSS_GROUP)
|
def get_currency_symbols(self) -> List[str]:
    """Return the used currencies' symbols (mnemonics) as a list."""
    book_currencies = self.currencies.get_book_currencies()
    return [currency.mnemonic for currency in book_currencies]
|
def Kn2Der(nu, y, n=0):
    r"""Find the derivatives of :math:`K_\nu(y^{1/2})`.

    Parameters
    ----------
    nu : float
        The order of the modified Bessel function of the second kind.
    y : array of float
        The values to evaluate at.
    n : nonnegative int, optional
        The order of derivative to take. ``n == 0`` returns the function
        value itself.

    Returns
    -------
    array of float
        The ``n``-th derivative with respect to ``y``, evaluated at ``y``.

    Notes
    -----
    Uses :mod:`numpy` helpers directly: the ``scipy.asarray`` /
    ``scipy.sqrt`` / ``scipy.zeros_like`` / ``scipy.arange`` aliases of
    NumPy functions were deprecated and removed in modern SciPy releases.
    """
    import numpy as np  # local import keeps this fix self-contained

    n = int(n)
    y = np.asarray(y, dtype=float)
    sqrty = np.sqrt(y)
    if n == 0:
        return scipy.special.kv(nu, sqrty)
    # Chain rule for the composition K_nu(sqrt(y)): combine derivatives of
    # K_nu at sqrt(y) with Bell polynomials of the inner-map derivatives —
    # appears to implement Faa di Bruno's formula; confirm against the
    # definitions of fixed_poch / incomplete_bell_poly.
    K = np.zeros_like(y)
    x = np.asarray([fixed_poch(1.5 - j, j) * y ** (0.5 - j) for j in np.arange(1.0, n + 1.0)]).T
    for k in range(1, n + 1):
        K += scipy.special.kvp(nu, sqrty, n=k) * incomplete_bell_poly(n, k, x)
    return K
|
def __parseThunks(self, thunkRVA, importSection):
    """Parses the thunks and returns a list.

    Walks the IMAGE_THUNK_DATA array starting at ``thunkRVA`` until the
    null terminator (``Ordinal == 0``), building one ThunkData record per
    entry and resolving name data where possible.

    :param thunkRVA: RVA of the first thunk in the table.
    :param importSection: section object whose ``raw`` buffer holds the table.
    :return: list of ThunkData records.
    """
    offset = to_offset(thunkRVA, importSection)
    table_offset = 0
    thunks = []
    while True:
        thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
        offset += sizeof(IMAGE_THUNK_DATA)
        # A zeroed entry terminates the thunk table.
        if thunk.Ordinal == 0:
            break
        thunkData = ThunkData(header=thunk, rva=table_offset + thunkRVA, ordinal=None, importByName=None)
        # Only resolve name data when AddressOfData maps inside the file.
        if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
            self.__parseThunkData(thunkData, importSection)
        thunks.append(thunkData)
        # NOTE(review): 4-byte stride assumes 32-bit thunks — confirm this
        # parser is not used for PE32+ images where thunks are 8 bytes.
        table_offset += 4
    return thunks
|
def dump(self, obj, **kwargs):
    """Take obj for later use: using class name to namespace definition.

    Stores ``obj`` on the instance before delegating to the parent
    ``dump`` (presumably a marshmallow-style schema — confirm), so other
    methods can reference it while the schema is being generated.
    """
    self.obj = obj
    return super(JSONSchema, self).dump(obj, **kwargs)
|
def cmd_add_label(docid, label_name, color=None):
    """Arguments: <document_id> <label_name> [<label_color>]

    Add a label on a document.

    Color must be specified if the label doesn't exist yet.
    Color will be ignored if the label already exists.
    Color format must be given in hexadecimal format. Ex: #abcdef

    Possible JSON replies:
        {"status": "ok", "docid": "xxxxx", "label": "yyyyy"}
        {"status": "error", "exception": "yyy",
         "reason": "xxxx", "args": "(xxxx, )"}
    """
    dsearch = get_docsearch()
    doc = dsearch.get(docid)
    if doc is None:
        raise Exception("Document {} not found. Cannot add label on it".format(docid))
    # Reuse an existing label with this name when one is already known.
    label = next((known for known in dsearch.label_list if known.name == label_name), None)
    if not label and not color:
        raise Exception("Label {} doesn't exist yet, and no color has been provided".format(label_name))
    if not label:
        label = Label(label_name, color)
        dsearch.create_label(label)
    dsearch.add_label(doc, label)
    verbose("Label {} added on document {}".format(label_name, docid))
    reply({"docid": docid, "label": label_name, })
|
def get_appliances(self, location_id):
    """Get the appliances added for a specified location.

    Args:
        location_id (string): identifying string of the location

    Returns:
        list: dictionary objects containing appliances data
    """
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    request_url = self.__append_url_params("https://api.neur.io/v1/appliances", {"locationId": location_id, })
    response = requests.get(request_url, headers=headers)
    return response.json()
|
def get_vnetwork_vswitches_input_datacenter(self, **kwargs):
    """Auto Generated Code.

    Builds the ``get_vnetwork_vswitches`` request element with the given
    ``datacenter`` input and hands it to the callback (the instance's
    configured callback by default).
    """
    request = ET.Element("get_vnetwork_vswitches")
    input_node = ET.SubElement(request, "input")
    ET.SubElement(input_node, "datacenter").text = kwargs.pop('datacenter')
    callback = kwargs.pop('callback', self._callback)
    return callback(request)
|
def check_constraint(self, pkge=None, constr=None):
    """Checks the constraints.

    Exactly one of ``pkge`` or ``constr`` should be supplied; ``pkge``
    takes precedence when both are given.

    :param pkge: the package to check
    :type pkge: Package
    :param constr: the package constraint to check
    :type constr: PackageConstraint
    :return: whether the constraint check passed
    :rtype: bool
    :raises Exception: if neither argument is provided
    """
    if pkge is not None:
        return javabridge.call(self.jobject, "checkConstraint", "(Lweka/core/packageManagement/Package;)Z", pkge.jobject)
    if constr is not None:
        # BUG FIX: previously passed pkge.jobject here, which always raised
        # AttributeError (pkge is None on this path); use the constraint.
        return javabridge.call(self.jobject, "checkConstraint", "(Lweka/core/packageManagement/PackageConstraint;)Z", constr.jobject)
    raise Exception("Either package or package constraint must be provided!")
|
def _doy_to_datetimeindex ( doy , epoch_year = 2014 ) :
"""Convert a day of year scalar or array to a pd . DatetimeIndex .
Parameters
doy : numeric
Contains days of the year
Returns
pd . DatetimeIndex"""
|
doy = np . atleast_1d ( doy ) . astype ( 'float' )
epoch = pd . Timestamp ( '{}-12-31' . format ( epoch_year - 1 ) )
timestamps = [ epoch + dt . timedelta ( days = adoy ) for adoy in doy ]
return pd . DatetimeIndex ( timestamps )
|
def insert(name, table='filter', family='ipv4', **kwargs):
    '''.. versionadded:: 2014.1.0

    Insert a rule into a chain

    name
        A user-defined name to call this rule by in another part of a state or
        formula. This should not be an actual rule.

    table
        The table that owns the chain that should be modified

    family
        Networking family, either ipv4 or ipv6

    position
        The numerical representation of where the rule should be inserted into
        the chain. Note that ``-1`` is not a supported position value.

    All other arguments are passed in with the same name as the long option
    that would normally be used for iptables, with one exception: ``--state`` is
    specified as `connstate` instead of `state` (not to be confused with
    `ctstate`).

    Jump options that doesn't take arguments should be passed in with an empty
    string.
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # Aggregated mode: a list of rule dicts was passed in; recurse once per
    # rule and collect the per-rule comments/changes.
    if 'rules' in kwargs:
        ret['changes']['locale'] = []
        comments = []
        save = False
        for rule in kwargs['rules']:
            # Strip bookkeeping keys that must not reach the recursive call.
            if 'rules' in rule:
                del rule['rules']
            if '__agg__' in rule:
                del rule['__agg__']
            # Defer saving until all rules are applied; remember the target
            # file (or True for the default file) if one was requested.
            if 'save' in rule and rule['save']:
                save = True
                if rule['save'] is not True:
                    save_file = rule['save']
                else:
                    save_file = True
                rule['save'] = False
            _ret = insert(**rule)
            if 'locale' in _ret['changes']:
                ret['changes']['locale'].append(_ret['changes']['locale'])
            comments.append(_ret['comment'])
            # NOTE(review): only the LAST rule's result is kept — confirm
            # whether a failed earlier rule should win instead.
            ret['result'] = _ret['result']
        if save:
            if save_file is True:
                save_file = None  # None selects the default save file
            __salt__['iptables.save'](save_file, family=family)
        if not ret['changes']['locale']:
            del ret['changes']['locale']
        ret['comment'] = '\n'.join(comments)
        return ret
    # Single-rule mode below.
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]
    kwargs['name'] = name
    kwargs['table'] = table
    # Build both the bare rule (for the existence check) and the full
    # command string (for user-facing comments).
    rule = __salt__['iptables.build_rule'](family=family, **kwargs)
    command = __salt__['iptables.build_rule'](full=True, family=family, command='I', **kwargs)
    if __salt__['iptables.check'](table, kwargs['chain'], rule, family) is True:
        ret['result'] = True
        ret['comment'] = 'iptables rule for {0} already set for {1} ({2})'.format(name, family, command.strip())
        if 'save' in kwargs and kwargs['save']:
            if kwargs['save'] is not True:
                filename = kwargs['save']
            else:
                filename = None
            saved_rules = __salt__['iptables.get_saved_rules'](family=family)
            _rules = __salt__['iptables.get_rules'](family=family)
            __rules = []
            for table in _rules:
                for chain in _rules[table]:
                    __rules.append(_rules[table][chain].get('rules'))
            __saved_rules = []
            for table in saved_rules:
                for chain in saved_rules[table]:
                    __saved_rules.append(saved_rules[table][chain].get('rules'))
            # Only save if rules in memory are different than saved rules
            if __rules != __saved_rules:
                out = __salt__['iptables.save'](filename, family=family)
                ret['comment'] += ('\nSaved iptables rule {0} for {1}\n' '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    if __opts__['test']:
        # Test (dry-run) mode: report what would change without applying.
        ret['comment'] = 'iptables rule for {0} needs to be set for {1} ({2})'.format(name, family, command.strip())
        return ret
    # NOTE(review): a falsy return from iptables.insert is treated as
    # success here — confirm the execution module's return convention.
    if not __salt__['iptables.insert'](table, kwargs['chain'], kwargs['position'], rule, family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(name, command.strip(), family)
        if 'save' in kwargs:
            if kwargs['save']:
                out = __salt__['iptables.save'](filename=None, family=family)
                ret['comment'] = ('Set and saved iptables rule {0} for {1}\n' '{2}\n{3}').format(name, family, command.strip(), out)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to set iptables rule for {0}.\n' 'Attempted rule was {1}').format(name, command.strip())
        return ret
|
def bulk_update(cls, files, api=None):
    """This call updates the details for multiple specified files.

    Use this call to set new information for the files, thus replacing
    all existing information and erasing omitted parameters. For each
    of the specified files, the call sets a new name, new tags and
    metadata.

    :param files: List of file instances.
    :param api: Api instance.
    :return: List of FileBulkRecord objects.
    """
    if not files:
        raise SbgError('Files are required.')
    api = api or cls._API
    # One payload item per file, mirroring the server's bulk schema.
    items = [
        {
            'id': item.id,
            'name': item.name,
            'tags': item.tags,
            'metadata': item.metadata,
        }
        for item in files
    ]
    logger.info('Updating files in bulk.')
    response = api.post(url=cls._URL['bulk_update'], data={'items': items})
    return FileBulkRecord.parse_records(response=response, api=api)
|
def data(self, root):
    '''Convert etree.Element into a dictionary.

    Produces ``{tag: value}`` where ``value`` is either a converted scalar
    (for simple text-only leaf elements) or a container with optional
    ``attributes`` and ``children`` keys. ``self.dict``/``self.list``
    supply the container types and ``self._fromstring`` the scalar
    conversion — confirm their semantics on the class.
    '''
    value = self.dict()
    # Add attributes specific 'attributes' key
    if root.attrib:
        value['attributes'] = self.dict()
        for attr, attrval in root.attrib.items():
            value['attributes'][unicode(attr)] = self._fromstring(attrval)
    # Add children to specific 'children' key
    children_list = self.list()
    # Skip comments / processing instructions, whose tag is not a string.
    children = [node for node in root if isinstance(node.tag, basestring)]
    # Add root text
    if root.text and self.text_content is not None:
        text = root.text.strip()
        if text:
            if self.simple_text and len(children) == len(root.attrib) == 0:
                # Pure text leaf: collapse to its converted scalar value.
                value = self._fromstring(text)
            else:
                # Mixed content: the text becomes the first child entry.
                children_list = [self._fromstring(text), ]
    for child in children:
        child_data = self.data(child)
        children_list.append(child_data)
    # Flatten children
    if len(root.attrib) == 0 and len(children_list) == 1:
        value = children_list[0]
    elif len(children_list) > 0:
        value['children'] = children_list
    return self.dict([(unicode(root.tag), value)])
|
def config_put(args):
    '''Install a valid method configuration into a workspace, in one of several
    ways: from a JSON file containing a config definition (both file names
    and objects are supported); as a string representing the content of such
    a JSON file; or as a dict generated from such JSON content, e.g via
    json.loads(). Note that the CLI supports only string & filename input.

    :param args: namespace with ``config``, ``project`` and ``workspace``.
    :return: True on success (a non-201 response raises via fapi).
    :raises ValueError: when ``args.config`` is not a filename, string,
        dict, or readable file object.
    '''
    config = args.config
    # Check the type BEFORE probing the filesystem: os.path.isfile() blows
    # up (TypeError/OSError) for non-path inputs such as dicts or file
    # objects, so the dict/file-object branches were unreachable before.
    if isinstance(config, str):
        if os.path.isfile(config):
            with open(config, 'r') as fp:
                config = json.loads(fp.read())
        else:
            config = json.loads(config)
    elif isinstance(config, dict):
        pass  # already parsed
    elif hasattr(config, "read"):
        config = json.loads(config.read())
    else:
        raise ValueError('Input method config must be filename, string or dict')
    r = fapi.create_workspace_config(args.project, args.workspace, config)
    fapi._check_response_code(r, [201])
    return True
|
def _to_spans(x):
    """Convert a Candidate, Mention, or Span to a list of spans."""
    if isinstance(x, Candidate):
        return [_to_span(mention) for mention in x]
    if isinstance(x, Mention):
        return [x.context]
    if isinstance(x, TemporarySpanMention):
        return [x]
    raise ValueError(f"{type(x)} is an invalid argument type")
|
def get_publisher(self, publisher_name, flags=None):
    """GetPublisher.

    [Preview API] Fetch a single gallery publisher by name.

    :param str publisher_name: publisher to look up (URL route value).
    :param int flags: optional server-side flags query parameter.
    :rtype: :class:`<Publisher> <azure.devops.v5_0.gallery.models.Publisher>`
    """
    route_values = {}
    if publisher_name is not None:
        route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
    query_parameters = {}
    if flags is not None:
        query_parameters['flags'] = self._serialize.query('flags', flags, 'int')
    # location_id / version identify the fixed REST endpoint contract.
    response = self._send(http_method='GET', location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters)
    return self._deserialize('Publisher', response)
|
def on_update_enabled(self, conf_evt):
    """Implements neighbor configuration change listener.

    Reacts to the ``enabled`` knob being flipped: (re)starts the connect
    loop when enabled, or tears down the BGP session and stops retrying
    when disabled.
    """
    enabled = conf_evt.value
    # If we do not have any protocol bound and configuration asks us to
    # enable this peer, we try to establish connection again.
    if enabled:
        LOG.info('%s enabled', self)
        if self._protocol and self._protocol.started:
            LOG.error('Tried to enable neighbor that is already enabled')
        else:
            self.state.bgp_state = const.BGP_FSM_CONNECT
            # Restart connect loop if not already running.
            if not self._connect_retry_event.is_set():
                self._connect_retry_event.set()
                LOG.debug('Starting connect loop as neighbor is enabled.')
    else:
        LOG.info('%s disabled', self)
        if self._protocol:
            # Stopping protocol will eventually trigger connection_lost
            # handler which will do some clean-up.
            # But the greenlet that is in charge of the socket may be kill
            # when we stop the protocol, hence we call connection_lost
            # here as we triggered socket to close.
            self._protocol.send_notification(BGP_ERROR_CEASE, BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN)
            self._protocol.stop()
            self._protocol = None
            self.state.bgp_state = const.BGP_FSM_IDLE
        # If this peer is not enabled any-more we stop trying to make any
        # connection.
        LOG.debug('Disabling connect-retry as neighbor was disabled')
        self._connect_retry_event.clear()
|
def skip_cycles(self) -> int:
    """The number of cycles dedicated to skips.

    Sums the numeric portion of every skip token (all non-digit
    characters are stripped before conversion).
    """
    total = 0
    for token in self.skip_tokens:
        total += int(re.sub(r'\D', '', token))
    return total
|
def bulk_add(self, item_id, ref_id=None, tags=None, time=None, title=None, url=None):
    """Add an item to list.

    See: https://getpocket.com/developer/docs/v3/modify

    :param item_id: int
    :param ref_id: tweet_id
    :param tags: list of tags
    :param time: time of action
    :param title: given title
    :param url: item url
    :return: self for chaining
    :rtype: Pocket
    """
    # NOTE(review): the parameters look unused, but _add_action appears to
    # harvest the caller's arguments by frame introspection — confirm
    # before simplifying this signature or renaming any parameter.
    self._add_action('add')
    return self
|
def ssl_proxy(self, value):
    """Sets https proxy setting.

    :Args:
     - value: The https proxy value.
    """
    # Manual proxy values are only valid with the MANUAL proxy type;
    # this raises if a conflicting type is already configured.
    self._verify_proxy_type_compatibility(ProxyType.MANUAL)
    self.proxyType = ProxyType.MANUAL
    self.sslProxy = value
|
def get_context_data(self, **kwargs):
    """Populate the context of the template
    with all published entries and all the categories.

    Also exposes the published authors under the ``authors`` key.
    """
    context = super(Sitemap, self).get_context_data(**kwargs)
    context.update({'entries': Entry.published.all(), 'categories': Category.published.all(), 'authors': Author.published.all()})
    return context
|
def set_(name, path):
    '''.. versionadded:: 0.17.0

    Sets alternative for <name> to <path>, if <path> is defined
    as an alternative for <name>.

    name
        is the master name for this link group
        (e.g. pager)

    path
        is the location of one of the alternative target files.
        (e.g. /usr/bin/less)

    .. code-block:: yaml

        foo:
          alternatives.set:
            - path: /usr/bin/foo-2.0
    '''
    ret = {'name': name, 'path': path, 'result': True, 'changes': {}, 'comment': ''}
    # Nothing to do when the alternative is already pointing at path.
    current = __salt__['alternatives.show_current'](name)
    if current == path:
        ret['comment'] = 'Alternative for {0} already set to {1}'.format(name, path)
        return ret
    # The path must already be registered as an alternative for name;
    # scan the display output to verify that.
    display = __salt__['alternatives.display'](name)
    isinstalled = False
    for line in display.splitlines():
        if line.startswith(path):
            isinstalled = True
            break
    if isinstalled:
        if __opts__['test']:
            # Dry-run mode: report the pending change without applying it.
            ret['comment'] = ('Alternative for {0} will be set to path {1}').format(name, path)
            ret['result'] = None
            return ret
        __salt__['alternatives.set'](name, path)
        # Re-read to confirm the change actually took effect.
        current = __salt__['alternatives.show_current'](name)
        if current == path:
            ret['comment'] = ('Alternative for {0} set to path {1}').format(name, current)
            ret['changes'] = {'path': current}
        else:
            ret['comment'] = 'Alternative for {0} not updated'.format(name)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = ('Alternative {0} for {1} doesn\'t exist').format(path, name)
        return ret
|
def pull(collector, image, **kwargs):
    """Pull an image.

    Uses the ``artifact`` kwarg as the tag when given, otherwise falls
    back to the globally configured harpoon tag.

    :raises BadOption: when the image has no ``image_index`` configured.
    """
    if not image.image_index:
        raise BadOption("The chosen image does not have a image_index configuration", wanted=image.name)
    tag = kwargs["artifact"]
    if tag is NotSpecified:
        # BUG FIX: the configured tag was previously computed and thrown
        # away (bare expression statement), so the fallback never applied.
        tag = collector.configuration["harpoon"].tag
    if tag is not NotSpecified:
        image.tag = tag
    log.info("Pulling tag: %s", tag)
    Syncer().pull(image, ignore_missing=image.harpoon.ignore_missing)
|
def api_method(self):
    """Returns the api method to ``send`` the current API Object type.

    :raises NotImplementedError: when the subclass has not set
        ``_api_method``.
    """
    method_name = self._api_method
    if not method_name:
        raise NotImplementedError()
    return getattr(self.api, method_name)
|
def _add_arg ( self , key , value , mask = False ) :
"""Add CLI Arg for the correct language .
Args :
key ( string ) : The CLI Args key ( e . g . , - - name ) .
value ( string ) : The CLI Args value ( e . g . , bob ) .
mask ( boolean , default : False ) : Indicates whether no mask value ."""
|
if self . lang == 'python' :
self . _add_arg_python ( key , value , mask )
elif self . lang == 'java' :
self . _add_arg_java ( key , value , mask )
|
def __write_docstring(self, routine):
    """Writes the docstring for the wrapper method of a stored routine.

    Emits opening quotes, the description, parameter docs, the return
    type, and closing quotes via the dedicated helper methods.

    :param dict routine: The metadata of the stored routine.
    """
    self._write_line('"""')
    self.__write_docstring_description(routine)
    self._write_docstring_parameters(routine)
    self.__write_docstring_return_type()
    self._write_line('"""')
|
def vatu0(self, E, Lz, u0, R, retv2=False):
    """NAME:
       vatu0
    PURPOSE:
       calculate the velocity at u0
    INPUT:
       E - energy
       Lz - angular momentum
       u0 - u0
       R - radius corresponding to u0, pi/2.
       retv2= (False), if True return v^2
    OUTPUT:
       velocity
    HISTORY:
       2012-11-29 - Written - Bovy (IAS)
    """
    # v^2 = 2 (E - Phi(u0, pi/2)) - Lz^2 / R^2 : energy minus potential
    # and the angular-momentum term at the equatorial point.
    v2 = (2. * (E - actionAngleStaeckel.potentialStaeckel(u0, numpy.pi / 2., self._pot, self._delta)) - Lz ** 2. / R ** 2.)
    if retv2:
        return v2
    # Clamp tiny negative round-off values to zero before the square
    # root; the elementwise mask assumes v2 is an array here — TODO
    # confirm callers never pass scalars with retv2=False.
    v2[(v2 < 0.) * (v2 > -10. ** -7.)] = 0.
    return numpy.sqrt(v2)
|
def read(self):
    """Reads enough bytes from ``open_stream_in`` to fill the ``width``
    (if available) and converts them to an ``int``. Returns this ``int``.
    """
    # width is in bits; read the minimal whole number of bytes covering it.
    int_ = bytes_to_int(self.open_stream_in.read(math.ceil(self.width / 8)), self.width)
    self.repr_.setvalue(int_)
    # NOTE(review): the return goes through self.value rather than int_
    # directly — presumably repr_ and value share state; confirm how
    # setvalue propagates before changing this.
    return self.value.getvalue()
|
def list_exchanges_for_vhost(self, vhost):
    """A list of all exchanges in a given virtual host.

    :param vhost: The vhost name
    :type vhost: str
    """
    # quote_plus also encodes '/', which is significant in vhost names.
    encoded_vhost = urllib.parse.quote_plus(vhost)
    return self._api_get('/api/exchanges/{0}'.format(encoded_vhost))
|
def retinotopy_anchors(mesh, mdl,
                       polar_angle=None, eccentricity=None, weight=None,
                       weight_min=0.1, field_sign_weight=0, field_sign=None,
                       invert_field_sign=False, radius_weight=0,
                       radius_weight_source='Wandell2015', radius=None,
                       model_field_sign=None, model_hemi=Ellipsis,
                       scale=1, shape='Gaussian', suffix=None,
                       sigma=[0.1, 2.0, 8.0], select='close'):
    '''retinotopy_anchors(mesh, model) is intended for use with the mesh_register function and the
    retinotopy_model() function and/or the RetinotopyModel class; it yields a description of the
    anchor points that tie relevant vertices of the given mesh to points predicted by the given
    model object. Any instance of the RetinotopyModel class should work as a model argument; this
    includes SchiraModel objects as well as RetinotopyMeshModel objects such as those returned by
    the retinotopy_model() function. If the model given is a string, then it is passed to the
    retinotopy_model() function first.

    Options:
      * polar_angle (default None) specifies that the given data should be used in place of the
        'polar_angle' or 'PRF_polar_angle' property values. The given argument must be numeric and
        the same length as the number of vertices in the mesh. If None is given, then the property
        value of the mesh is used; if a list is given and any element is None, then the weight for
        that vertex is treated as a zero. If the option is a string, then the property value with
        the same name is used as the polar_angle data.
      * eccentricity (default None) specifies that the given data should be used in place of the
        'eccentricity' or 'PRF_eccentricity' property values; handled like the polar_angle option.
      * weight (default None) specifies the weight or scale of the data; handled generally like
        the polar_angle and eccentricity options, but may also be a number, indicating that all
        vertices with polar_angle and eccentricity values defined get that weight. If weight is
        left as None, then the function checks for 'weight', 'variance_explained',
        'PRF_variance_explained', and 'retinotopy_weight' values and uses the first found (in that
        order); if none is found, a value of 1 is assumed.
      * weight_min (default 0.1) specifies that the weight must be higher than the given value in
        order to be included in the fit; vertices with weights below this value have their weights
        truncated to 0.
      * scale (default 1) specifies a constant by which to multiply all weights for all anchors;
        None is interpreted as 1.
      * shape (default 'Gaussian') specifies the shape of the potential function
        (see mesh_register).
      * model_hemi (default Ellipsis) specifies the hemisphere of the model to load; if None, then
        looks for a non-specific model.
      * suffix (default None) specifies additional arguments appended to the potential-function
        description list produced by this function (see mesh_register).
      * select (default 'close') specifies a function called with (vertex label, anchor matrix)
        for every vertex given an anchor; it should return the anchors to use for that label
        (None is equivalent to lambda id, anc: anc). May also be ['close', [k]] (exclude anchors
        more than k times the mean edge-length away) or ['close', k] (exclude anchors more than
        distance k from the vertex). 'close' is equivalent to ['close', [40]].
      * sigma (default [0.1, 2.0, 8.0]) specifies how the sigma parameter is handled; None means
        no sigma value; a single number is used for all anchors; a list of three numbers is
        (min sigma, fraction of minimum distance between paired anchor points, max sigma).
      * field_sign_weight (default 0) specifies how much weight to put on the retinotopic field
        sign of the model to attenuate the weights on anchors whose empirical field sign does not
        match the model's visual-area field sign; as it approaches 1, the result approaches a
        geometric mean of the field-sign-based weight and the original weight.
      * radius_weight (default 0) analogous to field_sign_weight, but based on the difference
        between empirical RF radius and the radius predicted from the model's visual-area labels.
      * radius_weight_source (default 'Wandell2015') specifies the source for predicting RF
        radius from eccentricity and visual-area label.

    Example:
      model = neuropythy.registration.SchiraModel()
      mesh.prop('polar_angle', polar_angle_vertex_data)
      mesh.prop('eccentricity', eccentricity_vertex_data)
      mesh.prop('weight', variance_explained_vertex_data)
      registered_mesh = neuropythy.registration.mesh_register(
          mesh, ['mesh', retinotopy_anchors(mesh, model)],
          max_step_size=0.05, max_steps=2000)
    '''
    if pimms.is_str(mdl):
        # Resolve the hemisphere name, then load the named model.
        hemi = None
        if pimms.is_str(model_hemi):
            model_hemi = model_hemi.upper()
            hemnames = {k: h
                        for (h, als) in [('LH', ['LH', 'L', 'LEFT', 'RHX', 'RX']),
                                         ('RH', ['RH', 'R', 'RIGHT', 'LHX', 'LX'])]
                        for k in als}
            if model_hemi in hemnames:
                hemi = hemnames[model_hemi]
            else:
                raise ValueError('Unrecognized hemisphere name: %s' % model_hemi)
        elif model_hemi is not None:
            raise ValueError('model_hemi must be a string, Ellipsis, or None')
        mdl = retinotopy_model(mdl, hemi=hemi)
    if not isinstance(mdl, RetinotopyModel):
        raise RuntimeError('given model is not a RetinotopyModel instance!')
    if not isinstance(mesh, geo.Mesh):
        raise RuntimeError('given mesh is not a Mesh object!')
    n = mesh.vertex_count
    X = mesh.coordinates.T
    if weight_min is None:
        weight_min = 0
    # make sure we have our polar angle/eccen/weight values:
    # (weight is odd because it might be a single number, so handle that first)
    (polar_angle, eccentricity, weight) = [
        extract_retinotopy_argument(mesh, name, arg, default='empirical')
        for (name, arg) in [
            ('polar_angle', polar_angle),
            ('eccentricity', eccentricity),
            ('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]]
    # Make sure they contain no None/invalid values
    (polar_angle, eccentricity, weight) = _retinotopy_vectors_to_float(
        polar_angle, eccentricity, weight, weight_min=weight_min)
    if np.sum(weight > 0) == 0:
        raise ValueError('No positive weights found')
    idcs = np.where(weight > 0)[0]
    # Interpret the select arg if necessary (but don't apply it yet)
    select = ['close', [40]] if select == 'close' else \
             ['close', [40]] if select == ['close'] else \
             select
    if select is None:
        select = lambda a, b: b
    elif ((pimms.is_vector(select) or is_list(select) or is_tuple(select))
          and len(select) == 2 and select[0] == 'close'):
        # ['close', [k]]: cutoff is k mean-edge-lengths; ['close', k]: cutoff is distance k.
        if pimms.is_vector(select[1]):
            d = np.mean(mesh.edge_lengths) * select[1][0]
        else:
            d = select[1]
        select = lambda idx, ancs: [a for a in ancs if a[0] is not None if npla.norm(X[idx] - a) < d]
    # Okay, apply the model:
    res = mdl.angle_to_cortex(polar_angle[idcs], eccentricity[idcs])
    # An anchor row is usable only when every predicted coordinate is finite.
    oks = np.isfinite(np.sum(np.reshape(res, (res.shape[0], -1)), axis=1))
    # Organize the data; trim out those not selected
    data = [[[i for _ in r], r, [ksidx[tuple(a)] for a in r]]
            for (i, r0, ok) in zip(idcs, res, oks) if ok
            for ksidx in [{tuple(a): (k + 1) for (k, a) in enumerate(r0)}]
            for r in [select(i, r0)] if len(r) > 0]
    # Flatten out the data into arguments for Java
    idcs = [int(i) for d in data for i in d[0]]
    ancs = np.asarray([pt for d in data for pt in d[1]]).T
    labs = np.asarray([ii for d in data for ii in d[2]]).T
    # Get just the relevant weights and the scale
    wgts = np.asarray(weight[idcs] * (1 if scale is None else scale))
    # add in the field-sign weights and radius weights if requested here;
    if not np.isclose(field_sign_weight, 0) and mdl.area_name_to_id is not None:
        id2n = mdl.area_id_to_name
        if field_sign is True or field_sign is Ellipsis or field_sign is None:
            from .cmag import cmag
            r = {'polar_angle': polar_angle, 'eccentricity': eccentricity}
            field_sign = cmag(mesh, r)['field_sign']
        elif pimms.is_str(field_sign):
            field_sign = mesh.prop(field_sign)
        field_sign = np.asarray(field_sign)
        if invert_field_sign:
            field_sign = -field_sign
        fswgts = 1.0 - 0.25 * np.asarray(
            [(fs - visual_area_field_signs[id2n[l]]) if l in id2n else 0
             for (l, fs) in zip(labs, field_sign[idcs])]) ** 2
        # average the weights at some fraction with the original weights
        fswgts = field_sign_weight * fswgts + (1 - field_sign_weight) * wgts
    else:
        fswgts = None
    # add in radius weights if requested as well
    if not np.isclose(radius_weight, 0) and mdl.area_name_to_id is not None:
        id2n = mdl.area_id_to_name
        emprad = extract_retinotopy_argument(mesh, 'radius', radius, default='empirical')
        emprad = emprad[idcs]
        # rank-transform both empirical and predicted radii onto [-0.5, 0.5) before comparing
        emprad = np.argsort(np.argsort(emprad)) * (1.0 / len(emprad)) - 0.5
        eccs = eccentricity[idcs]
        prerad = np.asarray([predict_pRF_radius(ecc, id2n[lbl], source=radius_weight_source)
                             for (ecc, lbl) in zip(eccs, labs)])
        prerad = np.argsort(np.argsort(prerad)) * (1.0 / len(prerad)) - 0.5
        rdwgts = 1.0 - (emprad - prerad) ** 2
        # average the weights at some fraction with the original weights
        rdwgts = radius_weight * rdwgts + (1 - radius_weight) * wgts
    else:
        rdwgts = None
    # apply the weights (geometric means when both attenuations are active)
    if fswgts is not None:
        if rdwgts is not None:
            wgts = np.power(fswgts * rdwgts * wgts, 1.0 / 3.0)
        else:
            wgts = np.sqrt(fswgts * wgts)
    elif rdwgts is not None:
        wgts = np.sqrt(rdwgts * wgts)
    # Figure out the sigma parameter:
    if sigma is None:
        sigs = None
    elif pimms.is_number(sigma):
        sigs = sigma
    elif pimms.is_vector(sigma) and len(sigma) == 3:
        [minsig, mult, maxsig] = sigma
        # sigma of an anchor = mult * (min distance to any other anchor of the same vertex),
        # clipped into [minsig, maxsig]; lone anchors get maxsig.
        sigs = np.clip(
            [mult * min([npla.norm(a0 - a) for a in anchs if a is not a0]) if len(iii) > 1 else maxsig
             for (iii, anchs, _) in data
             for a0 in anchs],
            minsig, maxsig)
    else:
        raise ValueError('sigma must be a number or a list of 3 numbers')
    # okay, we've partially parsed the data that was given; now we can construct the final list of
    # instructions:
    # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int is the
    # documented equivalent and produces the same dtype here.
    tmp = (['anchor', shape,
            np.asarray(idcs, dtype=int),
            np.asarray(ancs, dtype=np.float64),
            'scale', np.asarray(wgts, dtype=np.float64)]
           + ([] if sigs is None else ['sigma', sigs])
           + ([] if suffix is None else suffix))
    return tmp
|
def setup_prometheus ( self , registry = None ) :
    """Set up Prometheus metrics for the application.

    :param registry: optional metrics registry to use instead of the default
    """
    kwargs = { "registry" : registry } if registry else { }
    self . metrics = PrometheusMetrics ( self . app , ** kwargs )
    # Expose the installed package version (or "unknown") as an info metric.
    try :
        version = pkg_resources . require ( self . app . name ) [ 0 ] . version
    except pkg_resources . DistributionNotFound :
        version = "unknown"
    self . metrics . info ( "app_info" , "Application info" , version = version , appname = self . app . name )
    self . app . logger . info ( "Prometheus is enabled." )
|
def projection ( self , * axes , ** kwargs ) :
    """Project onto a lower-dimensional histogram.

    The inheriting class may implement the ``_projection_class_map`` class
    attribute to suggest a class for the projection. When the requested axes
    match no entry of that map, a plain HistogramND is produced.
    """
    axes , _ = self . _get_projection_axes ( * axes )
    axes = tuple ( sorted ( axes ) )
    if axes not in self . _projection_class_map :
        return HistogramND . projection ( self , * axes , ** kwargs )
    klass = self . _projection_class_map [ axes ]
    return HistogramND . projection ( self , * axes , type = klass , ** kwargs )
|
def info ( self , msg : str ) -> None :
    """Write an info message to the Windows Application log and, when
    debugging is enabled, also to the Python disk log."""
    # noinspection PyUnresolvedReferences
    servicemanager . LogInfoMsg ( str ( msg ) )
    if not self . debugging :
        return
    log . info ( msg )
|
def resolve_config ( self ) :
    '''Resolve configuration params to native instances.

    Loads the configuration, maps each hue name onto the corresponding
    KEYWORDS attribute, and exposes the ``hues``, ``options`` and
    ``labels`` sections as named tuples on the instance.
    '''
    conf = self . load_config ( self . force_default )
    # Replace each configured hue name with the KEYWORDS attribute it names.
    for k in conf [ 'hues' ] :
        conf [ 'hues' ] [ k ] = getattr ( KEYWORDS , conf [ 'hues' ] [ k ] )

    # A named def instead of a lambda bound to a name (PEP 8 / E731).
    def as_tuple ( name , obj ) :
        # Freeze a config section into an immutable namedtuple.
        return namedtuple ( name , obj . keys ( ) ) ( ** obj )

    self . hues = as_tuple ( 'Hues' , conf [ 'hues' ] )
    self . opts = as_tuple ( 'Options' , conf [ 'options' ] )
    self . labels = as_tuple ( 'Labels' , conf [ 'labels' ] )
|
def update ( self , template_id , assumer , template_types , template_dests , template_start_standards , template_start_fees , template_add_standards , template_add_fees , session , name = None ) :
    '''taobao.delivery.template.update — update a delivery fee template.

    Sends a ``taobao.delivery.template.update`` request with the given
    template fields and returns whether the update completed.
    '''
    request = TOPRequest ( 'taobao.delivery.template.update' )
    # name is optional; identity comparison with None is the correct idiom.
    if name is not None :
        request [ 'name' ] = name
    request [ 'assumer' ] = assumer
    request [ 'template_id' ] = template_id
    request [ 'template_types' ] = template_types
    request [ 'template_dests' ] = template_dests
    request [ 'template_start_standards' ] = template_start_standards
    request [ 'template_start_fees' ] = template_start_fees
    request [ 'template_add_standards' ] = template_add_standards
    request [ 'template_add_fees' ] = template_add_fees
    self . create ( self . execute ( request , session ) , fields = [ 'complete' , ] )
    return self . complete
|
def convex_conj ( self ) :
    """Convex conjugate functional of IndicatorLpUnitBall.

    Returns
    -------
    convex_conj : GroupL1Norm
        The convex conjugate is the group L1-norm.
    """
    exponent = conj_exponent ( self . pointwise_norm . exponent )
    return GroupL1Norm ( self . domain , exponent = exponent )
|
def get_next_input ( self ) :
    """Pop and return the next line of input for this deployment.

    :return: the first pending input line, or None when none is left
    """
    # TODO: could override input if we get input coming in at the same time
    pending = Deployment . objects . get ( pk = self . id ) . input or ''
    lines = pending . splitlines ( )
    if lines :
        first_line = lines [ 0 ]
        remainder = lines [ 1 : ]
    else :
        first_line = None
        remainder = [ ]
    # Persist the remaining lines back onto the deployment record.
    Deployment . objects . filter ( pk = self . id ) . update ( input = '\n' . join ( remainder ) )
    return first_line
|
def auto ( self ) :
    """Return the highest whole-number unit as a ``(value, suffix)`` pair."""
    value = self . _value
    if value >= 10 ** 12 :
        return self . tb , 'tb'
    if value >= 10 ** 9 :
        return self . gb , 'gb'
    if value >= 10 ** 6 :
        return self . mb , 'mb'
    if value >= 10 ** 3 :
        return self . kb , 'kb'
    return self . b , 'b'
|
def get_unlabeled_entries ( self ) :
    """Return the unlabeled features together with their entry ids.

    Returns
    -------
    unlabeled_entries : list of (entry_id, feature) tuples
        Entries whose label is None.
    """
    unlabeled = [ ]
    for entry_id , entry in enumerate ( self . data ) :
        if entry [ 1 ] is None :
            unlabeled . append ( ( entry_id , entry [ 0 ] ) )
    return unlabeled
|
def get_path_url ( path , relative = False ) :
    """Return a URL for ``path``: the relative path when ``relative`` is
    true, otherwise an absolute ``file://`` URL."""
    if not relative :
        return 'file://%s' % os . path . abspath ( path )
    return os . path . relpath ( path )
|
def delete_service ( self , stack , service ) :
    """Delete a service.

    Deletes the named service and automatically destroys every container
    and storage volume the service has deployed.

    Args:
        - stack: name of the stack the service belongs to
        - service: the service name

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: an empty dict ``{}`` on success,
          ``{"error": "<errMsg string>"}`` on failure
        - ResponseInfo: the response information of the request
    """
    target = '{0}/v3/stacks/{1}/services/{2}' . format ( self . host , stack , service )
    return self . __delete ( target )
|
def check_for_usable_restore_env ( self ) :
    """Verify that the current environment can be used to restore files."""
    self . check_for_usable_environment ( )
    if os . path . isdir ( self . mackup_folder ) :
        return
    utils . error ( "Unable to find the Mackup folder: {}\n" "You might want to back up some files or get your" " storage directory synced first." . format ( self . mackup_folder ) )
|
def set_ip_port ( self , ip , port ) :
    '''Update the bound address and port, then restart the service.'''
    self . address , self . port = ip , port
    self . stop ( )
    self . start ( )
|
def match_all_concepts ( self , string ) :
    '''Return a sorted list of all :class:`Concept`\\ s matching ``string``.

    Each concept in the vocabulary is asked for its matches using this
    matcher's fuzzy-matching settings; the combined matches are then sorted.
    '''
    # The original body defined a ``multipliers`` dict that was never used;
    # it has been removed.
    matches = [ ]
    for concept in self . vocab :
        matches += concept . matches ( string , self . fuzzy , self . fname_match , self . fuzzy_fragment , self . guess )
    return sort_matches ( matches )
|
def WriteClientCrashInfo ( self , client_id , crash_info ) :
    """Record a new client crash report.

    Raises:
        db.UnknownClientError: if the client id is not known.
    """
    if client_id not in self . metadatas :
        raise db . UnknownClientError ( client_id )
    now = rdfvalue . RDFDatetime . Now ( )
    self . metadatas [ client_id ] [ "last_crash_timestamp" ] = now
    # Keep a per-client history keyed by crash timestamp.
    self . crash_history . setdefault ( client_id , { } ) [ now ] = crash_info . SerializeToString ( )
|
def compile_insert_get_id ( self , query , values , sequence = None ) :
    """Compile an insert-and-get-ID statement into SQL.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder
    :param values: The values to insert
    :type values: dict
    :param sequence: The id sequence; defaults to ``"id"`` when omitted
    :type sequence: str
    :return: The compiled statement
    :rtype: str
    """
    returning = self . wrap ( sequence if sequence is not None else "id" )
    return "%s RETURNING %s" % ( self . compile_insert ( query , values ) , returning )
|
def stop ( self ) :
    """Stop the main thread and all of its child threads.

    BLOCKS UNTIL ALL CHILD THREADS HAVE STOPPED, THEN RUNS sys.exit().
    May only be called by the main thread on the main-thread object.
    """

    global DEBUG
    self_thread = Thread . current ( )
    # Error unless the caller is the main thread AND this object is that same
    # thread (i.e. stop() may only be invoked as MAIN_THREAD.stop() from main).
    if self_thread != MAIN_THREAD or self_thread != self :
        Log . error ( "Only the main thread can call stop() on main thread" )
    DEBUG = True
    self . please_stop . go ( )
    join_errors = [ ]
    # Snapshot the children under lock so the list cannot change while stopping.
    with self . child_lock :
        children = copy ( self . children )
    # Signal children to stop in reverse creation order, collecting any errors
    # instead of raising so every child still gets signalled.
    for c in reversed ( children ) :
        DEBUG and c . name and Log . note ( "Stopping thread {{name|quote}}" , name = c . name )
        try :
            c . stop ( )
        except Exception as e :
            join_errors . append ( e )
    # Now wait for each child to actually finish.
    for c in children :
        DEBUG and c . name and Log . note ( "Joining on thread {{name|quote}}" , name = c . name )
        try :
            c . join ( )
        except Exception as e :
            join_errors . append ( e )
        DEBUG and c . name and Log . note ( "Done join on thread {{name|quote}}" , name = c . name )
    if join_errors :
        Log . error ( "Problem while stopping {{name|quote}}" , name = self . name , cause = unwraplist ( join_errors ) )
    # Tear down logging, timers and profiling last, then exit the process.
    self . stop_logging ( )
    self . timers . stop ( )
    self . timers . join ( )
    write_profiles ( self . cprofiler )
    DEBUG and Log . note ( "Thread {{name|quote}} now stopped" , name = self . name )
    sys . exit ( )
|
def symbolic_Rz_matrix ( symbolic_theta ) :
    """Symbolic rotation matrix about the Z axis."""
    c = sympy . cos ( symbolic_theta )
    s = sympy . sin ( symbolic_theta )
    return sympy . Matrix ( [
        [ c , - s , 0 ] ,
        [ s , c , 0 ] ,
        [ 0 , 0 , 1 ] ,
    ] )
|
def set_title ( self , value : Union [ Literal , Identifier , str ] , lang : str = None ) :
    """Set the DC Title literal value.

    :param value: Value of the title node
    :param lang: Language in which the value is expressed
    """
    title_key = DC . title
    return self . metadata . add ( key = title_key , value = value , lang = lang )
|
def decode ( self ) :
    "Decode self.buffer, populating instance variables and return self."
    buflen = len ( self . buffer )
    tftpassert ( buflen >= 4 , "malformed ERR packet, too short" )
    log . debug ( "Decoding ERR packet, length %s bytes" , buflen )
    if buflen > 4 :
        # Normal packet: opcode, errorcode, message, trailing NUL.
        log . debug ( "Good ERR packet > 4 bytes" )
        fmt = b"!HH%dsx" % ( len ( self . buffer ) - 5 )
        log . debug ( "Decoding ERR packet with fmt: %s" , fmt )
        self . opcode , self . errorcode , self . errmsg = struct . unpack ( fmt , self . buffer )
    else :
        # Tolerate a message-less 4-byte packet despite the RFC.
        log . debug ( "Allowing this affront to the RFC of a 4-byte packet" )
        fmt = b"!HH"
        log . debug ( "Decoding ERR packet with fmt: %s" , fmt )
        self . opcode , self . errorcode = struct . unpack ( fmt , self . buffer )
    log . error ( "ERR packet - errorcode: %d, message: %s" % ( self . errorcode , self . errmsg ) )
    return self
|
def validate_auth_mechanism ( option , value ) :
    """Validate the authMechanism URI option."""
    # CRAM-MD5 is for server testing only. Undocumented,
    # unsupported, may be removed at any time. You have
    # been warned.
    if value in MECHANISMS or value == 'CRAM-MD5' :
        return value
    raise ValueError ( "%s must be in %s" % ( option , tuple ( MECHANISMS ) ) )
|
def _create ( self , uri , body , records = None , subdomains = None , return_none = False , return_raw = False , ** kwargs ) :
    """Handle the communication with the API when creating a new resource
    managed by this class.

    Since DNS works completely differently for create() than the other
    APIs, this method overrides the default BaseManager behavior.

    If 'records' are supplied, they should be a list of dicts. Each
    record dict should have the following format:

        {"name": "example.com",
         "type": "A",
         "data": "192.0.2.17",
         "ttl": 86400}

    If 'subdomains' are supplied, they should be a list of dicts. Each
    subdomain dict should have the following format:

        {"name": "sub1.example.com",
         "comment": "1st sample subdomain",
         "emailAddress": "sample@rackspace.com"}
    """
    self . run_hooks ( "modify_body_for_create" , body , ** kwargs )
    resp , resp_body = self . _async_call ( uri , body = body , method = "POST" , error_class = exc . DomainCreationFailed )
    # The API answers with a list under the response key; the newly created
    # resource is its first entry.
    created = resp_body [ self . response_key ] [ 0 ]
    return self . resource_class ( self , created )
|
def tofile ( self , filepath = None ) :
    """Save the configuration into a file and return its path.

    Convenience method.

    :param str|unicode filepath: Filepath to save the configuration into.
        When omitted, a temporary file is generated automatically; when a
        directory, ``<alias>.ini`` is created inside it.
    :rtype: str|unicode
    """
    if filepath is None :
        # Auto-generate a temp file named after the alias.
        with NamedTemporaryFile ( prefix = '%s_' % self . alias , suffix = '.ini' , delete = False ) as f :
            filepath = f . name
    else :
        filepath = os . path . abspath ( filepath )
        if os . path . isdir ( filepath ) :
            filepath = os . path . join ( filepath , '%s.ini' % self . alias )
    with open ( filepath , 'w' ) as out :
        out . write ( self . format ( ) )
        out . flush ( )
    return filepath
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.