signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def output_sizes(self):
    """Return a tuple with the output size of every layer.

    Entries in ``self._output_sizes`` may be callables (lazily computed
    sizes); these are invoked, while plain values pass through unchanged.
    """
    resolved = []
    for size in self._output_sizes:
        resolved.append(size() if callable(size) else size)
    return tuple(resolved)
|
def send(self, data, opcode=ABNF.OPCODE_TEXT):
    """Send a message over the websocket.

    data: message to send. If you set opcode to OPCODE_TEXT,
        data must be a utf-8 string or unicode.
    opcode: operation code of data. Default is OPCODE_TEXT.

    Raises WebSocketConnectionClosedException when there is no socket
    or the underlying send reports zero bytes written.
    """
    if not self.sock:
        raise WebSocketConnectionClosedException("Connection is already closed.")
    if self.sock.send(data, opcode) == 0:
        raise WebSocketConnectionClosedException("Connection is already closed.")
|
def _validate_read_indexer(self, key, indexer, axis, raise_missing=False):
    """Check that indexer can be used to return a result (e.g. at least one
    element was found, unless the list of keys was actually empty).

    Parameters
    ----------
    key : list-like
        Target labels (only used to show correct error message)
    indexer : array-like of booleans
        Indices corresponding to the key (with -1 indicating not found)
    axis : int
        Dimension on which the indexing is being made
    raise_missing : bool
        Whether to raise a KeyError if some labels are not found. Will be
        removed in the future, and then this method will always behave as
        if raise_missing=True.

    Raises
    ------
    KeyError
        If at least one key was requested but none was found, and
        raise_missing=True.
    """
    ax = self.obj._get_axis(axis)
    # An empty key always validates: nothing was requested, so nothing
    # can be missing.
    if len(key) == 0:
        return
    # Count missing values: entries of -1 in the indexer mark labels
    # that were not found on the axis.
    missing = (indexer < 0).sum()
    if missing:
        if missing == len(indexer):
            # Nothing matched at all -> always an error, regardless of
            # raise_missing.
            raise KeyError("None of [{key}] are in the [{axis}]".format(key=key, axis=self.obj._get_axis_name(axis)))
        # We (temporarily) allow for some missing keys with .loc, except in
        # some cases (e.g. setting) in which "raise_missing" will be False
        if not (self.name == 'loc' and not raise_missing):
            not_found = list(set(key) - set(ax))
            raise KeyError("{} not in index".format(not_found))
        # we skip the warning on Categorical/Interval
        # as this check is actually done (check for
        # non-missing values), but a bit later in the
        # code, so we want to avoid warning & then
        # just raising
        _missing_key_warning = textwrap.dedent("""
        Passing list-likes to .loc or [] with any missing label will raise
        KeyError in the future, you can use .reindex() as an alternative.
        See the documentation here:
        https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""")  # noqa
        if not (ax.is_categorical() or ax.is_interval()):
            warnings.warn(_missing_key_warning, FutureWarning, stacklevel=6)
|
def find_primitive(cell, symprec=1e-5, angle_tolerance=-1.0):
    """Search for the primitive cell of the input cell.

    On success a ``(lattice, positions, numbers)`` tuple is returned.
    ``None`` is returned when the cell cannot be expanded or when no
    primitive cell is found.
    """
    _set_no_error()
    lattice, positions, numbers, _ = _expand_cell(cell)
    if lattice is None:
        return None
    natoms = spg.primitive(lattice, positions, numbers, symprec, angle_tolerance)
    _set_error_message()
    if natoms <= 0:
        return None
    # Transpose the lattice back into row-vector convention and keep only
    # the atoms belonging to the primitive cell.
    prim_lattice = np.array(lattice.T, dtype='double', order='C')
    prim_positions = np.array(positions[:natoms], dtype='double', order='C')
    prim_numbers = np.array(numbers[:natoms], dtype='intc')
    return (prim_lattice, prim_positions, prim_numbers)
|
def is_frameshift_len(mut_df):
    """Return a boolean Series flagging frameshift mutations.

    The call is purely length based: an indel whose length is not a
    multiple of three is labelled a frameshift. It may therefore be
    fooled by frameshifts at exon-intron boundaries or other odd cases.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format

    Returns
    -------
    is_fs : pd.Series
        pandas series indicating if mutations are frameshifts
    """
    # Prefer the pre-computed 0-based indel length when present,
    # otherwise derive it from the coordinates.
    if 'indel len' in mut_df.columns:
        indel_len = mut_df['indel len']
    else:
        indel_len = compute_indel_length(mut_df)
    # Only lengths that are not multiples of 3 shift the reading frame.
    is_fs = (indel_len % 3) != 0
    # Clear rows that are not insertions/deletions at all, so single
    # base substitutions are never counted as frameshifts.
    not_indel = ~((mut_df['Reference_Allele'] == '-') | (mut_df['Tumor_Allele'] == '-'))
    is_fs[not_indel] = False
    return is_fs
|
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True,
              retry=None, max_retry=None, retry_sleep=None):
    """Generic REST call worker function.

    **Parameters:**

      - **url:** URL for the REST call
      - **method:** METHOD for the REST call
      - **data:** Optional DATA for the call (for POST/PUT/etc.)
      - **sensitive:** Flag if content request/response should be hidden from logging functions
      - **timeout:** Requests Timeout
      - **content_json:** Bool on whether the Content-Type header should be set to application/json
      - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.
      - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.
      - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.

    **Returns:** Requests.Response object, extended with:

      - **cgx_status**: Bool, True if a successful CloudGenix response, False if error.
      - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid
        responses will be converted to a Dict response.
    """
    # pull retry related items from Constructor if not specified.
    if timeout is None:
        timeout = self.rest_call_timeout
    if retry is not None:  # Someone using deprecated retry code. Notify.
        sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")
    if max_retry is not None:  # Someone using deprecated retry code. Notify.
        sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")
    if retry_sleep is not None:  # Someone using deprecated retry code. Notify.
        # BUGFIX: this warning previously (incorrectly) named 'max_retry'.
        sys.stderr.write("WARNING: 'retry_sleep' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")
    # Get logging level, use this to bypass logging functions with possible large content if not set.
    logger_level = api_logger.getEffectiveLevel()
    # populate headers and cookies from session.
    if content_json and method.lower() not in ['get', 'delete']:
        headers = {'Content-Type': 'application/json'}
    else:
        headers = {}
    # add session headers
    headers.update(self._session.headers)
    cookie = self._session.cookies.get_dict()
    # make sure data is serialized if a structure was passed in.
    if isinstance(data, (list, dict)):
        data = json.dumps(data)
    api_logger.debug('REST_CALL URL = %s', url)
    # make request
    try:
        if not sensitive:
            api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n',
                             method.upper(), url, headers, cookie, data)
        # Actual request
        response = self._session.request(method, url, data=data, verify=self.ca_verify_filename,
                                         stream=True, timeout=timeout, headers=headers,
                                         allow_redirects=False)
        # Request complete - lets parse.
        # if it's a non-CGX-good response, return with cgx_status = False
        if response.status_code not in [requests.codes.ok, requests.codes.no_content,
                                        requests.codes.found, requests.codes.moved]:
            # Simple JSON debug
            if not sensitive:
                try:
                    api_logger.debug('RESPONSE HEADERS: %s\n',
                                     json.dumps(json.loads(text_type(response.headers)), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers))
                try:
                    api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE: %s\n', text_type(response.text))
            else:
                api_logger.debug('RESPONSE NOT LOGGED (sensitive content)')
            api_logger.debug("Error, non-200 response received: %s", response.status_code)
            # CGX extend requests.Response for return
            response.cgx_status = False
            response.cgx_content = self._catch_nonjson_streamresponse(response.text)
            return response
        else:
            # Simple JSON debug
            if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET):
                try:
                    api_logger.debug('RESPONSE HEADERS: %s\n',
                                     json.dumps(json.loads(text_type(response.headers)), indent=4))
                    api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers))
                    api_logger.debug('RESPONSE: %s\n', text_type(response.text))
            elif sensitive:
                api_logger.debug('RESPONSE NOT LOGGED (sensitive content)')
            # CGX extend requests.Response for return
            response.cgx_status = True
            response.cgx_content = self._catch_nonjson_streamresponse(response.text)
            return response
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError,
            urllib3.exceptions.MaxRetryError) as e:
        api_logger.info("Error, %s.", text_type(e))
        # make a requests.Response object for return since we didn't get one.
        # BUGFIX: previously assigned the *class* (requests.Response), which
        # set cgx_status/cgx_content on the class object globally.
        response = requests.Response()
        # CGX extend requests.Response for return
        response.cgx_status = False
        response.cgx_content = {'_error': [{'message': 'REST Request Exception: {}'.format(e),
                                            'data': {}, }]}
        return response
|
def _ha_return_method ( func ) :
'''Method decorator for ' return type ' methods'''
|
def wrapped ( self , * args , ** kw ) :
self . _reset_retries ( )
while ( True ) : # switch between all namenodes
try :
return func ( self , * args , ** kw )
except RequestError as e :
self . __handle_request_error ( e )
except socket . error as e :
self . __handle_socket_error ( e )
return wrapped
|
def _realize(self, master, element):
    """Build a widget from an xml element, using master as its parent."""
    data = data_xmlnode_to_dict(element, self.translator)
    cname = data['class']
    uniqueid = data['id']
    # Try a lazy import before giving up on an unknown class name.
    if cname not in CLASS_MAP:
        self._import_class(cname)
    if cname not in CLASS_MAP:
        raise Exception('Class "{0}" not mapped'.format(cname))
    self._pre_process_data(data)
    parent = CLASS_MAP[cname].builder.factory(self, data)
    widget = parent.realize(master)
    self.objects[uniqueid] = parent
    # Recursively realize and attach all child objects.
    for child_node in element.findall("./child"):
        child_obj = self._realize(parent, child_node.find('./object'))
        parent.add_child(child_obj)
    parent.configure()
    parent.layout()
    return parent
|
def load_signing_key(signing_key, crypto_backend=default_backend()):
    """Load an EC private key from one of several representations.

    ``signing_key`` may be an ``EllipticCurvePrivateKey`` (returned
    as-is), a hex string, a PEM string, or DER bytes.

    crypto_backend: Optional: crypto backend object from the
    "cryptography" python library.

    Raises ``ValueError`` for public-key material or non-string input,
    and ``InvalidPrivateKeyError`` when the key cannot be parsed.
    """
    if isinstance(signing_key, EllipticCurvePrivateKey):
        return signing_key
    if not isinstance(signing_key, (str, unicode)):
        raise ValueError('Signing key must be in string or unicode format.')
    # Refuse obvious public-key material early.
    invalid_strings = [b'-----BEGIN PUBLIC KEY-----']
    if any(marker in signing_key for marker in invalid_strings):
        raise ValueError('Signing key must be a private key, not a public key.')
    if not is_hex(signing_key):
        # Assume PEM-encoded private key.
        # BUGFIX: narrowed bare 'except:' clauses to 'except Exception:'
        # so SystemExit/KeyboardInterrupt are no longer swallowed.
        try:
            return load_pem_private_key(signing_key, password=None, backend=crypto_backend)
        except Exception:
            raise InvalidPrivateKeyError()
    # Hex input: try converting to PEM first; fall back to DER below.
    try:
        private_key_pem = ECPrivateKey(signing_key).to_pem()
    except Exception:
        # Not parseable as a raw EC key; attempt DER instead.
        pass
    else:
        try:
            return load_pem_private_key(private_key_pem, password=None, backend=crypto_backend)
        except Exception:
            raise InvalidPrivateKeyError()
    try:
        return load_der_private_key(signing_key, password=None, backend=crypto_backend)
    except Exception:
        traceback.print_exc()
        raise InvalidPrivateKeyError()
|
def merge_parts(self, version_id=None, **kwargs):
    """Merge the uploaded parts into a single object version."""
    self.file.update_checksum(**kwargs)
    with db.session.begin_nested():
        merged = ObjectVersion.create(
            self.bucket,
            self.key,
            _file_id=self.file_id,
            version_id=version_id,
        )
        self.delete()
    return merged
|
def get_file_systems(self):
    """Create a map of mounted filesystems on the machine.

    iostat(1): Each sector has size of 512 bytes.

    Returns:
        st_dev -> FileSystem(device, mount_point) dict, or None when
        /proc/mounts is unreadable and psutil is unavailable.
    """
    result = {}
    if os.access('/proc/mounts', os.R_OK):
        # BUGFIX: the file handle was previously left open on exceptions
        # (and shadowed the 'file' builtin); 'with' guarantees closing.
        with open('/proc/mounts') as mounts:
            for line in mounts:
                try:
                    mount = line.split()
                    device = mount[0]
                    mount_point = mount[1]
                    fs_type = mount[2]
                except (IndexError, ValueError):
                    continue
                # Skip the filesystem if it is not in the list of valid
                # filesystems
                if fs_type not in self.filesystems:
                    self.log.debug("Ignoring %s since it is of type %s " + " which is not in the list of filesystems.", mount_point, fs_type)
                    continue
                # Process the filters
                if self.exclude_reg.search(mount_point):
                    self.log.debug("Ignoring %s since it is in the " + "exclude_filter list.", mount_point)
                    continue
                if ('/' in device or device == 'tmpfs') and mount_point.startswith('/'):
                    try:
                        stat = os.stat(mount_point)
                    except OSError:
                        self.log.debug("Path %s is not mounted - skipping.", mount_point)
                        continue
                    # Keep only the first mount seen per device.
                    if stat.st_dev in result:
                        continue
                    result[stat.st_dev] = {
                        'device': os.path.realpath(device),
                        'mount_point': mount_point,
                        'fs_type': fs_type,
                    }
        return result
    # Fall back to psutil when /proc/mounts is not readable.
    if not psutil:
        self.log.error('Unable to import psutil')
        return None
    for partition in psutil.disk_partitions(False):
        result[len(result)] = {
            'device': os.path.realpath(partition.device),
            'mount_point': partition.mountpoint,
            'fs_type': partition.fstype,
        }
    return result
|
def slice_upload(cookie, data):
    '''Upload one slice of a large file.

    On success the server returns the MD5 of this slice, which is used
    later for the final file merge; a failed slice must be re-uploaded.
    No target path is needed - the uploaded data is kept in a temporary
    directory on the server.

    data - the bytes of this file slice.
    '''
    url = ''.join([
        const.PCS_URL_C,
        'file?method=upload&type=tmpfile&app_id=250528',
        '&',
        cookie.sub_output('BDUSS'),
    ])
    headers = {'Accept': const.ACCEPT_HTML, 'Origin': const.PAN_URL}
    req = net.post_multipart(url, headers, [], [('file', ' ', data)])
    return json.loads(req.data.decode()) if req else None
|
def iter_labels(self, number=-1, etag=None):
    """Iterate over the labels for every issue associated with this
    milestone.

    .. versionchanged:: 0.9
        Add etag parameter.

    :param int number: (optional), number of labels to return. Default: -1
        returns all available labels.
    :param str etag: (optional), ETag header from a previous response
    :returns: generator of :class:`Label <github3.issues.label.Label>`\\ s
    """
    labels_url = self._build_url('labels', base_url=self._api)
    return self._iter(int(number), labels_url, Label, etag=etag)
|
def network_running(name, bridge, forward, vport=None, tag=None, autostart=True,
                    connection=None, username=None, password=None):
    '''Defines and starts a new network with specified arguments.

    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    .. code-block:: yaml

        domain_name:
          virt.network_define

    .. code-block:: yaml

        network_name:
          virt.network_define:
            - bridge: main
            - forward: bridge
            - vport: openvswitch
            - tag: 180
            - autostart: True
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        info = __salt__['virt.network_info'](name, connection=connection,
                                             username=username, password=password)
        if not info:
            # Network does not exist yet: define it and start it.
            __salt__['virt.network_define'](name, bridge, forward, vport, tag=tag,
                                            autostart=autostart, start=True,
                                            connection=connection, username=username,
                                            password=password)
            ret['changes'][name] = 'Network defined and started'
            ret['comment'] = 'Network {0} defined and started'.format(name)
        elif info['active']:
            ret['comment'] = 'Network {0} exists and is running'.format(name)
        else:
            # Defined but stopped: just start it.
            __salt__['virt.network_start'](name, connection=connection,
                                           username=username, password=password)
            ret['changes'][name] = 'Network started'
            ret['comment'] = 'Network {0} started'.format(name)
    except libvirt.libvirtError as err:
        ret['result'] = False
        ret['comment'] = err.get_error_message()
    return ret
|
def truncate(num, precision=0):
    """Deprecated, use decimal_to_precision instead"""
    if precision <= 0:
        return int(Exchange.truncate_to_string(num, precision))
    scale = math.pow(10, precision)
    return math.trunc(num * scale) / scale
|
def sset_loop(args):
    '''Loop over all sample sets in a workspace, performing a func.'''
    # Ensure that the requested action is a valid fiss_cmd
    fiss_func = __cmd_to_func(args.action)
    if not fiss_func:
        eprint("invalid FISS cmd '" + args.action + "'")
        return 1
    # First get the sample set names
    r = fapi.get_entities(args.project, args.workspace, "sample_set")
    fapi._check_response_code(r, 200)
    sample_sets = [entity['name'] for entity in r.json()]
    args.entity_type = "sample_set"
    for sset in sample_sets:
        print('\n# {0}::{1}/{2} {3}'.format(args.project, args.workspace, sset, args.action))
        args.entity = sset
        # Note how this code is similar to how args.func is called in
        # main so it may make sense to try to a common method for both
        try:
            result = fiss_func(args)
        except Exception as e:
            status = __pretty_print_fc_exception(e)
            if not args.keep_going:
                return status
            # BUGFIX: when an error is swallowed via keep_going there is
            # no result for this sample set; previously execution fell
            # through and printed a stale result from an earlier
            # iteration (or raised NameError on the first one).
            continue
        printToCLI(result)
    return 0
|
def match_abstract_str(cls):
    """For a given abstract or match rule meta-class returns a nice string
    representation for the body."""

    def r(s):
        # Recursively render a parsing-expression node. Named root rules
        # that were already rendered, are base/common-rule types, or are
        # backed by a non-match textX class are emitted by name only -
        # this keeps the output short and terminates recursive grammars.
        # NOTE: 'visited' is a closure over the set assigned below; r()
        # is only ever called after that assignment.
        if s.root:
            if s in visited or s.rule_name in ALL_TYPE_NAMES or (hasattr(s, '_tx_class') and s._tx_class._tx_type is not RULE_MATCH):
                return s.rule_name
            visited.add(s)
        # Render each expression kind with PEG-like notation.
        if isinstance(s, Match):
            result = text(s)
        elif isinstance(s, OrderedChoice):
            result = "|".join([r(x) for x in s.nodes])
        elif isinstance(s, Sequence):
            result = " ".join([r(x) for x in s.nodes])
        elif isinstance(s, ZeroOrMore):
            result = "({})*".format(r(s.nodes[0]))
        elif isinstance(s, OneOrMore):
            result = "({})+".format(r(s.nodes[0]))
        elif isinstance(s, Optional):
            result = "{}?".format(r(s.nodes[0]))
        elif isinstance(s, SyntaxPredicate):
            result = ""
        # NOTE(review): if s matches none of the branches above, 'result'
        # is unbound and this raises - presumably all node kinds are
        # covered; confirm against the expression class hierarchy.
        return "{}{}".format(result, "-" if s.suppress else "")

    mstr = ""
    # Only render a body for rules that are not base types and whose PEG
    # rule actually belongs to them (abstract rules may borrow a rule).
    if cls.__name__ not in ALL_TYPE_NAMES and not (cls._tx_type is RULE_ABSTRACT and cls.__name__ != cls._tx_peg_rule.rule_name):
        e = cls._tx_peg_rule
        visited = set()
        if not isinstance(e, Match):
            visited.add(e)
        if isinstance(e, OrderedChoice):
            mstr = "|".join([r(x) for x in e.nodes if x.rule_name in BASE_TYPE_NAMES or not x.root])
        elif isinstance(e, Sequence):
            mstr = " ".join([r(x) for x in e.nodes])
        else:
            mstr = r(e)
        # Escape for graphviz dot output.
        mstr = dot_escape(mstr)
    return mstr
|
def _send_broker_unaware_request(self, payloads, encoder_fn, decoder_fn):
    """Attempt to send a broker-agnostic request to one of the available
    brokers. Keep trying until you succeed."""
    # Build a candidate pool from known brokers plus configured
    # bootstrap hosts, then try them in random order.
    hosts = set()
    for broker in self.brokers.values():
        host, port, afi = get_ip_port_afi(broker.host)
        # NOTE(review): broker.port is used here rather than the port
        # parsed from broker.host - presumably the metadata port wins;
        # confirm against get_ip_port_afi semantics.
        hosts.add((host, broker.port, afi))
    hosts.update(self.hosts)
    hosts = list(hosts)
    random.shuffle(hosts)
    for (host, port, afi) in hosts:
        try:
            conn = self._get_conn(host, port, afi)
        except KafkaConnectionError:
            log.warning("Skipping unconnected connection: %s:%s (AFI %s)", host, port, afi)
            continue
        request = encoder_fn(payloads=payloads)
        future = conn.send(request)
        # Block until the in-flight request resolves, feeding received
        # responses back into their futures.
        while not future.is_done:
            for r, f in conn.recv():
                f.success(r)
        if future.failed():
            # This broker failed; move on to the next candidate.
            log.error("Request failed: %s", future.exception)
            continue
        return decoder_fn(future.value)
    # Every candidate either failed to connect or failed the request.
    raise KafkaUnavailableError('All servers failed to process request: %s' % (hosts,))
|
def register_sub_command(self, sub_command, additional_ids=None):
    """Register a command as a subcommand.

    It will have its CommandDesc.command string used as id. Additional
    ids can be provided.

    Args:
        sub_command (CommandBase): Subcommand to register.
        additional_ids (List[str]): Optional list of additional ids.

    BUGFIX: the default was previously a shared mutable list ([]) - the
    classic Python mutable-default pitfall; None is the sentinel now.
    """
    if additional_ids is None:
        additional_ids = []
    self.__register_sub_command(sub_command, sub_command.command_desc().command)
    self.__additional_ids.update(additional_ids)
    for extra_id in additional_ids:
        self.__register_sub_command(sub_command, extra_id)
|
def close_alert(name=None, api_key=None, reason="Conditions are met.", action_type="Close"):
    '''Close an alert in OpsGenie; a thin wrapper around create_alert.

    Example usage with Salt's requisites and other global state
    arguments can be found above.

    Required Parameters:

    name
        Used as the alert's alias. To use the close functionality you
        must provide the same name field in both states.

    Optional Parameters:

    api_key
        The API Key copied while adding the integration in OpsGenie.

    reason
        Used as the alert's default message in OpsGenie.

    action_type
        OpsGenie supports the default values Create/Close for
        action_type. You can customize this field with OpsGenie's custom
        actions for other purposes like adding notes or acknowledging
        alerts.
    '''
    if name is None:
        raise salt.exceptions.SaltInvocationError('Name cannot be None.')
    return create_alert(name, api_key, reason, action_type)
|
def unregister_language(self, name):
    """Unregister the language with the given name from the
    :obj:`LanguagesModel.languages` class property.

    :param name: Language to unregister.
    :type name: unicode
    :return: Method success.
    :rtype: bool
    """
    if not self.get_language(name):
        raise foundations.exceptions.ProgrammingError(
            "{0} | '{1}' language isn't registered!".format(self.__class__.__name__, name))
    LOGGER.debug("> Unregistering '{0}' language.".format(name))
    for index, language in enumerate(self.__languages):
        if language.name == name:
            del self.__languages[index]
    self.sort_languages()
    return True
|
def evolvets(rng, pop, params, simplification_interval, recorder=None,
             suppress_table_indexing=False, record_gvalue_matrix=False,
             stopping_criterion=None, track_mutation_counts=False,
             remove_extinct_variants=True):
    """Evolve a population with tree sequence recording

    :param rng: random number generator
    :type rng: :class:`fwdpy11.GSLrng`
    :param pop: A population
    :type pop: :class:`fwdpy11.DiploidPopulation`
    :param params: simulation parameters
    :type params: :class:`fwdpy11.ModelParams`
    :param simplification_interval: Number of generations between simplifications.
    :type simplification_interval: int
    :param recorder: (None) A temporal sampler/data recorder.
    :type recorder: callable
    :param suppress_table_indexing: (False) Prevents edge table indexing until end of simulation
    :type suppress_table_indexing: boolean
    :param record_gvalue_matrix: (False) Whether to record genetic values into
        :attr:`fwdpy11.Population.genetic_values`.
    :type record_gvalue_matrix: boolean

    The recording of genetic values into :attr:`fwdpy11.Population.genetic_values`
    is suppressed by default. First, it is redundant with
    :attr:`fwdpy11.DiploidMetadata.g` for the common case of mutational effects on
    a single trait. Second, we save some memory by not tracking these matrices.
    However, it is useful to track these data for some cases when simulating
    multivariate mutational effects (pleiotropy).

    .. note::
        If recorder is None, then :class:`fwdpy11.NoAncientSamples` will be used.
    """
    import warnings
    # Currently, we do not support simulating neutral mutations
    # during tree sequence simulations, so we make sure that there
    # are no neutral regions/rates:
    if len(params.nregions) != 0:
        raise ValueError("Simulation of neutral mutations on tree sequences not supported (yet).")
    # Test parameters while suppressing warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Will throw exception if anything is wrong:
        params.validate()
    # Fall back to the no-op recorder/stopping criterion when the caller
    # did not provide one.
    if recorder is None:
        from ._fwdpy11 import NoAncientSamples
        recorder = NoAncientSamples()
    if stopping_criterion is None:
        from ._fwdpy11 import _no_stopping
        stopping_criterion = _no_stopping
    from ._fwdpy11 import MutationRegions
    from ._fwdpy11 import dispatch_create_GeneticMap
    from ._fwdpy11 import evolve_with_tree_sequences
    # TODO: update to allow neutral mutations
    pneutral = 0
    mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
    rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
    from ._fwdpy11 import SampleRecorder
    sr = SampleRecorder()
    # Hand everything to the C++ simulation engine; this mutates 'pop'
    # in place rather than returning a value.
    evolve_with_tree_sequences(rng, pop, sr, simplification_interval,
                               params.demography, params.mutrate_s, mm, rm,
                               params.gvalue, recorder, stopping_criterion,
                               params.pself, params.prune_selected is False,
                               suppress_table_indexing, record_gvalue_matrix,
                               track_mutation_counts, remove_extinct_variants)
|
def setlist(self, key, values):
    """Sets <key>'s list of values to <values>. Existing items with key <key>
    are first replaced with new values from <values>. Any remaining old
    items that haven't been replaced with new values are deleted, and any
    new values from <values> that don't have corresponding items with <key>
    to replace are appended to the end of the list of all items.

    If values is an empty list, [], <key> is deleted, equivalent in action
    to del self[<key>].

    Example:
      omd = omdict([(1,1), (2,2)])
      omd.setlist(1, [11, 111])
      omd.allitems() == [(1,11), (2,2), (1,111)]

      omd = omdict([(1,1), (1,11), (2,2), (1,111)])
      omd.setlist(1, [None])
      omd.allitems() == [(1,None), (2,2)]

      omd = omdict([(1,1), (1,11), (2,2), (1,111)])
      omd.setlist(1, [])
      omd.allitems() == [(2,2)]

    Returns: <self>.
    """
    # An empty <values> means "delete the key entirely".
    if not values and key in self:
        self.pop(key)
    else:
        # Pair each existing node for <key> with a replacement value;
        # _absent marks whichever side runs out first.
        it = zip_longest(list(self._map.get(key, [])), values, fillvalue=_absent)
        for node, value in it:
            if node is not _absent and value is not _absent:
                # Both present: replace in place, keeping the item's
                # original position in the overall ordering.
                node.value = value
            elif node is _absent:
                # More new values than old items: append at the end.
                self.add(key, value)
            elif value is _absent:
                # Leftover old items with no replacement: remove them
                # from both the per-key map and the item list.
                self._map[key].remove(node)
                self._items.removenode(node)
    return self
|
def search_complete(self, completed):
    """Current search thread has finished."""
    self.result_browser.set_sorting(ON)
    self.find_options.ok_button.setEnabled(True)
    self.find_options.stop_button.setEnabled(False)
    self.status_bar.hide()
    self.result_browser.expandAll()
    if self.search_thread is None:
        return
    self.sig_finished.emit()
    found = self.search_thread.get_results()
    self.stop_and_reset_thread()
    if found is None:
        return
    results, pathlist, nb, error_flag = found
    self.result_browser.show()
|
def setLocked(self, state, force=False):
    """Sets the locked state for this panel to the inputed state.

    :param state | <bool>
    :param force | <bool> apply even when the state is unchanged
    """
    if state == self._locked and not force:
        return
    self._locked = state
    tabbar = self.tabBar()
    tabbar.setLocked(state)
    # When tabs hide on lock, only show the bar for multiple tabs or
    # while unlocked; otherwise the bar is always visible.
    show_bar = True
    if self.hideTabsWhenLocked():
        show_bar = self.count() > 1 or not state
    tabbar.setVisible(show_bar)
    if tabbar.isVisible():
        self.setContentsMargins(6, tabbar.height(), 6, 6)
    else:
        self.setContentsMargins(1, 1, 1, 1)
    self.adjustSizeConstraint()
|
def fetch_releases(self, package_name):
    """Fetch (package_name, releases paired with index_url) for a package."""
    normalized = self.source.normalize_package_name(package_name)
    versions = self.source.get_package_versions(normalized)
    annotated = [(version, self.index_url) for version in versions]
    return normalized, annotated
|
def disallow(self, foreign, permission="active", account=None, threshold=None, **kwargs):
    """Remove additional access to an account by some other public
    key or account.

    :param str foreign: The foreign account that will lose access
    :param str permission: (optional) The actual permission to
        modify (defaults to ``active``)
    :param str account: (optional) the account to remove access
        from (defaults to ``default_account``)
    :param int threshold: The threshold that needs to be reached
        by signatures to be able to interact
    """
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    if permission not in ["owner", "active"]:
        # BUGFIX: message previously had an unterminated quote ("'active").
        raise ValueError("Permission needs to be either 'owner', or 'active'")
    account = Account(account, blockchain_instance=self)
    authority = account[permission]
    # Interpret 'foreign' as a public key first; fall back to an account
    # name. Bare 'except:' narrowed to 'except Exception:' throughout.
    try:
        pubkey = PublicKey(foreign, prefix=self.prefix)
        affected_items = list(filter(lambda x: x[0] == str(pubkey), authority["key_auths"]))
        authority["key_auths"] = list(filter(lambda x: x[0] != str(pubkey), authority["key_auths"]))
    except Exception:
        try:
            foreign_account = Account(foreign, blockchain_instance=self)
            affected_items = list(filter(lambda x: x[0] == foreign_account["id"], authority["account_auths"]))
            authority["account_auths"] = list(filter(lambda x: x[0] != foreign_account["id"], authority["account_auths"]))
        except Exception:
            # BUGFIX: "unvalid" -> "invalid" in the error message.
            raise ValueError("Unknown foreign account or invalid public key")
    if not affected_items:
        raise ValueError("Changes nothing!")
    removed_weight = affected_items[0][1]
    # Define threshold
    if threshold:
        authority["weight_threshold"] = threshold
    # Correct threshold (at most by the amount removed from the
    # authority)
    try:
        self._test_weights_treshold(authority)
    except Exception:
        log.critical("The account's threshold will be reduced by %d" % (removed_weight))
        authority["weight_threshold"] -= removed_weight
        self._test_weights_treshold(authority)
    op = operations.Account_update(**{
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "account": account["id"],
        permission: authority,
        "extensions": {},
    })
    if permission == "owner":
        return self.finalizeOp(op, account["name"], "owner", **kwargs)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
|
def uniqueID(size=6, chars=string.ascii_uppercase + string.digits):
    """A quick and dirty way to get a unique-ish random string.

    NOTE: uses the ``random`` module, so it is NOT suitable for
    security-sensitive tokens; use ``secrets`` for those.
    """
    # BUGFIX: 'xrange' is Python-2-only; 'range' works on both 2 and 3.
    return ''.join(random.choice(chars) for _ in range(size))
|
def youtube_id(self):
    """Extract and return the YouTube video id from the embed markup."""
    embed = self.video_embed
    if not embed:
        return ''
    match = re.search(r'/embed/([A-Za-z0-9\-=_]*)', embed)
    return match.group(1) if match else ''
|
def _get_replication_metrics(self, key, db):
    """Return the replication metrics dict suited to the postgres version.

    Uses REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or
    REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2 depending on the
    server version, caching the chosen dict per instance ``key``.
    """
    metrics = self.replication_metrics.get(key)
    if self._is_10_or_above(key, db) and metrics is None:
        self.replication_metrics[key] = dict(self.REPLICATION_METRICS_10)
        metrics = self.replication_metrics.get(key)
    elif self._is_9_1_or_above(key, db) and metrics is None:
        combined = dict(self.REPLICATION_METRICS_9_1)
        self.replication_metrics[key] = combined
        if self._is_9_2_or_above(key, db):
            combined.update(self.REPLICATION_METRICS_9_2)
        metrics = self.replication_metrics.get(key)
    return metrics
|
def query_random(num=6, kind='1'):
    '''Query up to ``num`` wikis of the given ``kind`` in random order.'''
    query = TabWiki.select().where(TabWiki.kind == kind)
    return query.order_by(peewee.fn.Random()).limit(num)
|
def get_ip(request):
    """Retrieve the remote IP address from the request data.

    If the user is behind a proxy, they may have a comma-separated list of
    IP addresses, so we need to account for that; only the first IP in the
    list is kept. Some proxying hosts also put the REMOTE_ADDR into
    HTTP_X_FORWARDED_FOR, so that header is preferred.

    **NOTE** This function was taken from django-tracking (MIT LICENSE)
    http://code.google.com/p/django-tracking/
    """
    # If neither header contains a value, just use the local loopback.
    ip_address = request.META.get('HTTP_X_FORWARDED_FOR',
                                  request.META.get('REMOTE_ADDR', '127.0.0.1'))
    if not ip_address:
        return ip_address
    # Make sure we keep one and only one IP.
    try:
        matched = IP_RE.match(ip_address)
        if matched:
            ip_address = matched.group(0)
        else:
            # No IP, probably from some dirty proxy or other device;
            # throw in some bogus IP.
            ip_address = '10.0.0.1'
    except IndexError:
        pass
    return ip_address
|
def update_field_forward_refs(field: 'Field', globalns: Any, localns: Any) -> None:
    """Try to update ForwardRefs on fields based on this Field, globalns and localns."""
    if type(field.type_) == ForwardRef:
        # Resolve the forward reference in the given namespaces, then let the
        # field rebuild its derived attributes.
        field.type_ = field.type_._evaluate(globalns, localns or None)  # type: ignore
        field.prepare()
    # Recurse into any sub-fields so nested forward refs get resolved too.
    for sub_field in field.sub_fields or []:
        update_field_forward_refs(sub_field, globalns=globalns, localns=localns)
|
def _logins(users, user_attrs=None):
    '''Build a dict mapping each user's login to the requested attributes.

    Supported attrs include: login (default, no auth required), email, bio,
    company, created_at, hireable, location, updated_at, url.

    :param users: iterable of user objects; each must have a ``login``
        attribute plus any attribute named in ``user_attrs``
    :param user_attrs: list of attribute names to extract per user;
        'login' is always the dict key and is ignored here.  May be None.
    :return: dict of ``{login: {attr: value, ...}}``
    '''
    # 'login' will be the dict index key; remove it from user_attr columns.
    # Work on a copy so the caller's list is not mutated, and tolerate None
    # (the declared default, on which the original ``'login' in user_attrs``
    # raised TypeError).
    user_attrs = [a for a in (user_attrs or []) if a != 'login']
    _users = {}
    for u in users:
        l = u.login
        logr.debug('LOGIN: {}'.format(l))
        _users[l] = {}
        for a in user_attrs:
            logr.debug('user: {}'.format(u))
            logr.debug('attr: {}'.format(a))
            _users[l][a] = getattr(u, a)
    return _users
|
def _apply_flat ( cls , f , acts ) :
"""Utility for applying f to inner dimension of acts .
Flattens acts into a 2D tensor , applies f , then unflattens so that all
dimesnions except innermost are unchanged ."""
|
orig_shape = acts . shape
acts_flat = acts . reshape ( [ - 1 , acts . shape [ - 1 ] ] )
new_flat = f ( acts_flat )
if not isinstance ( new_flat , np . ndarray ) :
return new_flat
shape = list ( orig_shape [ : - 1 ] ) + [ - 1 ]
return new_flat . reshape ( shape )
|
def end_request(req, collector_addr='tcp://127.0.0.2:2345', prefix='my_app'):
    """registers the end of a request

    Computes the elapsed time for ``req`` and pushes it to the collector.

    :param req: request, can be mostly any hash-able object
    :param collector_addr: collector address, in zeromq format
        (string, default tcp://127.0.0.2:2345)
    :param prefix: label under which to register the request
        (string, default my_app)
    :return: elapsed time in milliseconds, or None when ``req`` was never
        registered (the original code raised NameError in that case)
    """
    req_end = time()
    hreq = hash(req)
    if hreq not in requests:
        # Unknown request: nothing to report.
        return None
    req_time = (req_end - requests.pop(hreq)) * 1000
    collector = get_context().socket(zmq.PUSH)
    try:
        collector.connect(collector_addr)
        collector.send_multipart([prefix, str(req_time)])
    finally:
        # Always release the socket, even if connect/send fails.
        collector.close()
    return req_time
|
def setup_icons(self, ):
    """Set all icons on buttons

    :returns: None
    :rtype: None
    :raises: None
    """
    # Give the "add new" tool button its plus icon.
    self.addnew_tb.setIcon(get_icon('glyphicons_433_plus_bright.png', asicon=True))
|
def remove_port(uri):
    """Remove the port number from a URI

    :param uri: full URI that Twilio requested on your server
    :returns: full URI without a port number
    :rtype: str
    """
    # Keep everything before the first ':' of the network location.
    host_only = uri.netloc.partition(':')[0]
    return uri._replace(netloc=host_only).geturl()
|
def install_update_deps(self):
    """Register this package as installed and install/update its dependencies.

    Reads the repo's ``_upkg/depends`` file (one dependency per line) if it
    exists, registers each entry with the context, then installs or updates
    every repo the context says is needed.
    """
    logger.debug("")
    self._ctx.installed(self.name)
    # Are there any dependencies?
    depfile = os.path.join(self.repo_dir, '_upkg', 'depends')
    logger.debug("depfile? %s", depfile)
    if os.path.exists(depfile):
        logger.debug("Found depends file at %s", depfile)
        with open(depfile, 'r') as deps:
            for raw_line in deps:
                dep = raw_line.strip()
                logger.debug("depends: %s", dep)
                self._ctx.add_dep(nice_pkg_name(os.path.basename(dep)), dep)
    for rep in self._ctx.deps_needed:
        repo = Repo(url=rep)
        if repo.installed:
            repo.update()
        else:
            repo.install()
|
def write(self, file_name):
    """Writes the chapter object to an xhtml file.

    Args:
        file_name (str): The full name of the xhtml file to save to.

    Raises:
        ValueError: If ``file_name`` does not end with '.xhtml'.
    """
    # ``assert`` is stripped under ``python -O``; validate explicitly instead.
    # (``file_name[-6:]`` also never raised IndexError, so the old
    # ``except (AssertionError, IndexError)`` was partly dead.)
    if not file_name.endswith('.xhtml'):
        raise ValueError('filename must end with .xhtml')
    with open(file_name, 'wb') as f:
        f.write(self.content.encode('utf-8'))
|
def do(self, params):
    """Perform the underlying experiment and summarise its results.

    Our results are the summary statistics extracted from the results of
    the instances of the underlying experiment we performed.  Runs whose
    completion status was False (an error) are dropped from the
    calculations, and their exceptions are recorded under the metadata key
    :attr:`SummaryExperiment.UNDERLYING_EXCEPTIONS`.

    :param params: the parameters to the underlying experiment
    :returns: the summary statistics of the underlying results
    """
    # Run the underlying experiment and normalise its results to a list.
    rc = self.experiment().run()
    results = rc[Experiment.RESULTS]
    if not isinstance(results, list):
        results = [rc]
    # Partition into successful runs and the exceptions of failed ones.
    sresults = []
    exs = []
    for res in results:
        metadata = res[Experiment.METADATA]
        if metadata[Experiment.STATUS]:
            sresults.append(res)
        else:
            exs.append(metadata[Experiment.EXCEPTION])
    # Add extra values to our metadata record.
    self._metadata[self.UNDERLYING_RESULTS] = len(results)
    self._metadata[self.UNDERLYING_SUCCESSFUL_RESULTS] = len(sresults)
    self._metadata[self.UNDERLYING_EXCEPTIONS] = exs
    # Construct summary results.
    return self.summarise(sresults)
|
def coerce(value):
    """Turns value into a string"""
    # Already a StringCell: pass through untouched.
    if isinstance(value, StringCell):
        return value
    # Plain (byte or unicode) strings get wrapped.
    if isinstance(value, (str, unicode)):
        return StringCell(value)
    raise CoercionFailure("Cannot coerce %s to StringCell" % (value))
|
def find_files():
    """Search for the directory containing jsonld and csv files. chdir and then quit.

    Walks downward from the current working directory by ``os.chdir``-ing
    into candidate directories, so the process CWD is changed as a side
    effect.  Returns the directory containing a ``.jsonld`` file when one
    is found.

    :return none:
    """
    _dir = os.getcwd()
    _files = os.listdir()
    # Look for a jsonld file
    for _file in _files:
        if _file.endswith(".jsonld"):
            return os.getcwd()
    # No jsonld file found, try to chdir into "bag" (LiPD v1.3)
    if "bag" in _files:
        os.chdir("bag")
        _dir = find_files()
    # No "bag" dir. Try chdir into whatever dataset name dir we find (< LiPD v1.2)
    else:
        # NOTE(review): this loop chdirs into *every* subdirectory in turn
        # without returning to the parent, so after the first recursion any
        # further iterations resolve relative to the new CWD.  Presumably a
        # single dataset directory is expected here — confirm, and consider
        # a ``break`` after the first recursion.
        for _file in _files:
            if os.path.isdir(_file):
                os.chdir(_file)
                _dir = find_files()
    return _dir
|
def reftrack_status_data(rt, role):
    """Return the data for the status

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the status (None for roles other than display/edit)
    :rtype: depending on role
    :raises: None
    """
    status = rt.status()
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        # Fall back to a human-readable marker when there is no status.
        return status or "Not in scene!"
|
def sharpe(self):
    """Sharpe ratio, rounded to two decimals."""
    ratio = self.calc_sharpe(self.annualize_return, self.volatility, 0.05)
    return round(float(ratio), 2)
|
def GetAttributeNs(self, localName, namespaceURI):
    """Provides the value of the specified attribute"""
    # Thin wrapper over the libxml2 text-reader binding.
    return libxml2mod.xmlTextReaderGetAttributeNs(self._o, localName, namespaceURI)
|
def _parse_current_network_settings():
    '''Parse /etc/default/networking and return current configuration

    Returns an OrderedDict with the keys ``networking``, ``hostname``,
    ``domainname`` and ``searchdomain``.
    '''
    opts = salt.utils.odict.OrderedDict()
    opts['networking'] = ''
    if os.path.isfile(_DEB_NETWORKING_FILE):
        with salt.utils.files.fopen(_DEB_NETWORKING_FILE) as contents:
            for line in contents:
                # Bug fix: the decoded line was previously discarded, so the
                # startswith/split below ran on the raw (possibly bytes) line.
                line = salt.utils.stringutils.to_unicode(line)
                if line.startswith('#'):
                    continue
                elif line.startswith('CONFIGURE_INTERFACES'):
                    opts['networking'] = line.split('=', 1)[1].strip()
    opts['hostname'] = _parse_hostname()
    opts['domainname'] = _parse_domainname()
    opts['searchdomain'] = _parse_searchdomain()
    return opts
|
def select_mask(cls, dataset, selection):
    """Given a Dataset object and a dictionary with dimension keys and
    selection keys (i.e tuple ranges, slices, sets, lists or literals)
    return a boolean mask over the rows in the Dataset object that
    have been selected.

    Returns None when ``selection`` is empty.  Masks for different
    dimensions are ANDed together.
    """
    select_mask = None
    for dim, k in selection.items():
        if isinstance(k, tuple):
            # A tuple is shorthand for a (start, stop) range.
            k = slice(*k)
        masks = []
        alias = dataset.get_dimension(dim).name
        series = dataset.data[alias]
        if isinstance(k, slice):
            # Half-open range [start, stop): start inclusive, stop exclusive.
            if k.start is not None:
                # Workaround for dask issue #3392
                kval = util.numpy_scalar_to_python(k.start)
                masks.append(kval <= series)
            if k.stop is not None:
                kval = util.numpy_scalar_to_python(k.stop)
                masks.append(series < kval)
        elif isinstance(k, (set, list)):
            # Membership test: OR together an equality mask per element.
            iter_slc = None
            for ik in k:
                mask = series == ik
                if iter_slc is None:
                    iter_slc = mask
                else:
                    iter_slc |= mask
            masks.append(iter_slc)
        elif callable(k):
            # Predicate function applied to the whole column.
            masks.append(k(series))
        else:
            # Literal value: exact equality.
            masks.append(series == k)
        # AND this dimension's masks into the overall selection mask.
        for mask in masks:
            if select_mask is not None:
                select_mask &= mask
            else:
                select_mask = mask
    return select_mask
|
def get(self, pos):
    """Get the closest dataset.

    Searches the latitude bin of ``pos`` plus progressively wider
    neighbouring bins (widening up to 200 steps) until candidates are
    found, then delegates to ``find_closest``.

    :param pos: dict with at least a 'latitude' key
    """
    latitude = int(round(pos['latitude']))
    # Bug fix: copy the bin.  The original aliased ``self.bins[latitude]``
    # and then used ``+=`` (in-place list extend), which appended the
    # neighbouring bins into the stored bin, corrupting ``self.bins``
    # for every subsequent call.
    search_set = list(self.bins[latitude])
    i = 1
    if latitude - i >= -90:
        search_set += self.bins[latitude - i]
    if latitude + i <= 90:
        search_set += self.bins[latitude + i]
    while len(search_set) == 0 and i <= 200:
        if latitude - i >= -90:
            search_set += self.bins[latitude - i]
        if latitude + i <= 90:
            search_set += self.bins[latitude + i]
        i += 1
    return find_closest(search_set, pos)
|
def set_double_stack(socket_obj, double_stack=True):
    # type: (socket.socket, bool) -> None
    """Sets up the IPv6 double stack according to the operating system

    :param socket_obj: A socket object
    :param double_stack: If True, use the double stack, else only support IPv6
    :raise AttributeError: Python or system doesn't support V6
    :raise socket.error: Error setting up the double stack value
    """
    try:
        # Prefer the constant exposed by the socket module when available.
        opt_ipv6_only = socket.IPV6_V6ONLY
    except AttributeError:
        # Fall back to well-known per-platform values.
        if os.name == "nt":
            # Windows: see ws2ipdef.h
            opt_ipv6_only = 27
        elif platform.system() == "Linux":
            # Linux: see linux/in6.h (in recent kernels)
            opt_ipv6_only = 26
        else:
            # Unknown platform: propagate the AttributeError.
            raise
    # Setup the socket (can raise a socket.error).
    socket_obj.setsockopt(ipproto_ipv6(), opt_ipv6_only, int(not double_stack))
|
def typeof_rave_data(value):
    """Duck-type ``value`` for scrambling purposes.

    Not relying on standard Python typing alone because, for example, a
    string of '1' should be typed as an integer and not as a string or
    float, since we're trying to replace like with like when scrambling.

    :return: tuple of (type name, strptime format or None)
    """
    # Date check: first format that parses wins.
    date_formats = ['%d %b %Y', '%b %Y', '%Y', '%d %m %Y', '%m %Y',
                    '%d/%b/%Y', '%b/%Y', '%d/%m/%Y', '%m/%Y']
    for fmt in date_formats:
        try:
            datetime.datetime.strptime(value, fmt)
            # A bare 4-character value outside a plausible year range is
            # not treated as a date.
            if len(value) == 4 and (int(value) < 1900 or int(value) > 2030):
                break
            return ('date', fmt)
        except (ValueError, TypeError):
            pass
    # Time check.
    time_formats = ['%H:%M:%S', '%H:%M', '%I:%M:%S', '%I:%M',
                    '%I:%M:%S %p', '%I:%M %p']
    for fmt in time_formats:
        try:
            datetime.datetime.strptime(value, fmt)
            return ('time', fmt)
        except (ValueError, TypeError):
            pass
    # Integer check (accepts int instances and digit strings).
    try:
        if ((isinstance(value, str) and isinstance(int(value), int))
                or isinstance(value, int)):
            return ('int', None)
    except (ValueError, TypeError):
        pass
    # Float check.
    try:
        float(value)
        return ('float', None)
    except (ValueError, TypeError):
        pass
    # If no match on anything else, assume it's a string.
    return ('string', None)
|
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
    """Middle part of slicenet, connecting encoder and decoder.

    :param inputs_encoded: encoder output tensor
    :param targets: target tensor (4D; flattened to 3D internally)
    :param target_space_emb: embedding of the target space id
    :param mask: input padding mask (1.0 marks real tokens; see bias below)
    :param hparams: hyperparameters object
    :returns: tuple of (merged target representation, extra loss == 0.0)
    """
    def norm_fn(x, name):
        # Normalization helper shared by the attention and conv blocks.
        with tf.variable_scope(name, default_name="norm"):
            return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)
    # Flatten targets and embed target_space_id.
    targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
    target_space_emb = tf.tile(target_space_emb, [tf.shape(targets_flat)[0], 1, 1, 1])
    # Use attention from each target to look at input and retrieve.
    # The target-space embedding doubles as the shift-right pad value.
    targets_shifted = common_layers.shift_right(targets_flat, pad_value=target_space_emb)
    if hparams.attention_type == "none":
        targets_with_attention = tf.zeros_like(targets_shifted)
    else:
        inputs_padding_bias = (1.0 - mask) * -1e9
        # Bias to not attend to padding.
        targets_with_attention = attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=inputs_padding_bias)
    # Positional targets: merge attention and raw.
    kernel = (hparams.kernel_height, hparams.kernel_width)
    targets_merged = common_layers.subseparable_conv_block(tf.concat([targets_with_attention, targets_shifted], axis=3), hparams.hidden_size, [((1, 1), kernel)], normalizer_fn=norm_fn, padding="LEFT", separability=4, name="targets_merge")
    return targets_merged, 0.0
|
def parse(self, body):
    """Parse JSON request, storing content in object attributes.

    Args:
        body: str. HTTP request body (an already-decoded dict is also
            accepted).

    Returns:
        self
    """
    if isinstance(body, six.string_types):
        body = json.loads(body)
    # version
    self.version = body['version']
    # session
    session = body['session']
    self.session.new = session['new']
    self.session.session_id = session['sessionId']
    self.session.application.application_id = session['application']['applicationId']
    self.session.attributes = session.get('attributes') or {}
    self.session.user.user_id = session['user']['userId']
    self.session.user.access_token = session['user'].get('accessToken', 0)
    # request
    request = body['request']
    request_type = request['type']
    if request_type == 'LaunchRequest':
        self.request = LaunchRequest()
    elif request_type == 'IntentRequest':
        self.request = IntentRequest()
        self.request.intent = Intent()
        intent = request['intent']
        self.request.intent.name = intent['name']
        for name, slot in six.iteritems(intent.get('slots') or {}):
            new_slot = Slot()
            new_slot.name = slot['name']
            new_slot.value = slot.get('value')
            self.request.intent.slots[name] = new_slot
    elif request_type == 'SessionEndedRequest':
        self.request = SessionEndedRequest()
        self.request.reason = request['reason']
    # common - keep after specific requests to prevent param overwrite
    self.request.type = request_type
    self.request.request_id = request['requestId']
    self.request.timestamp = request['timestamp']
    return self
|
def loads(self, value):
    '''Returns deserialized `value`.

    Applies each serializer's ``loads`` in reverse registration order.
    '''
    result = value
    for serializer in reversed(self):
        result = serializer.loads(result)
    return result
|
def itemat(iterable, index):
    """Return the item at ``index`` position in ``iterable``.

    Uses ``__getitem__`` when available (except for dicts, which are
    indexed by iteration order here), otherwise iterates to the requested
    position.  Negative indices count from the end (requires
    ``len(iterable)``).

    :param iterable: object which provides the method __getitem__ or __iter__.
    :param int index: item position to get.
    :raises IndexError: if ``index`` is out of range.
    """
    handleindex = True
    if isinstance(iterable, dict):
        handleindex = False
    else:
        try:
            return iterable[index]
        except TypeError:
            # Not subscriptable (or non-integer indexing): fall back to
            # iteration below.
            handleindex = False
    if not handleindex:
        iterator = iter(iterable)
        if index < 0:
            # Translate to a positive position; requires a sized iterable.
            index += len(iterable)
            if index < 0:
                # Bug fix: the original fell through here and silently
                # returned None for out-of-range negative indices.
                raise IndexError(
                    "{0} index {1} out of range".format(iterable.__class__, index))
        while index >= 0:
            try:
                value = next(iterator)
            except StopIteration:
                raise IndexError(
                    "{0} index {1} out of range".format(iterable.__class__, index))
            if index == 0:
                return value
            index -= 1
|
def deleteoutputfile(project, filename, credentials=None):
    """Delete an output file (or directory) from a project.

    With an empty ``filename`` the whole project output is reset; with a
    directory name the directory is removed recursively; otherwise the
    single named output file is deleted.  Responds 200 with "Deleted" on
    success and aborts with 404 when the file cannot be found or deleted.
    """
    user, oauth_access_token = parsecredentials(credentials)
    # pylint: disable=unused-variable
    if filename:
        # Simple security: strip parent-directory components.
        filename = filename.replace("..", "")
    if not filename or len(filename) == 0:
        # Deleting all output files and resetting
        Project.reset(project, user)
        msg = "Deleted"
        return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})
        # 200
    elif os.path.isdir(Project.path(project, user) + filename):
        # Deleting specified directory
        shutil.rmtree(Project.path(project, user) + filename)
        msg = "Deleted"
        return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})
        # 200
    else:
        try:
            file = clam.common.data.CLAMOutputFile(Project.path(project, user), filename)
        except:
            # Any failure to resolve the output file maps to a 404.
            raise flask.abort(404)
        success = file.delete()
        if not success:
            raise flask.abort(404)
        else:
            msg = "Deleted"
            return withheaders(flask.make_response(msg), 'text/plain', {'Content-Length': len(msg), 'allow_origin': settings.ALLOW_ORIGIN})
|
def get_item2(self, tablename, key, attributes=None, alias=None, consistent=False, return_capacity=None):
    """Fetch a single item from a table

    Parameters
    ----------
    tablename : str
        Name of the table to fetch from
    key : dict
        Primary key dict specifying the hash key and, if applicable, the
        range key of the item.
    attributes : str or list, optional
        See docs for ProjectionExpression. If list, it will be joined by
        commas.
    alias : dict, optional
        See docs for ExpressionAttributeNames
    consistent : bool, optional
        Perform a strongly consistent read of the data (default False)
    return_capacity : {NONE, INDEXES, TOTAL}, optional
        INDEXES will return the consumed capacity for indexes, TOTAL will
        return the consumed capacity for the table and the indexes.
        (default NONE)
    """
    kwargs = {
        'TableName': tablename,
        'Key': self.dynamizer.encode_keys(key),
        'ConsistentRead': consistent,
        'ReturnConsumedCapacity': self._default_capacity(return_capacity),
    }
    if attributes is not None:
        # A list of attribute names becomes a comma-joined projection.
        if not isinstance(attributes, six.string_types):
            attributes = ', '.join(attributes)
        kwargs['ProjectionExpression'] = attributes
    if alias:
        kwargs['ExpressionAttributeNames'] = alias
    data = self.call('get_item', **kwargs)
    return Result(self.dynamizer, data, 'Item')
|
def device_radio_str(self, resp, indent=" "):
    """Render the wifi radio stats of ``resp`` as a human-readable string."""
    lines = [
        "Wifi Signal Strength (mW): {}\n".format(resp.signal),
        indent + "Wifi TX (bytes): {}\n".format(resp.tx),
        indent + "Wifi RX (bytes): {}\n".format(resp.rx),
    ]
    return "".join(lines)
|
def _get_nodes(network_id, template_id=None):
    """Get all the active nodes in a network, optionally restricted to a template."""
    extras = {'types': [], 'attributes': []}
    node_qry = db.DBSession.query(Node).filter(
        Node.network_id == network_id,
        Node.status == 'A').options(noload('network'))
    if template_id is not None:
        # Restrict to nodes whose resource type belongs to the given template.
        node_qry = node_qry.filter(ResourceType.node_id == Node.id,
                                   TemplateType.id == ResourceType.type_id,
                                   TemplateType.template_id == template_id)
    node_res = db.DBSession.execute(node_qry.statement).fetchall()
    return [JSONObject(n, extras=extras) for n in node_res]
|
def set_variables(self, data):
    """Set variables for the network.

    Parameters
    ----------
    data : dict
        Mapping of variable name to a dict with keys 'TYPE', 'XPOS',
        'YPOS', 'DESCRIPTION' and 'STATES' (a list of state names).

    Examples
    --------
    >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
    >>> writer = XBNWriter()
    >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
    ...                             'YPOS': '10465',
    ...                             'DESCRIPTION': '(a) Metastatic Cancer',
    ...                             'STATES': ['Present', 'Absent']},
    ...                       'b': {'TYPE': 'discrete', 'XPOS': '11290',
    ...                             'YPOS': '11965',
    ...                             'DESCRIPTION': '(b) Serum Calcium Increase',
    ...                             'STATES': ['Present', 'Absent']}})
    """
    variables = etree.SubElement(self.bnmodel, "VARIABLES")
    # Emit variables in sorted name order for deterministic output.
    for name in sorted(data):
        info = data[name]
        attrs = {'NAME': name, 'TYPE': info['TYPE'],
                 'XPOS': info['XPOS'], 'YPOS': info['YPOS']}
        variable = etree.SubElement(variables, 'VAR', attrib=attrs)
        etree.SubElement(variable, 'DESCRIPTION',
                         attrib={'DESCRIPTION': info['DESCRIPTION']})
        for state in info['STATES']:
            etree.SubElement(variable, 'STATENAME').text = state
|
def get_module(mod_name):
    """Load and return a module based on C{mod_name}.

    Supports dotted names ('pkg.sub.mod'), returning the leaf module.

    :raises ImportError: if ``mod_name`` is empty.
    """
    # Bug fix: the original used ``mod_name is ''`` — identity comparison
    # with a string literal is unreliable (and a SyntaxWarning on modern
    # CPython).  Use a plain truthiness check instead.
    if not mod_name:
        raise ImportError('Unable to import empty module')
    mod = __import__(mod_name)
    # __import__ returns the top-level package; walk down to the leaf.
    for comp in mod_name.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod
|
def add_cylinder(self, name, position, sizes, mass, precision=(10, 10)):
    """Add Cylinder

    Creates a cylinder pure shape, moves it to ``position`` and renames it.

    :param name: name to give the created object
    :param position: position to move the object to
    :param sizes: object dimensions
    :param mass: object mass
    :param precision: shape precision, defaults to (10, 10)
    """
    # An immutable tuple default replaces the shared mutable list default.
    self._create_pure_shape(2, 239, sizes, mass, precision)
    self.set_object_position("Cylinder", position)
    self.change_object_name("Cylinder", name)
|
def update(self, frame_no):
    """Process the animation effect for the specified frame number.

    :param frame_no: The index of the frame being generated.
    """
    started = frame_no >= self._start_frame
    # A stop frame of 0 means the effect never ends.
    not_finished = self._stop_frame == 0 or frame_no < self._stop_frame
    if started and not_finished:
        self._update(frame_no)
|
def add_milestone(self, milestone, codelistoid="MILESTONES"):
    """Add a milestone

    :param codelistoid: specify the CodeListOID (defaults to MILESTONES)
    :param str milestone: Milestone to add
    """
    existing = self.milestones.get(codelistoid, [])
    if milestone in existing:
        # Already registered under this code list: nothing to do.
        return
    self._milestones.setdefault(codelistoid, []).append(milestone)
|
def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) -> Tuple[Tuple[List[Parser], List[Parser], List[Parser]], List[Parser], List[Parser], List[Parser]]:
    """Implementation of the parent method by looking into the registry to find the most appropriate parsers to
    use, in order.

    :param strict: whether parser/type matching should be strict
    :param desired_type: the desired type, or 'JOKER' for a wildcard
    :param required_ext: the required file extension, or 'JOKER' for a wildcard
    :return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
        no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
    """
    # if desired_type is JOKER and required_ext is JOKER:
    #     # Easy: return everything (GENERIC first, SPECIFIC then) in order (make a copy first :) )
    #     matching_parsers_generic = self._generic_parsers.copy()
    #     matching_parsers_approx = []
    #     matching_parsers_exact = self._specific_parsers.copy()
    #     no_type_match_but_ext_match = []
    #     no_ext_match_but_type_match = []
    #     no_match = []
    # else:
    # Although the above could be thought as an easy way to accelerate the process, it does not any more since the
    # JOKER special cases are handled in parser.is_able_to_parse and converter.is_able_to_convert functions.
    # It was also dangerous since it prevented us to get consistency across views - hence parser/converter
    # implementors could get the feeling that their parser was correctly registered where it wasn't
    check_var(strict, var_types=bool, var_name='strict')
    # first transform any 'Any' type requirement into the official class for that
    desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)
    matching_parsers_generic = []
    matching_parsers_approx = []
    matching_parsers_exact = []
    no_type_match_but_ext_match = []
    no_ext_match_but_type_match = []
    no_match = []
    # handle generic parsers first - except if desired type is Any
    for p in self._generic_parsers:
        match = p.is_able_to_parse(desired_type=desired_type, desired_ext=required_ext, strict=strict)
        if match:
            # match
            if is_any_type(desired_type):
                # special case: what is required is Any, so put in exact match
                matching_parsers_exact.append(p)
            else:
                matching_parsers_generic.append(p)
        else:
            # check if by releasing the constraint on ext it makes a match
            if p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
                no_ext_match_but_type_match.append(p)
            else:
                # there will be no way to use this: it is a generic parser that is not able to parse this type...
                # no_type_match_but_ext_match.append(p)
                pass
    # then the specific
    for p in self._specific_parsers:
        match, exact_match = p.is_able_to_parse_detailed(desired_type=desired_type, desired_ext=required_ext, strict=strict)
        if match:
            if is_any_type(desired_type):
                # special case: dont register as a type match
                no_type_match_but_ext_match.append(p)
            else:
                # exact_match None means "no type constraint was given"; treat as exact.
                if exact_match is None or exact_match:
                    matching_parsers_exact.append(p)
                else:
                    matching_parsers_approx.append(p)
        else:
            # try to set the type to a supported type to see if that makes a match
            if p.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict):
                no_type_match_but_ext_match.append(p)
            # try to set the ext to a supported ext to see if that makes a match
            elif p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
                no_ext_match_but_type_match.append(p)
            # no match at all
            else:
                no_match.append(p)
    return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
|
def deserialize(cls, value):
    """Generates a Peer instance via a JSON string of the sort generated
    by `Peer.deserialize`.

    The `name` and `ip` keys are required to be present in the JSON map;
    if the `port` key is not present the default is used.
    """
    parsed = json.loads(value)
    for required, message in (("name", "No peer name."), ("ip", "No peer IP.")):
        if required not in parsed:
            raise ValueError(message)
    if "port" in parsed:
        port = parsed["port"]
    else:
        port = DEFAULT_PEER_PORT
    return cls(parsed["name"], parsed["ip"], port)
|
def set_guest_property(self, property_p, value, flags):
    """Sets, changes or deletes an entry in the machine's guest property
    store.

    in property_p of type str
        The name of the property to set, change or delete.
    in value of type str
        The new value of the property to set, change or delete. If the
        property does not yet exist and value is non-empty, it will be
        created. If the value is @c null or empty, the property will be
        deleted if it exists.
    in flags of type str
        Additional property parameters, passed as a comma-separated list of
        "name=value" type entries.

    raises :class:`OleErrorAccessdenied`
        Property cannot be changed.
    raises :class:`OleErrorInvalidarg`
        Invalid @a flags.
    raises :class:`VBoxErrorInvalidVmState`
        Virtual machine is not mutable or session not open.
    raises :class:`VBoxErrorInvalidObjectState`
        Cannot set transient property when machine not running.
    """
    # Validate all three arguments uniformly before issuing the call.
    for arg_name, arg_value in (("property_p", property_p),
                                ("value", value),
                                ("flags", flags)):
        if not isinstance(arg_value, basestring):
            raise TypeError("%s can only be an instance of type basestring" % arg_name)
    self._call("setGuestProperty", in_p=[property_p, value, flags])
|
def _find_best_positions(self, G):
    """Finds best positions for the given graph (given as adjacency matrix)
    nodes by minimizing a network potential.

    When ``self.xpos`` (or ``self.ypos``) is given, that coordinate is held
    fixed during the layout and the free coordinate is balanced/rescaled to
    match its range.
    """
    initpos = None
    holddim = None
    if self.xpos is not None:
        # Start from the fixed x positions with random y; hold dimension 0.
        # (When both xpos and ypos are set, this branch runs and ypos is
        # applied by the rescaling below, matching the original live path;
        # an unreachable both-set elif was removed.)
        y = _np.random.random(len(self.xpos))
        initpos = _np.vstack((self.xpos, y)).T
        holddim = 0
    elif self.ypos is not None:
        # Bug fix: x must be sized like ypos — xpos is None in this branch,
        # so the original ``zeros_like(self.xpos)`` could not produce a
        # matching-length array.
        x = _np.zeros_like(self.ypos)
        initpos = _np.vstack((x, self.ypos)).T
        holddim = 1
    from pyemma.plots._ext.fruchterman_reingold import _fruchterman_reingold
    best_pos = _fruchterman_reingold(G, pos=initpos, dim=2, hold_dim=holddim)
    # rescale fixed to user settings and balance the other coordinate
    if self.xpos is not None:
        # rescale x to fixed value
        best_pos[:, 0] *= (_np.max(self.xpos) - _np.min(self.xpos)) / (_np.max(best_pos[:, 0]) - _np.min(best_pos[:, 0]))
        best_pos[:, 0] += _np.min(self.xpos) - _np.min(best_pos[:, 0])
        # rescale y to balance
        if _np.max(best_pos[:, 1]) - _np.min(best_pos[:, 1]) > 0.01:
            best_pos[:, 1] *= (_np.max(self.xpos) - _np.min(self.xpos)) / (_np.max(best_pos[:, 1]) - _np.min(best_pos[:, 1]))
    if self.ypos is not None:
        best_pos[:, 1] *= (_np.max(self.ypos) - _np.min(self.ypos)) / (_np.max(best_pos[:, 1]) - _np.min(best_pos[:, 1]))
        best_pos[:, 1] += _np.min(self.ypos) - _np.min(best_pos[:, 1])
        # rescale x to balance
        if _np.max(best_pos[:, 0]) - _np.min(best_pos[:, 0]) > 0.01:
            best_pos[:, 0] *= (_np.max(self.ypos) - _np.min(self.ypos)) / (_np.max(best_pos[:, 0]) - _np.min(best_pos[:, 0]))
    return best_pos
|
def greenlet(func, args=(), kwargs=None):
    """Create a new greenlet running *func* with the given arguments.

    The one major difference from the basic greenlet API is that the new
    greenlet's parent is set to the greenhouse main-loop greenlet, which is
    required for greenlets that wind up in the greenhouse scheduler.

    :param func: the function the new greenlet should run
    :type func: function
    :param args: any positional arguments for the function
    :type args: tuple
    :param kwargs: any keyword arguments for the function
    :type kwargs: dict or None
    """
    if not (args or kwargs):
        # No arguments to bind: run the function directly.
        target = func
    else:
        def target():
            return func(*args, **(kwargs or {}))
    return compat.greenlet(target, state.mainloop)
|
def add_text_content_type(application, content_type, default_encoding, dumps, loads):
    """Register a transcoder for a text content type.

    Note that any ``charset`` parameter is stripped from *content_type*
    before registration.

    :param tornado.web.Application application: the application to modify
    :param str content_type: the content type to add
    :param str default_encoding: encoding used when one is unspecified
    :param dumps: function that dumps a dictionary to a string,
        ``dumps(dict, encoding: str) -> str``
    :param loads: function that loads a dictionary from a string,
        ``loads(str, encoding: str) -> dict``
    """
    parsed_type = headers.parse_content_type(content_type)
    parsed_type.parameters.pop('charset', None)
    handler = handlers.TextContentHandler(str(parsed_type), dumps, loads, default_encoding)
    add_transcoder(application, handler)
|
def prepare_environment(default_settings=_default_settings, **kwargs):  # pylint: disable=unused-argument
    '''Prepare the process environment for the web application.

    :param default_settings: minimal settings needed to run the app; each
        entry only fills in environment keys that are not already set.
    :type default_settings: dict
    :param kwargs: other overriding settings, applied unconditionally.
    :rtype: None
    '''
    # Defaults never clobber pre-existing environment values...
    for name, default in default_settings.items():
        os.environ.setdefault(name, default)
    # ...while explicit overrides always win.
    os.environ.update(kwargs)
    if six.PY2:  # nocv
        warnings.warn('Python 2.7 is deprecated and will dropped in 2.0, use Python >3.5', DeprecationWarning)
|
def _proxy(self):
    """Lazily build and memoize the instance context.

    All instance actions are proxied to this context, which is capable of
    performing the various actions.

    :returns: CompositionSettingsContext for this CompositionSettingsInstance
    :rtype: twilio.rest.video.v1.composition_settings.CompositionSettingsContext
    """
    if self._context is not None:
        return self._context
    self._context = CompositionSettingsContext(self._version, )
    return self._context
|
def _not_in(x, y):
    """Compute a vectorized ``x not in y`` when possible, otherwise fall
    back to the plain Python operator."""
    try:
        # Preferred path: x exposes .isin (e.g. a pandas object).
        return ~x.isin(y)
    except AttributeError:
        pass
    if is_list_like(x):
        try:
            # Mirror case: y may be the pandas object instead.
            return ~y.isin(x)
        except AttributeError:
            pass
    return x not in y
|
def fail_run_group(group, session):
    """End the run_group unsuccessfully.

    Args:
        group: The run_group we want to complete; its ``status`` is set to
            'failed' and ``end`` is stamped with the current time.
        session: The database transaction we will finish (committed here).
    """
    from datetime import datetime

    group.status = 'failed'
    group.end = datetime.now()
    session.commit()
|
def handle(self, *args, **options):
    """Create a new app skeleton under ``apps/`` (management command entry)."""
    app_name = options.get('name')
    quickstart = Quickstart()
    try:
        quickstart.create_app(os.path.join(settings.BASE_DIR, 'apps'), app_name)
        self.stdout.write(self.style.SUCCESS("Successfully created app ({name}), don't forget to add 'apps.{name}' to INSTALLED_APPS".format(name=app_name)))
    except FileExistsError as e:
        # Surface the underlying error, then abort the command.
        print(e)
        raise CommandError("App with same name already exists")
|
async def insert(**data):
    """RPC method for inserting data into a table.

    :return: status message string, or None for unrecognized server errors.
    """
    table = data.get('table')
    try:
        clickhouse_queries.insert_into_table(table, data)
    except ServerException as e:
        # The numeric server error code is embedded in the exception text.
        code = int(str(e)[5:8].strip())
        if code == 60:
            return 'Table does not exists'
        if code == 50:
            return 'Invalid params'
        return None
    return 'Data was successfully inserted into table'
|
def from_int(self, integer):
    """Set this Note from an integer pitch value.

    0 is a C on octave 0, 12 is a C on octave 1, etc.

    Example:
    >>> Note().from_int(12)
    'C-1'
    """
    # divmod gives (octave, pitch class) with floor semantics, matching
    # // and % for negative inputs as well.
    octave, pitch_class = divmod(integer, 12)
    self.name = notes.int_to_note(pitch_class)
    self.octave = octave
    return self
|
def search(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, Page, PerPage, Order, UserID=None):
    """Query the collected (scraped) e-tax-invoice results for a job.

    Args:
        CorpNum: Popbill member business registration number.
        JobID: job identifier; must be exactly 18 characters.
        Type: document types (list), N - normal, M - modified e-tax invoice.
        TaxType: tax types (list), T - taxable, N - tax-free, Z - zero-rated.
        PurposeType: receipt/invoice purpose, R - receipt, C - invoice, N - none.
        TaxRegIDType: sub-business-ID owner type, S - supplier, B - buyer, T - trustee.
        TaxRegIDYN: '' - all, '0' - without sub-business ID, '1' - with one.
        TaxRegID: sub-business IDs, comma separated, e.g. '0001,0002'.
        Page: page number.
        PerPage: entries per page, max 1000.
        Order: sort direction, D - descending, A - ascending.
        UserID: Popbill member user id (optional).

    Returns:
        Collected result info (parsed HTTP GET response).

    Raises:
        PopbillException: if JobID is missing or not 18 characters long.
    """
    # Use identity comparison with None (PEP 8); `== None` misbehaves for
    # objects overriding __eq__.
    if JobID is None or len(JobID) != 18:
        raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.")
    uri = '/HomeTax/Taxinvoice/' + JobID
    uri += '?Type=' + ','.join(Type)
    uri += '&TaxType=' + ','.join(TaxType)
    uri += '&PurposeType=' + ','.join(PurposeType)
    uri += '&TaxRegIDType=' + TaxRegIDType
    uri += '&TaxRegID=' + TaxRegID
    uri += '&Page=' + str(Page)
    uri += '&PerPage=' + str(PerPage)
    uri += '&Order=' + Order
    # Empty TaxRegIDYN means "all"; only pass the filter when set.
    if TaxRegIDYN != '':
        uri += '&TaxRegIDYN=' + TaxRegIDYN
    return self._httpget(uri, CorpNum, UserID)
|
def create_row_to_some_id_col_mapping(id_array):
    """Map each row of *id_array* to the unique id it carries.

    Parameters
    ----------
    id_array : 1D ndarray.
        All elements should be ints representing some id related to the
        corresponding row.

    Returns
    -------
    rows_to_ids : 2D ndarray of zeros and ones.
        Column j corresponds to the j-th unique value of `id_array` in
        order of first appearance; entry (i, j) is 1 iff row i carries
        that id.
    """
    # Unique ids, in their original order of appearance.
    unique_ids = get_original_order_unique_ids(id_array)
    # Broadcast an equality comparison: each row is matched against every
    # unique id, yielding a one-hot membership matrix.
    return (id_array[:, None] == unique_ids[None, :]).astype(int)
|
def Target_setRemoteLocations(self, locations):
    """Enable target discovery for the specified remote locations.

    Function path: Target.setRemoteLocations
    Domain: Target

    Required arguments:
        'locations' (type: array) -> List of remote locations.

    No protocol return value; only effective when ``setDiscoverTargets``
    was previously set to true.
    """
    assert isinstance(locations, (list, tuple)), \
        "Argument 'locations' must be of type '['list', 'tuple']'. Received type: '%s'" % type(locations)
    return self.synchronous_command('Target.setRemoteLocations', locations=locations)
|
def disable_contactgroup_svc_notifications(self, contactgroup):
    """Disable service notifications for every contact of a contactgroup.

    Format of the line that triggers the function call::

        DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>

    :param contactgroup: contactgroup to disable
    :type contactgroup: alignak.objects.contactgroup.Contactgroup
    :return: None
    """
    contacts = self.daemon.contacts
    for contact_id in contactgroup.get_contacts():
        self.disable_contact_svc_notifications(contacts[contact_id])
|
def _getCampaignDict():
    """Return a dict specifying the details of all campaigns.

    The JSON file is parsed once and memoized in a module-level cache.
    """
    global _campaign_dict_cache
    if _campaign_dict_cache is None:
        # All pointing parameters and dates are stored in a JSON file.
        fn = os.path.join(PACKAGEDIR, "data", "k2-campaign-parameters.json")
        # Use a context manager so the file handle is closed promptly;
        # the original json.load(open(fn)) leaked it until GC.
        with open(fn) as f:
            _campaign_dict_cache = json.load(f)
    return _campaign_dict_cache
|
def help(module=None, *args):
    '''Display help on an Ansible standard module.

    :param module: name of the Ansible module to document (required).
    :param args: optional section names; when given, only those sections
        of the module's DOCUMENTATION are returned.
    :return: dict with either an overview (description + available section
        names) or the requested documentation sections.
    :raises CommandExecutionError: when no module is given or it cannot be
        loaded.
    '''
    if not module:
        raise CommandExecutionError('Please tell me what module you want to have helped with. ' 'Or call "ansible.list" to know what is available.')
    try:
        module = _resolver.load_module(module)
    except (ImportError, LoaderError) as err:
        raise CommandExecutionError('Module "{0}" is currently not functional on your system.' . format(module))
    doc = {}
    ret = {}
    # DOCUMENTATION may contain several YAML documents separated by '---';
    # merge them all into a single dict, logging (not raising) on any
    # section that fails to parse.
    for docset in module.DOCUMENTATION.split('---'):
        try:
            docset = salt.utils.yaml.safe_load(docset)
            if docset:
                doc.update(docset)
        except Exception as err:
            log.error("Error parsing doc section: %s", err)
    if not args:
        # No specific sections requested: return the description plus the
        # list of available section names.
        if 'description' in doc:
            description = doc.get('description') or ''
            del doc['description']
            ret['Description'] = description
        ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys()
    else:
        # Return only the requested sections, silently skipping unknown ones.
        for arg in args:
            info = doc.get(arg)
            if info is not None:
                ret[arg] = info
    return ret
|
def build_where_stmt(self, ident, filters, q_filters=None, source_class=None):
    """Construct a WHERE statement from some filters and append it to the AST.

    :param ident: Cypher identifier of the node being filtered.
    :param filters: iterable of filter dicts mapping property -> (operator,
        value); a dict of the form ``{'__NOT__': {...}}`` negates the
        nested filters.
    :param q_filters: optional Q-object filters; when given, ``filters``
        is ignored entirely.
    :param source_class: node class used when parsing ``q_filters``.
    """
    if q_filters is not None:
        stmts = self._parse_q_filters(ident, q_filters, source_class)
        if stmts:
            self._ast['where'].append(stmts)
    else:
        stmts = []
        for row in filters:
            negate = False
            # pre-process NOT cases as they are nested dicts
            if '__NOT__' in row and len(row) == 1:
                negate = True
                row = row['__NOT__']
            for prop, op_and_val in row.items():
                op, val = op_and_val
                if op in _UNARY_OPERATORS:
                    # unary operators do not have a parameter
                    statement = '{0} {1}.{2} {3}'.format('NOT' if negate else '', ident, prop, op)
                else:
                    # Register a query placeholder and bind the value to it;
                    # {{{4}}} renders as a literal {placeholder} in Cypher.
                    place_holder = self._register_place_holder(ident + '_' + prop)
                    statement = '{0} {1}.{2} {3} {{{4}}}'.format('NOT' if negate else '', ident, prop, op, place_holder)
                    self._query_params[place_holder] = val
                stmts.append(statement)
        self._ast['where'].append(' AND '.join(stmts))
|
def build_plan(description, graph, targets=None, reverse=False):
    """Build a Plan from a graph of steps.

    Args:
        description (str): an arbitrary string describing the plan.
        graph (:class:`Graph`): the graph of steps to execute.
        targets (list): optional step names to filter the graph to. If
            provided, only these steps and their transitive dependencies
            are executed; otherwise every node in the graph runs.
        reverse (bool): if set, walk the graph in reverse order
            (dependencies last), e.g. for destroy operations.
    """
    # Reverse execution (e.g. Destroy) means walking the transposed graph.
    if reverse:
        graph = graph.transposed()
    # Restrict the graph to the requested targets, if any were given.
    if targets:
        matching = [step.name
                    for target in targets
                    for step in graph.steps.values()
                    if step.name == target]
        graph = graph.filtered(matching)
    return Plan(description=description, graph=graph)
|
def parse(self, file_obj):
    """Read an OpenSSH config from the given file object.

    :param file_obj: a file-like object to read the config file from
    """
    current = {"host": ["*"], "config": {}}
    for raw_line in file_obj:
        # Strip any leading or trailing whitespace from the line.
        # Refer to https://github.com/paramiko/paramiko/issues/499
        line = raw_line.strip()
        if not line or line.startswith("#"):
            continue  # blank line or comment
        match = re.match(self.SETTINGS_REGEX, line)
        if not match:
            raise Exception("Unparsable line {}".format(line))
        key = match.group(1).lower()
        value = match.group(2)
        if key == "host":
            # A new Host stanza starts; commit the one built so far.
            self._config.append(current)
            current = {"host": self._get_hosts(value), "config": {}}
            continue
        if key == "proxycommand" and value.lower() == "none":
            # Store 'none' as None; prior to 3.x it will get stripped out
            # at the end (for compatibility with issue #415). After 3.x it
            # will simply not get stripped, leaving an explicit marker.
            current["config"][key] = None
            continue
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        # identityfile, localforward, remoteforward keys are special cases:
        # they may be specified multiple times and should be tried in
        # order of specification.
        if key in ["identityfile", "localforward", "remoteforward"]:
            current["config"].setdefault(key, []).append(value)
        elif key not in current["config"]:
            # For all other keys, the first occurrence wins.
            current["config"][key] = value
    self._config.append(current)
|
def get_agents_by_search(self, agent_query, agent_search):
    """Pass through to provider AgentSearchSession.get_agents_by_search.

    :raises PermissionDenied: when this session lacks 'search' authority.
    """
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    if self._can('search'):
        return self._provider_session.get_agents_by_search(agent_query, agent_search)
    raise PermissionDenied()
|
def add_subparsers(self, *args, **kwargs):
    """Add subparsers to this parser.

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method.
    chain : bool
        Default: False. If True, chaining of subparsers is enabled.
    """
    # Extract our extension flag before delegating to argparse.
    chain = kwargs.pop('chain', None)
    action = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    if chain:
        self._chain_subparsers = True
    # Remember the action so chained subparsers can be dispatched later.
    self._subparsers_action = action
    return action
|
def default(self):
    """Return the last commit's changes in truncated unified diff format."""
    raw = ensure_unicode(self.git.log('-1', '-p', '--no-color', '--format=%s', ).stdout)
    lines = raw.splitlines()
    # Keep the subject line, then everything after the first '+++' hunk
    # header (islice(..., 1, None) drops the header line itself).
    subject = lines[:1]
    body = itertools.islice(
        itertools.dropwhile(lambda ln: not ln.startswith('+++'), lines[1:], ),
        1, None,
    )
    return u'\n'.join(itertools.chain(subject, body, ))
|
def do_handshake(self, timeout):
    """Perform a SSL/TLS handshake.

    For the non-blocking case the handshake is attempted once and returned
    directly. Otherwise, keep retrying whenever OpenSSL reports it wants to
    read or write, waiting on the corresponding socket event each time.

    :param timeout: overall timeout for the handshake.
    """
    tout = _timeout(timeout)
    if not self._blocking:
        return self._sslobj.do_handshake()
    while 1:
        try:
            return self._sslobj.do_handshake()
        # 'except E as exc' replaces the Python-2-only 'except E, exc'
        # syntax (a SyntaxError on Python 3; the 'as' form works on 2.6+).
        except ssl.SSLError as exc:
            if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
                self._wait_event(tout.now)
                continue
            elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                self._wait_event(tout.now, write=True)
                continue
            raise
    # NOTE: the original trailing _wait_event/do_handshake calls here were
    # unreachable (the loop only exits via return or raise) and were removed.
|
def update(self, value=None, force=False, **kwargs):
    '''Updates the ProgressBar to a new value.

    :param value: new progress value, or None to only refresh display.
    :param force: bypass the minimum-update-interval throttle.
    :param kwargs: values for registered dynamic messages only; any other
        keyword raises TypeError.
    '''
    # Lazily start timing on first use, then re-enter with timers set up.
    if self.start_time is None:
        self.start()
        return self.update(value, force=force, **kwargs)
    if value is not None and value is not base.UnknownLength:
        if self.max_value is base.UnknownLength:
            # Can't compare against unknown lengths so just update
            pass
        elif self.min_value <= value <= self.max_value:  # pragma: no cover
            # Correct value, let's accept
            pass
        elif self.max_error:
            raise ValueError('Value %s is out of range, should be between %s and %s' % (value, self.min_value, self.max_value))
        else:
            # Out of range but errors are tolerated: grow max_value instead.
            self.max_value = value
        self.previous_value = self.value
        self.value = value
    # Throttle: skip redraws arriving faster than the minimum interval,
    # unless the caller forces an update.
    minimum_update_interval = self._MINIMUM_UPDATE_INTERVAL
    delta = timeit.default_timer() - self._last_update_timer
    if delta < minimum_update_interval and not force:
        # Prevent updating too often
        return
    # Save the updated values for dynamic messages
    for key in kwargs:
        if key in self.dynamic_messages:
            self.dynamic_messages[key] = kwargs[key]
        else:
            raise TypeError('update() got an unexpected keyword ' + 'argument {0!r}'.format(key))
    if self._needs_update() or force:
        self.updates += 1
        # Delegate to each mixin in the cooperative chain explicitly.
        ResizableMixin.update(self, value=value)
        ProgressBarBase.update(self, value=value)
        StdRedirectMixin.update(self, value=value)
        # Only flush if something was actually written
        self.fd.flush()
|
def set_model(self, model):
    """Set the model the item belongs to, recursively for all children.

    A TreeItem can only belong to one model.

    :param model: the model the item belongs to
    :type model: :class:`Treemodel`
    :returns: None
    :rtype: None
    :raises: None
    """
    self._model = model
    # Propagate down the subtree so every descendant shares the model.
    for child in self.childItems:
        child.set_model(model)
|
def optspace(edm_missing, rank, niter=500, tol=1e-6, print_out=False):
    """Complete and denoise a Euclidean distance matrix via OptSpace.

    The problem being solved is
    X, S, Y = argmin_(X, S, Y) || W ° (D - X S Y') ||_F^2.

    :param edm_missing: EDM with 0 where no measurement was taken.
    :param rank: expected rank of the complete EDM.
    :param niter, tol: see the opt_space module for a description.
    :return: completed matrix with an exactly-zero diagonal.
    """
    from .opt_space import opt_space
    n = edm_missing.shape[0]
    X, S, Y, _ = opt_space(edm_missing, r=rank, niter=niter, tol=tol, print_out=print_out)
    completed = X.dot(S.dot(Y.T))
    # An EDM has zero self-distances; enforce them explicitly.
    completed[range(n), range(n)] = 0.0
    return completed
|
def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None):
    '''Helper to ensure the given tags are set on a security group.

    Computes which tags must be added, updated or removed, applies the
    changes via boto_secgroup (or only reports them in test mode), and
    returns a standard state result dict.

    :param name: security group name.
    :param tags: desired tag mapping; falsy means nothing to do.
    :return: dict with 'result', 'comment' and 'changes' keys.
    '''
    ret = {'result': True, 'comment': '', 'changes': {}}
    if not tags:
        return ret
    sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, keyid=keyid, profile=profile, vpc_id=vpc_id, vpc_name=vpc_name)
    if not sg:
        ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name)
        ret['result'] = False
        return ret
    # Work on a copy: the original aliased the caller's `tags` dict here
    # and the pops below mutated the caller's argument as a side effect.
    tags_to_add = dict(tags)
    tags_to_update = {}
    tags_to_remove = []
    if sg.get('tags'):
        for existing_tag in sg['tags']:
            if existing_tag not in tags:
                if existing_tag not in tags_to_remove:
                    tags_to_remove.append(existing_tag)
            else:
                if tags[existing_tag] != sg['tags'][existing_tag]:
                    tags_to_update[existing_tag] = tags[existing_tag]
                # Already present (possibly stale value): not a fresh add.
                tags_to_add.pop(existing_tag)
    if tags_to_remove:
        if __opts__['test']:
            msg = 'The following tag{0} set to be removed: {1}.'.format(('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove))
            ret['comment'] = ' '.join([ret['comment'], msg])
            ret['result'] = None
        else:
            temp_ret = __salt__['boto_secgroup.delete_tags'](tags_to_remove, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile)
            if not temp_ret:
                ret['result'] = False
                ret['comment'] = ' '.join([ret['comment'], 'Error attempting to delete tags {0}.'.format(tags_to_remove)])
                return ret
            if 'old' not in ret['changes']:
                ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
            for rem_tag in tags_to_remove:
                ret['changes']['old']['tags'][rem_tag] = sg['tags'][rem_tag]
    if tags_to_add or tags_to_update:
        if __opts__['test']:
            if tags_to_add:
                msg = 'The following tag{0} set to be added: {1}.'.format(('s are' if len(tags_to_add.keys()) > 1 else ' is'), ', '.join(tags_to_add.keys()))
                ret['comment'] = ' '.join([ret['comment'], msg])
                ret['result'] = None
            if tags_to_update:
                msg = 'The following tag {0} set to be updated: {1}.'.format(('values are' if len(tags_to_update.keys()) > 1 else 'value is'), ', '.join(tags_to_update.keys()))
                ret['comment'] = ' '.join([ret['comment'], msg])
                ret['result'] = None
        else:
            # Merge adds and updates and apply them in a single call.
            all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
            temp_ret = __salt__['boto_secgroup.set_tags'](all_tag_changes, name=name, group_id=None, vpc_name=vpc_name, vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile)
            if not temp_ret:
                ret['result'] = False
                msg = 'Error attempting to set tags.'
                ret['comment'] = ' '.join([ret['comment'], msg])
                return ret
            if 'old' not in ret['changes']:
                ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
            if 'new' not in ret['changes']:
                ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}})
            for tag in all_tag_changes:
                ret['changes']['new']['tags'][tag] = tags[tag]
                if 'tags' in sg:
                    if sg['tags']:
                        if tag in sg['tags']:
                            ret['changes']['old']['tags'][tag] = sg['tags'][tag]
    if not tags_to_update and not tags_to_remove and not tags_to_add:
        ret['comment'] = ' '.join([ret['comment'], 'Tags are already set.'])
    return ret
|
def fold(self):
    """Fold (collapse) the region: hide every block up to its end line."""
    _start, end = self.get_range()
    TextBlockHelper.set_collapsed(self._trigger, True)
    cursor_block = self._trigger.next()
    while cursor_block.blockNumber() <= end and cursor_block.isValid():
        cursor_block.setVisible(False)
        cursor_block = cursor_block.next()
|
def parameter(self, name=None, value=None, **kwargs):
    """Create and nest a <Parameter> element.

    :param name: The name of the custom parameter
    :param value: The value of the custom parameter
    :param kwargs: additional attributes
    :returns: <Parameter> element
    """
    element = Parameter(name=name, value=value, **kwargs)
    return self.nest(element)
|
def activated(self, include_extras=True, extra_dists=None):
    """Helper context manager (generator body) to activate the environment.

    While active it rewrites:

    * sys.prefix
    * sys.path
    * os.environ["VIRTUAL_ENV"]
    * os.environ["PATH"]

    In addition, any distributions passed in `extra_dists` are made
    available on `sys.path` while inside the context manager, as well as
    making `passa` itself available.

    The environment's `prefix` and `scripts_dir` properties are both
    prepended to `os.environ["PATH"]` to ensure that calls to
    `~Environment.run()` use the environment's path preferentially.

    :param include_extras: also expose this package's own parent/vendored/
        patched directories on sys.path.
    :param extra_dists: extra distributions to activate on sys.path.
    """
    if not extra_dists:
        extra_dists = []
    # Snapshot interpreter state so it can be restored on exit.
    original_path = sys.path
    original_prefix = sys.prefix
    parent_path = vistir.compat.Path(__file__).absolute().parent
    vendor_dir = parent_path.joinpath("vendor").as_posix()
    patched_dir = parent_path.joinpath("patched").as_posix()
    parent_path = parent_path.as_posix()
    self.add_dist("pip")
    prefix = self.prefix.as_posix()
    # temp_environ/temp_path snapshot os.environ and sys.path and restore
    # them automatically when the with-block exits.
    with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
        # Prefer this environment's executables over anything else on PATH.
        os.environ["PATH"] = os.pathsep.join([vistir.compat.fs_str(self.scripts_dir), vistir.compat.fs_str(self.prefix.as_posix()), os.environ.get("PATH", "")])
        os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
        os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
        from .environments import PIPENV_USE_SYSTEM
        if self.is_venv:
            os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
            os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
        else:
            if not PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"):
                os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
                os.environ.pop("PYTHONHOME", None)
        sys.path = self.sys_path
        sys.prefix = self.sys_prefix
        site.addsitedir(self.base_paths["purelib"])
        pip = self.safe_import("pip")
        pip_vendor = self.safe_import("pip._vendor")
        # Expose pip's vendored pep517 implementation to child imports.
        pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
        site.addsitedir(pep517_dir)
        os.environ["PYTHONPATH"] = os.pathsep.join([os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir])
        if include_extras:
            site.addsitedir(parent_path)
            sys.path.extend([parent_path, patched_dir, vendor_dir])
            extra_dists = list(self.extra_dists) + extra_dists
        for extra_dist in extra_dists:
            if extra_dist not in self.get_working_set():
                extra_dist.activate(self.sys_path)
        try:
            yield
        finally:
            # Restore interpreter state and reload pkg_resources so its
            # working set matches the restored sys.path again.
            sys.path = original_path
            sys.prefix = original_prefix
            six.moves.reload_module(pkg_resources)
|
def log_message(self, format, *args):
    """Log an arbitrary message to stderr.

    This is used by all other logging functions; override it if you have
    specific logging wishes.

    The first argument, FORMAT, is a printf-style format string for the
    message to be logged; any % escapes are filled from the subsequent
    arguments. The client ip and current date/time are prefixed to every
    message.
    """
    message = format % args
    sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), message))
|
def determine_endpoint_type(features):
    """Determine the endpoint (target) type of a dataset.

    :param features: pandas.DataFrame with a 'class' column.
    :returns: 'binary' when the class column has exactly two groups,
        'float' when any float64 feature column exists, else 'integer'.
    """
    # Map dtype name -> columns of that dtype.
    dtype_groups = features.columns.to_series().groupby(features.dtypes).groups
    counter = {dtype.name: cols for dtype, cols in dtype_groups.items()}
    if len(features.groupby('class').apply(list)) == 2:
        return 'binary'
    if 'float64' in counter:
        return 'float'
    return 'integer'
|
def getContextsForTerm(self, retina_name, term, get_fingerprint=None, start_index=0, max_results=5):
    """Get the contexts for a given term.

    Args:
        retina_name, str: The retina name (required)
        term, str: A term in the retina (required)
        get_fingerprint, bool: Whether fingerprints are included in the
            results (optional)
        start_index, int: The start-index for pagination (optional)
        max_results, int: Max results per page (optional)
    Returns: Array[Context]
    """
    queryParams = {
        'retina_name': retina_name,
        'term': term,
        'start_index': start_index,
        'max_results': max_results,
        'get_fingerprint': get_fingerprint,
    }
    headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
    response = self.apiClient._callAPI('/terms/contexts', 'GET', queryParams, None, headerParams)
    return [context.Context(**r) for r in response.json()]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.