signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_request_kwargs(self):
    """Build the keyword arguments passed to Session.request() and
    Session.resolve_redirects()."""
    kwargs = {
        "stream": True,
        "timeout": self.aggregate.config["timeout"],
    }
    if self.proxy:
        kwargs["proxies"] = {self.proxytype: self.proxy}
    # Only verify SSL certificates for https URLs when sslverify is configured.
    if self.scheme == u"https" and self.aggregate.config["sslverify"]:
        kwargs['verify'] = self.aggregate.config["sslverify"]
    else:
        kwargs['verify'] = False
    return kwargs
def _check_available_disk_space(self, project):
    """Emit a warning notification when disk space is getting low.

    :param project: project instance
    """
    try:
        percent_used = psutil.disk_usage(project.path).percent
    except FileNotFoundError:
        log.warning('Could not find "{}" when checking for used disk space'.format(project.path))
        return
    # Warn once usage reaches 90% or more.
    if percent_used >= 90:
        message = 'Only {}% or less of disk space detected in "{}" on "{}"'.format(
            percent_used, project.path, platform.node())
        log.warning(message)
        project.emit("log.warning", {"message": message})
def build_skos_concepts(self):
    """2015-08-19: first draft

    Build the list of SKOS concept objects from the SPARQL store and enrich
    each one with its rdf type, triples, owning ontology and parent/child
    links. Results are stored in ``self.all_skos_concepts`` (sorted by qname)
    and the parentless concepts in ``self.toplayer_skos``.
    """
    self.all_skos_concepts = []
    # @todo: keep adding?
    qres = self.sparqlHelper.getSKOSInstances()
    # print("rdflib query done")
    for candidate in qres:
        # Only instantiate candidates that do not already exist.
        test_existing_cl = self.get_skos(uri=candidate[0])
        if not test_existing_cl:
            # create it
            self.all_skos_concepts += [OntoSKOSConcept(candidate[0], None, self.namespaces)]
        else:
            pass
    # print("concepts created")
    # add more data
    skos = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
    for aConcept in self.all_skos_concepts:
        # print("enriching concept...", aConcept)
        aConcept.rdftype = skos['Concept']
        aConcept.triples = self.sparqlHelper.entityTriples(aConcept.uri)
        # force construction of mini graph
        aConcept._buildGraph()
        aConcept.sparqlHelper = self.sparqlHelper
        # attach to an ontology
        for uri in aConcept.getValuesForProperty(rdflib.RDFS.isDefinedBy):
            onto = self.get_ontology(uri=str(uri))
            if onto:
                onto.all_skos_concepts += [aConcept]
                aConcept.ontology = onto
        # add direct Supers
        directSupers = self.sparqlHelper.getSKOSDirectSupers(aConcept.uri)
        for x in directSupers:
            superclass = self.get_skos(uri=x[0])
            # note: extra condition to avoid recursive structures
            if superclass and superclass.uri != aConcept.uri:
                aConcept._parents.append(superclass)
                # add inverse relationships (= direct subs for superclass)
                if aConcept not in superclass.children():
                    superclass._children.append(aConcept)
    # sort alphabetically
    self.all_skos_concepts = sorted(self.all_skos_concepts, key=lambda x: x.qname)
    # compute top layer for skos: concepts with no parents
    exit = []
    for c in self.all_skos_concepts:
        if not c.parents():
            exit += [c]
    self.toplayer_skos = exit
def dump(d, fp, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False):
    """Write ``d`` (the Mapfile dictionary) as a formatted stream to ``fp``.

    Parameters
    ----------
    d: dict
        A Python dictionary based on the mappyfile schema
    fp: file
        A file-like object
    indent: int
        The number of ``spacer`` characters to indent structures in the Mapfile
    spacer: string
        The character to use for indenting structures in the Mapfile.
        Typically spaces or tab characters (``\\t``)
    quote: string
        The quote character to use in the Mapfile (double or single quotes)
    newlinechar: string
        The character used to insert newlines in the Mapfile
    end_comment: bool
        Add a comment with the block type at each closing END
        statement e.g. END # MAP

    Example
    -------
    To open a Mapfile from a string, and then dump it back out to an open
    file, using 2 spaces for indentation, and single-quotes for properties::

        s = '''MAP NAME "TEST" END'''
        d = mappyfile.loads(s)
        with open(fn, "w") as f:
            mappyfile.dump(d, f, indent=2, quote="'")
    """
    # Render the whole dictionary to a single string, then write it out.
    fp.write(_pprint(d, indent, spacer, quote, newlinechar, end_comment))
def _set_ldp_sync_info(self, v, load=False):
    """Setter method for ldp_sync_info, mapped from YANG variable
    /isis_state/interface_detail/isis_intf/ldp_sync_info (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ldp_sync_info is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_ldp_sync_info() directly.

    YANG Description: ISIS LDP sync info
    """
    # Unwrap an already-typed value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({'error-string': """ldp_sync_info must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ldp_sync_info.ldp_sync_info, is_container='container', presence=False, yang_name="ldp-sync-info", rest_name="ldp-sync-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-ldp-sync-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", })
    self.__ldp_sync_info = t
    # Some backends define a _set() hook to trigger change notifications.
    if hasattr(self, '_set'):
        self._set()
def openorders(ctx, account):
    """List open orders of an account"""
    account = Account(
        account or config["default_account"],
        bitshares_instance=ctx.bitshares,
    )
    rows = [["Price", "Quote", "Base", "ID"]]
    for order in account.openorders:
        price = "{:f} {}/{}".format(
            order["price"],
            order["base"]["asset"]["symbol"],
            order["quote"]["asset"]["symbol"],
        )
        rows.append([price, str(order["quote"]), str(order["base"]), order["id"]])
    print_table(rows)
def check_type(self):
    """Make sure each stochastic has a correct type, and identify discrete stochastics."""
    self.isdiscrete = {}
    for stochastic in self.stochastics:
        dtype = stochastic.dtype
        if dtype in integer_dtypes:
            self.isdiscrete[stochastic] = True
        elif dtype in bool_dtypes:
            # Binary variables cannot be handled by this sampler at all.
            raise ValueError('Binary stochastics not supported by AdaptativeMetropolis.')
        else:
            self.isdiscrete[stochastic] = False
def safe_dump(data, abspath, indent_format=False, float_precision=None, ensure_ascii=True, enable_verbose=True):
    """A stable version of :func:`dump`; silently overwrites any existing file.

    :func:`dump` has an issue: if the program is interrupted while writing,
    you end up with an incomplete file and also lose the original. To make
    the write atomic, this method first writes the json to a temporary file
    and then renames it over the target — an interruption can at worst leave
    behind an unfinished ``.tmp`` file without touching the original.
    """
    abspath = lower_ext(str(abspath))
    tmp_path = "%s.tmp" % abspath
    dump(
        data,
        tmp_path,
        indent_format=indent_format,
        float_precision=float_precision,
        ensure_ascii=ensure_ascii,
        overwrite=True,
        enable_verbose=enable_verbose,
    )
    # Replace the target with the finished temp file.
    shutil.move(tmp_path, abspath)
def get_spider_list(self, project_name, version=None):
    """Get the list of spiders available in the last (unless overridden) version of some project.

    :param project_name: the project name
    :param version: the version of the project to examine
    :return: a dictionary with the spider name list,
        e.g. {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]}
    """
    command = self.command_set['listspiders']
    url, method = command[0], command[1]
    payload = {'project': project_name}
    if version is not None:
        payload['_version'] = version
    response = http_utils.request(url, method_type=method, data=payload, return_type=http_utils.RETURN_JSON)
    if response is None:
        # Fall back to an empty SpiderList payload when the daemon is unreachable.
        logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name)
        response = SpiderList().__dict__
    return response
def query(self):
    """Returns the query that is defined by this current panel.

    :return <orb.Query>
    """
    combined = Query()
    use_and = self.currentJoiner() == QueryCompound.Op.And
    # Fold every entry's query together with the panel's joiner operator.
    for entry in self.entries():
        if use_and:
            combined &= entry.query()
        else:
            combined |= entry.query()
    combined.setName(self.uiNameTXT.text())
    return combined
def to_db(catchment, session, method='create', autocommit=False):
    """Load catchment object into the database.

    A catchment/station number (:attr:`catchment.id`) must be provided. If
    :attr:`method` is set to `update`, any existing catchment in the database
    with the same catchment number will be updated.

    :param catchment: New catchment object to replace any existing catchment in the database
    :type catchment: :class:`.entities.Catchment`
    :param session: Database session to use, typically `floodestimation.db.Session()`
    :type session: :class:`sqlalchemy.orm.session.Session`
    :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database.
                   - ``update``: any existing catchment in the database will be updated. Otherwise it will be created.
    :type method: str
    :param autocommit: Whether to commit the database session immediately. Default: ``False``.
    :type autocommit: bool
    :raises ValueError: if the catchment has no id or ``method`` is invalid
    """
    if not catchment.id:
        raise ValueError("Catchment/station number (`catchment.id`) must be set.")
    if method == 'create':
        session.add(catchment)
    elif method == 'update':
        session.merge(catchment)
    else:
        # Bug fix: the invalid method name was never interpolated — the
        # message used to contain a literal '{}' placeholder.
        raise ValueError("Method `{}` invalid. Use either `create` or `update`.".format(method))
    if autocommit:
        session.commit()
def get_worksheets_section(self):
    """Returns the section dictionary related with Worksheets,
    that contains some informative panels (like WS to be verified,
    WS with results pending, etc.)
    """
    out = []
    bc = getToolByName(self.context, CATALOG_WORKSHEET_LISTING)
    query = {'portal_type': "Worksheet", }
    # Check if dashboard_cookie contains any values to query elements by
    query = self._update_criteria_with_filters(query, 'worksheets')
    # Active Worksheets (all) — used as the denominator for each statistic panel.
    total = self.search_count(query, bc.id)
    # Open worksheets
    name = _('Results pending')
    desc = _('Results pending')
    purl = 'worksheets?list_review_state=open'
    query['review_state'] = ['open', 'attachment_due']
    out.append(self._getStatistics(name, desc, purl, bc, query, total))
    # Worksheets to be verified
    name = _('To be verified')
    desc = _('To be verified')
    purl = 'worksheets?list_review_state=to_be_verified'
    query['review_state'] = ['to_be_verified', ]
    out.append(self._getStatistics(name, desc, purl, bc, query, total))
    # Worksheets verified
    name = _('Verified')
    desc = _('Verified')
    purl = 'worksheets?list_review_state=verified'
    query['review_state'] = ['verified', ]
    out.append(self._getStatistics(name, desc, purl, bc, query, total))
    # Chart with the evolution of WSs over a period, grouped by periodicity.
    # NOTE(review): `query` still carries the last review_state filter here —
    # presumably fill_dates_evo overrides it; confirm against its implementation.
    outevo = self.fill_dates_evo(bc, query)
    out.append({'type': 'bar-chart-panel', 'name': _('Evolution of Worksheets'), 'class': 'informative', 'description': _('Evolution of Worksheets'), 'data': json.dumps(outevo), 'datacolors': json.dumps(self.get_colors_palette())})
    return {'id': 'worksheets', 'title': _('Worksheets'), 'panels': out}
def from_array(array):
    """Deserialize a new Sticker from a given dictionary.

    :return: new Sticker instance, or None when the input is empty.
    :rtype: Sticker
    """
    if not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    from .stickers import MaskPosition
    data = {
        'file_id': u(array.get('file_id')),
        'width': int(array.get('width')),
        'height': int(array.get('height')),
    }
    thumb = array.get('thumb')
    data['thumb'] = PhotoSize.from_array(thumb) if thumb is not None else None
    emoji = array.get('emoji')
    data['emoji'] = u(emoji) if emoji is not None else None
    set_name = array.get('set_name')
    data['set_name'] = u(set_name) if set_name is not None else None
    mask_position = array.get('mask_position')
    data['mask_position'] = MaskPosition.from_array(mask_position) if mask_position is not None else None
    file_size = array.get('file_size')
    data['file_size'] = int(file_size) if file_size is not None else None
    # Keep the raw payload around for debugging / round-tripping.
    data['_raw'] = array
    return Sticker(**data)
def incrby(self, key, increment):
    """Increment the integer value of ``key`` by the given amount.

    :raises TypeError: if increment is not int
    """
    if isinstance(increment, int):
        return self.execute(b'INCRBY', key, increment)
    raise TypeError("increment must be of type int")
def _load_json_result(self, conf, compile_classpath, coursier_cache_path, invalidation_check, pants_jar_path_base, result, override_classifiers=None):
    """Given a coursier run result, load it into compile_classpath by target.

    :param compile_classpath: `ClasspathProducts` that will be modified
    :param coursier_cache_path: cache location that is managed by coursier
    :param invalidation_check: InvalidationCheck
    :param pants_jar_path_base: location under pants workdir that contains all the hardlinks to coursier cache
    :param result: result dict converted from the json produced by one coursier run
    :return: n/a
    """
    # Parse the coursier result
    flattened_resolution = self._extract_dependencies_by_root(result)
    coord_to_resolved_jars = self._map_coord_to_resolved_jars(result, coursier_cache_path, pants_jar_path_base)
    # Construct a map from org:name to the reconciled org:name:version coordinate
    # This is used when there is won't be a conflict_resolution entry because the conflict
    # was resolved in pants.
    org_name_to_org_name_rev = {}
    for coord in coord_to_resolved_jars.keys():
        org_name_to_org_name_rev['{}:{}'.format(coord.org, coord.name)] = coord
    jars_per_target = []
    for vt in invalidation_check.all_vts:
        t = vt.target
        jars_to_digest = []
        if isinstance(t, JarLibrary):

            def get_transitive_resolved_jars(my_coord, resolved_jars):
                # Collect the resolved jar for my_coord plus the jars of every
                # coordinate listed under it in the flattened resolution.
                transitive_jar_path_for_coord = []
                coord_str = str(my_coord)
                if coord_str in flattened_resolution and my_coord in resolved_jars:
                    transitive_jar_path_for_coord.append(resolved_jars[my_coord])
                    for c in flattened_resolution[coord_str]:
                        j = resolved_jars.get(self.to_m2_coord(c))
                        if j:
                            transitive_jar_path_for_coord.append(j)
                return transitive_jar_path_for_coord

            for jar in t.jar_dependencies:
                # if there are override classifiers, then force use of those.
                coord_candidates = []
                if override_classifiers:
                    coord_candidates = [jar.coordinate.copy(classifier=c) for c in override_classifiers]
                else:
                    coord_candidates = [jar.coordinate]
                # if conflict resolution entries, then update versions to the resolved ones.
                if jar.coordinate.simple_coord in result['conflict_resolution']:
                    parsed_conflict = self.to_m2_coord(result['conflict_resolution'][jar.coordinate.simple_coord])
                    coord_candidates = [c.copy(rev=parsed_conflict.rev) for c in coord_candidates]
                elif '{}:{}'.format(jar.coordinate.org, jar.coordinate.name) in org_name_to_org_name_rev:
                    # Conflict was reconciled inside pants rather than by coursier.
                    parsed_conflict = org_name_to_org_name_rev['{}:{}'.format(jar.coordinate.org, jar.coordinate.name)]
                    coord_candidates = [c.copy(rev=parsed_conflict.rev) for c in coord_candidates]
                for coord in coord_candidates:
                    transitive_resolved_jars = get_transitive_resolved_jars(coord, coord_to_resolved_jars)
                    if transitive_resolved_jars:
                        for jar in transitive_resolved_jars:
                            jars_to_digest.append(jar)
        # Every target gets an entry, even non-JarLibrary targets (empty list).
        jars_per_target.append((t, jars_to_digest))
    for target, jars_to_add in self.add_directory_digests_for_jars(jars_per_target):
        compile_classpath.add_jars_for_targets([target], conf, jars_to_add)
def tokenize(self, value):
    """Split the incoming value into tokens and process each token,
    optionally stemming or running metaphone.

    :returns: A ``dict`` mapping token to score. The score is based on
        the relative frequency of the word in the document.
    """
    words = self.split_phrase(decode(value).lower())
    if self._stopwords:
        words = [word for word in words if word not in self._stopwords]
    if self._min_word_length:
        words = [word for word in words if len(word) >= self._min_word_length]
    # The +1 prevents division by zero on an empty word list.
    fraction = 1. / (len(words) + 1)
    # Apply optional transformations.
    if self._use_stemmer:
        words = self.stem(words)
    if self._use_metaphone:
        words = self.metaphone(words)
    scores = {}
    for word in words:
        scores[word] = scores.get(word, 0) + fraction
    return scores
def get_reddit():
    """Returns the reddit dataset, downloading locally if necessary.

    This dataset was released here:
    https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/
    and contains 23M up/down votes from 44K users on 3.4M links.

    Returns a CSR matrix of (item, user, rating
    """
    filename = os.path.join(_download.LOCAL_CACHE_DIR, "reddit.hdf5")
    if os.path.isfile(filename):
        log.info("Using cached dataset at '%s'", filename)
    else:
        log.info("Downloading dataset to '%s'", filename)
        _download.download_file(URL, filename)
    with h5py.File(filename, 'r') as f:
        m = f.get('item_user_ratings')
        return csr_matrix((m.get('data'), m.get('indices'), m.get('indptr')))
def canon_did(uri: str) -> str:
    """Convert a URI into a DID if need be, left-stripping 'did:sov:' if present.

    Return input if already a DID. Raise BadIdentifier for invalid input.

    :param uri: input URI or DID
    :return: corresponding DID
    """
    if ok_did(uri):
        return uri
    prefix = 'did:sov:'
    if uri.startswith(prefix):
        candidate = uri[len(prefix):]
        if ok_did(candidate):
            return candidate
    raise BadIdentifier('Bad specification {} does not correspond to a sovrin DID'.format(uri))
def get_hash_for_filename(filename, hashfile_path):
    """Return hash for filename in the hashfile."""
    with open(hashfile_path, 'r') as stream:
        for line in stream:
            if not line.rstrip().endswith(filename):
                continue
            # The checksum is the leading alphanumeric run on the line.
            found = re.match(r'^[A-Za-z0-9]*', line).group(0)
            if found:
                return found
            break
    raise AttributeError("Filename %s not found in hash file" % filename)
def get_name_DID_info(self, name):
    """Get a name's DID info.

    Returns an error dict (http_status 404) if the name is not found.
    """
    db = get_db_state(self.working_dir)
    did_info = db.get_name_DID_info(name)
    if did_info is not None:
        return did_info
    return {'error': 'No such name', 'http_status': 404}
def list_cidr_ips_ipv6(cidr):
    '''Get a list of IPv6 addresses from a CIDR.

    CLI example::

        salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20
    '''
    network = netaddr.IPNetwork(cidr)
    return [six.text_type(addr.ipv6()) for addr in network]
def add_env(url, saltenv):
    '''append `saltenv` to `url` as a query parameter to a 'salt://' url'''
    # Non-salt URLs are passed through untouched.
    if not url.startswith('salt://'):
        return url
    path, senv = parse(url)
    return create(path, saltenv)
def human_and_00(X, y, model_generator, method_name):
    """AND (false/false)

    This tests how well a feature attribution method agrees with human intuition
    for an AND operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever and cough: +6 points

    transform = "identity"
    sort_order = 0
    """
    # Delegate to the shared helper with fever=False, cough=False.
    # NOTE(review): `y` is accepted for interface uniformity but unused here.
    return _human_and(X, model_generator, method_name, False, False)
def add_asn(self, auth, attr):
    """Add AS number to NIPAP.

    * `auth` [BaseAuth]
        AAA options.
    * `attr` [asn_attr]
        ASN attributes.

    Returns a dict describing the ASN which was added.

    This is the documentation of the internal backend function. It's
    exposed over XML-RPC, please also see the XML-RPC documentation for
    :py:func:`nipap.xmlrpc.NipapXMLRPC.add_asn` for full understanding.
    """
    self._logger.debug("add_asn called; attr: %s" % unicode(attr))
    # sanity check - do we have all attributes?
    required = ['asn', ]
    allowed = ['asn', 'name']
    self._check_attr(attr, required, allowed)
    insert, params = self._sql_expand_insert(attr)
    self._execute("INSERT INTO ip_net_asn " + insert, params)
    asn = self.list_asn(auth, {'asn': attr['asn']})[0]
    # write to audit table
    audit_params = {
        'username': auth.username,
        'authenticated_as': auth.authenticated_as,
        'full_name': auth.full_name,
        'authoritative_source': auth.authoritative_source,
        'description': 'Added ASN %s with attr: %s' % (attr['asn'], unicode(attr)),
    }
    sql, params = self._sql_expand_insert(audit_params)
    self._execute('INSERT INTO ip_net_log %s' % sql, params)
    return asn
def _validate_completeness(self):
    """Verify that the actual file manifests match the files in the data directory"""
    errors = list()
    # First we'll make sure there's no mismatch between the filesystem
    # and the list of files in the manifest(s)
    only_in_manifests, only_on_fs, only_in_fetch = self.compare_manifests_with_fs_and_fetch()
    for path in only_in_manifests:
        error = FileMissing(path)
        LOGGER.warning(force_unicode(error))
        errors.append(error)
    for path in only_on_fs:
        error = UnexpectedFile(path)
        LOGGER.warning(force_unicode(error))
        errors.append(error)
    for path in only_in_fetch:
        # this is non-fatal according to spec but the warning is still reasonable
        LOGGER.warning(force_unicode(UnexpectedRemoteFile(path)))
    if errors:
        raise BagValidationError(_("Bag validation failed"), errors)
def revoke_auth(preserve_minion_cache=False):
    '''The minion sends a request to the master to revoke its own key.

    Note that the minion session will be revoked and the minion may
    not be able to return the result of this command back to the master.

    If the 'preserve_minion_cache' flag is set to True, the master
    cache for this minion will not be removed.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.revoke_auth
    '''
    masters = list()
    ret = True
    # Multi-master setups list all masters under 'master_uri_list';
    # otherwise fall back to the single configured 'master_uri'.
    if 'master_uri_list' in __opts__:
        for master_uri in __opts__['master_uri_list']:
            masters.append(master_uri)
    else:
        masters.append(__opts__['master_uri'])
    for master in masters:
        channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)
        tok = channel.auth.gen_token(b'salt')
        load = {'cmd': 'revoke_auth', 'id': __opts__['id'], 'tok': tok, 'preserve_minion_cache': preserve_minion_cache}
        try:
            channel.send(load)
        except SaltReqTimeoutError:
            # A timeout on any master marks the whole call as failed,
            # but the remaining masters are still notified.
            ret = False
        finally:
            # Always release the channel, even on timeout.
            channel.close()
    return ret
def cprint(text, fg=Color.normal, bg=Color.normal, fg_dark=False, bg_dark=False, underlined=False, parse=False, ):
    """Print string in to stdout using colored font.

    See L{set_color} for more details about colors.

    Args:
        text (str): Text that needs to be printed.
        fg: foreground color (used when ``parse`` is False).
        bg: background color (used when ``parse`` is False).
        fg_dark (bool): use the dark variant of the foreground color.
        bg_dark (bool): use the dark variant of the background color.
        underlined (bool): underline the output.
        parse (bool): when True, interpret inline color markers embedded in
            ``text`` (matched by ``Color.color_re()``) instead of using the
            fg/bg arguments.
    """
    if parse:
        color_re = Color.color_re()
        lines = text.splitlines()
        count = len(lines)
        for i, line in enumerate(lines):
            previous = 0
            end = len(line)
            # Write the text up to each color marker, then switch the
            # terminal color according to the marker's groups.
            for match in color_re.finditer(line):
                sys.stdout.write(line[previous:match.start()])
                d = match.groupdict()
                set_color(d["color"], fg_dark=False if d["dark"] is None else True)
                previous = match.end()
            # Write the remainder of the line; re-append the newline that
            # splitlines() stripped, except after the last line unless the
            # original text itself ended with one.
            sys.stdout.write(line[previous:end] + ("\n" if (i < (count - 1) or text[-1] == "\n") else ""))
    else:
        set_color(fg, bg, fg_dark, bg_dark, underlined)
        sys.stdout.write(text)
        # Reset the terminal back to default colors.
        set_color()
def hacking_has_license(physical_line, filename, lines, line_number):
    """Check for Apache 2.0 license.

    H102 license header not found
    """
    # don't work about init files for now
    # TODO(sdague): enforce license in init file if it's not empty of content
    license_found = False
    # skip files that are < 10 lines, which isn't enough for a license to fit
    # this allows us to handle empty files, as well as not fail on the Okay
    # doctests.
    # Bug fix: `line_number is 1` compared identity with an int literal
    # (implementation-dependent interning; SyntaxWarning on py3.8+) — use ==.
    if line_number == 1 and len(lines) > 10 and _project_is_apache():
        for idx, line in enumerate(lines):
            # if it's more than 10 characters in, it's probably not in the
            # header
            if 0 <= line.find('Licensed under the Apache License') < 10:
                license_found = True
            if 0 <= line.find('SPDX-License-Identifier:') < 10:
                license_found = True
        if not license_found:
            return (0, "H102: Apache 2.0 license header not found")
def before_all(context):
    """Setup before all tests.

    Initialize the logger framework.

    :param context: test context.
    """
    factory = LoggerFactory(config_file='../features/resources/test_config.yaml')
    factory.initialize()
    logger = factory.get_instance('environment')
    logger.info('Logger initialized: {}'.format(factory.config))
    logger.info('Initial test context: {}'.format(context))
def as_list(self, decode=False):
    """Return a list of items in the array, optionally decoding each item."""
    if decode:
        return [_decode(item) for item in self]
    return list(self)
def _hid_enumerate(vendor_id=0, product_id=0):
    """Enumerates all the hid devices for VID:PID. Returns a list of `HIDDevice`
    objects. If vid is 0, then match any vendor id. Similarly, if pid is 0,
    match any product id. If both are zero, enumerate all HID devices.
    """
    start = hidapi.hid_enumerate(vendor_id, product_id)
    result = []
    # Walk the C linked list and copy everything into a python list.
    # (Removed a dead `cur = ffi.new("struct hid_device_info*")` allocation
    # that was immediately overwritten by `cur = start`.)
    cur = start
    while cur != ffi.NULL:
        result.append(HIDDevice(cur))
        cur = cur.next
    # Free the C memory
    hidapi.hid_free_enumeration(start)
    return result
def get_sanitized_endpoint(url):
    """Sanitize an endpoint, as removing unneeded parameters"""
    marker = '/rest/services'
    cleaned = url.rstrip()
    if marker not in url:
        return cleaned
    # ESRI endpoint: keep everything up to and including '/rest/services'.
    match = re.search(marker, cleaned)
    return url[:match.start(0) + len(marker)]
def _update_options(model, connection=None):
    """Updates the table options for the given model if necessary.

    :param model: The model to update.
    :param connection: Name of the connection to use

    :return: `True`, if the options were modified in Cassandra,
        `False` otherwise.
    :rtype: bool
    """
    ks_name = model._get_keyspace()
    msg = format_log_context("Checking %s for option differences", keyspace=ks_name, connection=connection)
    log.debug(msg, model)
    model_options = model.__options__ or {}
    table_meta = _get_table_metadata(model, connection=connection)
    # go to CQL string first to normalize meta from different versions
    existing_option_strings = set(table_meta._make_option_strings(table_meta.options))
    existing_options = _options_map_from_strings(existing_option_strings)
    model_option_strings = metadata.TableMetadataV3._make_option_strings(model_options)
    model_options = _options_map_from_strings(model_option_strings)
    update_options = {}
    for name, value in model_options.items():
        try:
            existing_value = existing_options[name]
        except KeyError:
            # Option declared on the model that the server doesn't know about.
            msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection)
            raise KeyError(msg % (name, existing_options.keys()))
        if isinstance(existing_value, six.string_types):
            # Plain string option: schedule an update when the values differ.
            if value != existing_value:
                update_options[name] = value
        else:
            # Map-valued option (e.g. compaction): update when any sub-key
            # differs, or when the existing map lacks one of the model's keys.
            try:
                for k, v in value.items():
                    if existing_value[k] != v:
                        update_options[name] = value
                        break
            except KeyError:
                update_options[name] = value
    if update_options:
        options = ' AND '.join(metadata.TableMetadataV3._make_option_strings(update_options))
        query = "ALTER TABLE {0} WITH {1}".format(model.column_family_name(), options)
        execute(query, connection=connection)
        return True
    return False
def update_shortcut_settings(self):
    """Creates the list store for the shortcuts"""
    self.shortcut_list_store.clear()
    shortcuts = self.gui_config_model.get_current_config_value(
        "SHORTCUTS", use_preliminary=True, default={})
    # One (action, keys) row per shortcut, sorted by action name.
    for action in sorted(shortcuts):
        self.shortcut_list_store.append((str(action), str(shortcuts[action])))
def check_overlap(a, b):
    """Check for wavelength overlap between two spectra.

    .. note:: Generalized from
        :meth:`pysynphot.spectrum.SpectralElement.check_overlap`.

    Parameters
    ----------
    a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
        Typically a source spectrum, spectral element, observation,
        or bandpass from observation mode.

    Returns
    -------
    result : {'full', 'partial', 'none'}
        Full, partial, or no overlap.

    Raises
    ------
    AttributeError
        Given spectrum does not have flux or throughput.
    """
    # Analytic spectra are defined at every wavelength, so overlap is total.
    if a.isAnalytic or b.isAnalytic:
        return 'full'

    # Determine the wavelength endpoints of each spectrum. For bandpass-like
    # objects only the non-zero throughput region counts.
    endpoints = []
    for spec in (a, b):
        if hasattr(spec, 'throughput'):
            wv = spec.wave[np.where(spec.throughput != 0)]
        elif hasattr(spec, 'flux'):
            wv = spec.wave
        else:
            raise AttributeError("neither flux nor throughput in %s" % spec)
        endpoints.append((wv.min(), wv.max()))
    (a1, a2), (b1, b2) = endpoints

    # Compare the two wavelength ranges.
    if a1 >= b1 and a2 <= b2:
        return 'full'
    if (a2 < b1) or (b2 < a1):
        return 'none'
    return 'partial'
def __update_info(self):
    """Updates "visualization options" and "file info" areas."""
    from f311 import explorer as ex
    import f311
    t = self.tableWidget
    z = self.listWidgetVis
    z.clear()
    classes = self.__vis_classes = []
    propss = self.__lock_get_current_propss()
    npp = len(propss)
    s0, s1 = "", ""
    if npp == 1:
        # Single file selected: offer per-file visualization classes.
        p = propss[0]
        # Visualization options
        if p.flag_scanned:
            if isinstance(p.f, f311.DataFile):
                classes.extend(f311.get_suitable_vis_classes(p.f))
                if ex.VisPrint in classes:
                    classes.remove(ex.VisPrint)
            if p.flag_text:
                # This is an exception, since "txt" is not a Vis descendant.
                # This will be properly handled in __visualize()
                classes.append("txt")
        for x in classes:
            if x == "txt":
                text = "View plain text"
            else:
                text = x.action
                text += " (" + x.__name__ + ")"
            item = QListWidgetItem(text)
            z.addItem(item)
        # File info
        s0 = p.get_summary()
        s1 = p.get_info()
    elif npp >= 2:
        # Multiple files selected: only collective visualizations apply.
        s0 = "{0:d} selected".format(npp)
        ff = [p.f for p in propss]
        flag_spectra = all([isinstance(f, f311.FileSpectrum) for f in ff])
        # gambiarra to visualize several PFANT .mod files
        has_pyfant = False
        try:
            import pyfant
            has_pyfant = True
        except:
            pass
        flag_mod = False
        if has_pyfant:
            flag_mod = all([isinstance(f, pyfant.FileModBin) and len(f.records) > 1 for f in ff])
        if flag_spectra:
            z.addItem(QListWidgetItem("Plot spectra stacked"))
            classes.append("sta")
            z.addItem(QListWidgetItem("Plot spectra overlapped"))
            classes.append("ovl")
        elif flag_mod:
            # TODO plugin-based way to handle visualization of multiple selection
            z.addItem(QListWidgetItem("View model grid"))
            classes.append("modgrid")
    # File info
    self.labelSummary.setText(s0)
    self.textEditInfo.setPlainText(s1)
def vertex_array(self, program, content, index_buffer=None, index_element_size=4, *, skip_errors=False) -> 'VertexArray':
    '''Create a :py:class:`VertexArray` object.

    Args:
        program (Program): The program used when rendering.
        content (list): A list of (buffer, format, attributes).
            See :ref:`buffer-format-label`.
        index_buffer (Buffer): An index buffer.

    Keyword Args:
        index_element_size (int): byte size of each index element, 1, 2 or 4.
        skip_errors (bool): Ignore skip_errors varyings.

    Returns:
        :py:class:`VertexArray` object
    '''
    members = program._members
    # Translate each (buffer, format, attr, ...) entry into the low-level
    # (mglo, format, attr-mglo, ...) form expected by the context.
    lowered = []
    for buf, fmt, *attrs in content:
        attr_mglos = tuple(getattr(members.get(name), 'mglo', None) for name in attrs)
        lowered.append((buf.mglo, fmt) + attr_mglos)
    lowered = tuple(lowered)
    ibo_mglo = index_buffer.mglo if index_buffer is not None else None
    vao = VertexArray.__new__(VertexArray)
    vao.mglo, vao._glo = self.mglo.vertex_array(program.mglo, lowered, ibo_mglo, index_element_size, skip_errors)
    vao._program = program
    vao._index_buffer = index_buffer
    vao._index_element_size = index_element_size
    vao.ctx = self
    vao.extra = None
    return vao
def predictions(self):
    """Generator that yields prediction objects from an API response."""
    raw_predictions = self.api.predictions(vid=self.vid)['prd']
    for raw in raw_predictions:
        prediction = Prediction.fromapi(self.api, raw)
        prediction._busobj = self
        yield prediction
def ic(ctext):
    '''Takes ciphertext and calculates its index of coincidence.

    :param ctext: ciphertext string
    :return: index of coincidence as a float; 0.0 for texts shorter than
        two characters (where the IC is undefined — previously this
        raised ZeroDivisionError).
    '''
    total = len(ctext)
    if total < 2:
        return 0.0
    counts = ngram_count(ctext, N=1)
    icval = 0
    for count in counts.values():
        icval += count * (count - 1)
    icval /= (total * (total - 1))
    return icval
def connect_post_namespaced_pod_portforward(self, name, namespace, **kwargs):
    """connect POST requests to portforward of Pod

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connect_post_namespaced_pod_portforward(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodPortForwardOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param int ports: List of ports to forward Required when using WebSockets
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync paths delegate to the *_with_http_info
    # helper: with 'async_req' set it returns the request thread, otherwise
    # it returns the response data directly.
    return self.connect_post_namespaced_pod_portforward_with_http_info(name, namespace, **kwargs)
def message(self):
    """Read mail, parse it and return a Message instance."""
    logger.debug("Parsing mail at {} ...".format(self.path))
    # message_from_file handles the Python 2 case; the binary variant only
    # exists on Python 3.
    parse = email.message_from_file if PY2 else email.message_from_binary_file
    with open(self.path, 'rb') as mail_file:
        return parse(mail_file)
def random_split(dataset, prob=.5):
    """Utility for performing a random split for text data that is already in
    bag-of-words format. For each (word, count) pair in a particular element,
    the counts are uniformly partitioned in either a training set or a test
    set.

    Parameters
    ----------
    dataset : SArray of type dict, SFrame with columns of type dict
        A data set in bag-of-words format.
    prob : float, optional
        Probability for sampling a word to be placed in the test set.

    Returns
    -------
    train, test : SArray
        Two data sets in bag-of-words format, where the combined counts are
        equal to the counts in the original data set.

    Examples
    --------
    >>> docs = turicreate.SArray([{'are': 5, 'you': 3, 'not': 1, 'entertained': 10}])
    >>> train, test = turicreate.text_analytics.random_split(docs)
    """
    def split_count(total, p):
        # Draw how many of `total` occurrences land in the test set.
        num_in_test = 0
        for _ in range(total):
            if random.random() < p:
                num_in_test += 1
        return [total - num_in_test, num_in_test]

    def pick_side(row, train=True):
        # Keep only nonzero counts for the chosen side (0=train, 1=test).
        ix = 0 if train else 1
        return dict([(key, value[ix]) for key, value in six.iteritems(row) if value[ix] != 0])

    # Each word maps to a 2-element list: [train count, test count].
    data = dataset.apply(lambda x: dict([(key, split_count(int(value), prob)) for key, value in six.iteritems(x)]))
    # Materialize the data set
    data.__materialize__()
    # Grab respective counts for each data set
    train = data.apply(lambda x: pick_side(x, train=True))
    test = data.apply(lambda x: pick_side(x, train=False))
    return train, test
def association(self, group_xid):
    """Add association using xid value.

    Args:
        group_xid (str): The external id of the Group to associate.
    """
    groups = self._indicator_data.setdefault('associatedGroups', [])
    groups.append({'groupXid': group_xid})
def query_trial(request):
    """Rest API to query the trial info, with the given trial_id.

    The url pattern should be like this:

        curl http://<server>:<port>/query_trial?trial_id=<trial_id>

    The response may be:
        "app_url": "None",
        "trial_status": "TERMINATED",
        "params": {'a': 1, 'b': 2},
        "job_id": "asynchyperband_test",
        "end_time": "2018-07-19 20:49:44",
        "start_time": "2018-07-19 20:49:40",
        "trial_id": "2067R2ZD",
    """
    trial_id = request.GET.get("trial_id")
    trials = TrialRecord.objects.filter(trial_id=trial_id).order_by("-start_time")
    if len(trials) == 0:
        # Fixed: interpolate the requested id (not the queryset repr) and
        # the "Unkonwn" typo.
        resp = "Unknown trial id %s.\n" % trial_id
    else:
        trial = trials[0]
        result = {
            "trial_id": trial.trial_id,
            "job_id": trial.job_id,
            "trial_status": trial.trial_status,
            "start_time": trial.start_time,
            "end_time": trial.end_time,
            "params": trial.params,
        }
        resp = json.dumps(result)
    return HttpResponse(resp, content_type="application/json;charset=utf-8")
def get_parent_plate_value(tree, node, value=None):
    """Recurse up the tree getting parent plate values.

    :param tree: The tree
    :param node: The current node
    :param value: The initial plate value
    :return: The plate value as a list of (tag, data) tuples, ordered from
        the topmost non-root ancestor down to the immediate parent.
    """
    if value is None:
        value = []
    parent = tree.parent(node.identifier)
    if parent.is_root():
        # The root itself contributes no plate value.
        return value
    value = PlateManager.get_parent_plate_value(tree, parent, value)
    # Removed a dead `if "." in parent.identifier: pass` branch that had
    # no effect.
    value.append((parent.tag, parent.data))
    return value
def bounds(self):
    """:return: List of [lower, upper] bounds, one per parameter.

    Free parameters use their declared [min, max]; fixed parameters get a
    degenerate interval pinned just next to their value.
    """
    result = []
    for param in self.params:
        if not param.fixed:
            result.append([param.min, param.max])
        elif param.value >= 0.0:
            result.append([np.nextafter(param.value, 0), param.value])
        else:
            result.append([param.value, np.nextafter(param.value, 0)])
    return result
def splitter(lines, sep="-=", keep_idx=False):
    """Splits underlined blocks without indentation (reStructuredText pattern).

    Parameters
    ----------
    lines :
        A list of strings.
    sep :
        Underline symbols. A line made only of such symbols is seen as an
        underline.
    keep_idx :
        If False (default), the function returns a collections.OrderedDict.
        Else, returns a list of index pairs.
        NOTE(review): `keep_idx` is never referenced in this body — confirm
        whether the index-pair mode was ever implemented.

    Returns
    -------
    A collections.OrderedDict mapping each underlined key (e.g. ``"Key\\n==="``)
    to the list of lines that follow it, in input order. Empty keys get an
    order numbering (e.g. after a ``"----"`` separator). Values exclude the
    key and its underline and are stripped of leading/trailing blank lines.
    """
    # Indices of the line *above* each underline, followed by a sentinel at
    # len(lines). `audiolazy.Stream` is a lazy sequence; `.append` extends it.
    separators = audiolazy.Stream(idx - 1 for idx, el in enumerate(lines) if all(char in sep for char in el) and len(el) > 0).append([len(lines)])
    # Peek at the first separator without consuming the stream.
    first_idx = separators.copy().take()
    blk_data = OrderedDict()
    empty_count = iter(audiolazy.count(1))
    next_empty = lambda: "--Empty--{0}--".format(next(empty_count))
    if first_idx != 0:
        # Anything before the first underlined key becomes an "empty" block.
        blk_data[next_empty()] = lines[:first_idx]
    # Overlapping pairs of separator indices delimit each block.
    for idx1, idx2 in separators.blocks(size=2, hop=1):
        name = lines[idx1].strip() if lines[idx1].strip() != "" else next_empty()
        # Skip the key line and its underline (idx1 + 2).
        blk_data[name] = lines[idx1 + 2:idx2]
    # Strips the empty lines
    # NOTE(review): these loops assume every block is non-empty after the
    # first pop; an all-blank block would raise IndexError — confirm inputs.
    for name in blk_data:
        while blk_data[name][-1].strip() == "":
            blk_data[name].pop()
        while blk_data[name][0].strip() == "":
            blk_data[name] = blk_data[name][1:]
    return blk_data
def write(self, title, data, output=None):
    '''Add a data entry to the currently opened section.

    Non-container data is wrapped as raw content; otherwise the data is
    rendered with the configured outputter, falling back to YAML.

    :return:
    '''
    if not isinstance(data, (dict, list, tuple)):
        data = {'raw-content': str(data)}
    output = output or self.__default_outputter
    content = None
    if output != 'null':
        try:
            if isinstance(data, dict) and 'return' in data:
                data = data['return']
            content = self._printout(data, output)
        except Exception:
            # Fall-back to just raw YAML
            content = None
    if content is None:
        # Round-trip through JSON to normalize types, then dump as YAML.
        data = json.loads(json.dumps(data))
        if isinstance(data, dict) and data.get('return'):
            data = data.get('return')
        content = yaml.safe_dump(data, default_flow_style=False, indent=4)
    self.__current_section.append({title: content})
def x10_command_type(command):
    """Return the X10 command type from an X10 command."""
    broadcast_commands = (X10_COMMAND_ALL_UNITS_OFF,
                          X10_COMMAND_ALL_LIGHTS_ON,
                          X10_COMMAND_ALL_LIGHTS_OFF)
    if command in broadcast_commands:
        return X10CommandType.BROADCAST
    return X10CommandType.DIRECT
def on_batch_end(self, train, **kwargs):
    "Take the stored results and puts it in `self.stats`"
    if not train:
        return
    self.stats.append(self.hooks.stored)
def set_list_predicates(self):
    """Reads through the rml mappings and determines all fields that should
    map to a list/array within a json output.

    Populates ``self.array_fields``: a dict mapping each subject class URI
    (SPARQL form) to the list of its predicate URIs typed as rdf:List.
    """
    results = self.rml.query("""
        SELECT DISTINCT ?subj_class ?list_field
        {
            ?bn rr:datatype rdf:List .
            ?bn rr:predicate ?list_field .
            ?s ?p ?bn .
            ?s rr:subjectMap ?sm_bn .
            ?sm_bn rr:class ?subj_class .
        }""")
    list_preds = [(Uri(row[0]).sparql, Uri(row[1]).sparql) for row in results]
    array_fields = {}
    for subj_class, list_field in list_preds:
        # setdefault replaces the previous try/except-KeyError grouping.
        array_fields.setdefault(subj_class, []).append(list_field)
    self.array_fields = array_fields
def launch(self):
    """Make the script file, submit it via sbatch and return the new job id.

    NOTE: this is Python 2 code (print statement with trailing comma).
    """
    # Make script file #
    self.make_script()
    # Do it #
    sbatch_out = sh.sbatch(self.script_path)
    jobs.expire()
    # Message #
    print Color.i_blu + "SLURM:" + Color.end + " " + str(sbatch_out),
    # Return id #
    # sbatch prints "Submitted batch job <id>"; extract the numeric id.
    self.id = int(re.findall("Submitted batch job ([0-9]+)", str(sbatch_out))[0])
    return self.id
def setPrefix(self, p, u=None):
    """Set the element namespace prefix.

    @param p: A new prefix for the element.
    @type p: basestring
    @param u: A namespace URI to be mapped to the prefix.
    @type u: basestring
    @return: self
    @rtype: L{Element}
    """
    self.prefix = p
    # Only register the mapping when both a prefix and a URI were given.
    if not (p is None or u is None):
        self.addPrefix(p, u)
    return self
def on_close(self):
    '''Called when client closes this connection. Cleanup is done here.'''
    websocks = self.funcserver.websocks
    if self.id in websocks:
        # Null the entry now, then remove it for real on the IO loop.
        websocks[self.id] = None
        ioloop = tornado.ioloop.IOLoop.instance()
        ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))
    psession = self.funcserver.pysessions.get(self.pysession_id, None)
    if psession:
        psession['socks'].remove(self.id)
        # Drop the whole python session once its last socket is gone.
        if not psession['socks']:
            del self.funcserver.pysessions[self.pysession_id]
def arabic_digit_to_thai_digit(text: str) -> str:
    """Convert Arabic digits to Thai digits.

    :param str text: Text with Arabic digits such as '1', '2', '3'
    :return: Text with Arabic digits converted to Thai digits such as
        '๑', '๒', '๓'; empty string for empty or non-string input.
    """
    if not text or not isinstance(text, str):
        return ""
    return "".join(_arabic_thai.get(ch, ch) for ch in text)
def _get_keyid_by_gpg_key(key_material):
    """Get a GPG key fingerprint by GPG key material.

    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
    or binary GPG key material. Can be used, for example, to generate file
    names for keys passed via charm options.

    :param key_material: ASCII armor-encoded or binary GPG key material
    :type key_material: bytes
    :raises: GPGKeyError if invalid key material has been provided
    :returns: A GPG key fingerprint
    :rtype: str
    """
    # Use the same gpg command for both Xenial and Bionic
    cmd = 'gpg --with-colons --with-fingerprint'
    ps = subprocess.Popen(cmd.split(),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE)
    out, err = ps.communicate(input=key_material)
    if six.PY3:
        out = out.decode('utf-8')
        err = err.decode('utf-8')
    if 'gpg: no valid OpenPGP data found.' in err:
        raise GPGKeyError('Invalid GPG key material provided')
    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
    match = re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE)
    if match is None:
        # Previously this raised AttributeError on .group(); fail with the
        # documented exception type instead.
        raise GPGKeyError('Could not extract fingerprint from GPG key material')
    return match.group(1)
def check_lazy_load_subadres(f):
    '''Decorator function to lazy load a :class:`Subadres`.'''
    def wrapper(*args):
        subadres = args[0]
        needs_load = (subadres._metadata is None
                      or subadres.aard_id is None
                      or subadres.huisnummer_id is None)
        if needs_load:
            log.debug('Lazy loading Subadres %d', subadres.id)
            subadres.check_gateway()
            loaded = subadres.gateway.get_subadres_by_id(subadres.id)
            subadres._metadata = loaded._metadata
            subadres.aard_id = loaded.aard_id
            subadres.huisnummer_id = loaded.huisnummer_id
        return f(*args)
    return wrapper
def get_sent(self, *args, **kwargs):
    """Return a get_content generator for sent messages.

    The additional parameters are passed directly into :meth:`.get_content`.
    Note: the `url` parameter cannot be altered.
    """
    sent_url = self.config['sent']
    return self.get_content(sent_url, *args, **kwargs)
def drum_status(self, filter_supported: bool = True) -> Dict[str, Any]:
    """Return the state of all drums.

    When *filter_supported* is True, drums reported as unsupported
    (``opt == 0``) are omitted from the result.
    """
    status = {}
    for color in self.COLOR_NAMES:
        try:
            stat = self.data.get('{}_{}'.format(SyncThru.DRUM, color), {})
            if filter_supported and stat.get('opt', 0) == 0:
                continue
            status[color] = stat
        except (KeyError, AttributeError):
            # Malformed payload for this color: report it as empty.
            status[color] = {}
    return status
def reset(self):
    """Reset the connection: clear request/response state and start a new
    transaction id."""
    self._request, self._response = None, None
    self._transaction_id = uuid.uuid4().hex
def import_from_path(path):
    """Imports a package, module or attribute from a dotted path.

    Thanks http://stackoverflow.com/a/14050282/1267398

    >>> import_from_path('os.path')
    <module 'posixpath' ...
    >>> import_from_path('os.path.basename')
    <function basename at ...
    >>> import_from_path('os')
    <module 'os' from ...

    Raises ImportError when neither a module nor an attribute can be found
    at *path*.
    """
    try:
        return importlib.import_module(path)
    except ImportError:
        # Not importable as a module; maybe the last segment is an attribute.
        if '.' not in path:
            raise
        parent_name, leaf = path.rsplit('.', 1)
        if not does_module_exist(parent_name):
            raise ImportError("No object found at '{}'".format(path))
        parent = importlib.import_module(parent_name)
        if not hasattr(parent, leaf):
            raise ImportError("No object found at '{}'".format(path))
        return getattr(parent, leaf)
def update_table(self, table_name, provisioned_throughput):
    """Updates the provisioned throughput for a given table.

    :type table_name: str
    :param table_name: The name of the table to update.

    :type provisioned_throughput: dict
    :param provisioned_throughput: A Python version of the
        ProvisionedThroughput data structure defined by DynamoDB.
    """
    payload = json.dumps({
        'TableName': table_name,
        'ProvisionedThroughput': provisioned_throughput,
    })
    return self.make_request('UpdateTable', payload)
def set_end(self, t):
    """Override the GPS end time (and adjust the duration) of this
    ScienceSegment.

    @param t: new GPS end time.
    """
    shrink = self.__end - t
    self.__dur -= shrink
    self.__end = t
def _get_save_wall_photo ( session , photo , server , hash , user_id = None , group_id = None ) :
"""https : / / vk . com / dev / photos . saveWallPhoto""" | if group_id < 0 :
group_id = abs ( group_id )
response = session . fetch ( "photos.saveWallPhoto" , photo = photo , server = server , hash = hash , user_id = user_id , group_id = group_id ) [ 0 ]
return response [ 'id' ] , response [ 'owner_id' ] |
def get_all_breakpoints(self):
    """Returns all breakpoint objects as a list of tuples.

    Each tuple contains:
     - Process global ID to which the breakpoint applies.
     - Thread global ID to which the breakpoint applies, or C{None}.
     - The L{Breakpoint} object itself.

    @note: If you're only interested in a specific breakpoint type, or in
        breakpoints for a specific process or thread, it's probably faster
        to call one of the following methods:
         - L{get_all_code_breakpoints}
         - L{get_all_page_breakpoints}
         - L{get_all_hardware_breakpoints}
         - L{get_process_code_breakpoints}
         - L{get_process_page_breakpoints}
         - L{get_process_hardware_breakpoints}
         - L{get_thread_hardware_breakpoints}

    @rtype:  list of tuple( pid, tid, bp )
    @return: List of all breakpoints.
    """
    result = []
    # Code and page breakpoints are per-process (no thread id).
    for pid, bp in self.get_all_code_breakpoints():
        result.append((pid, None, bp))
    for pid, bp in self.get_all_page_breakpoints():
        result.append((pid, None, bp))
    # Hardware breakpoints are per-thread; resolve the owning process.
    for tid, bp in self.get_all_hardware_breakpoints():
        owner_pid = self.system.get_thread(tid).get_pid()
        result.append((owner_pid, tid, bp))
    return result
def open(self):
    """Implementation of NAPALM method open."""
    try:
        connection = self.transport_class(
            host=self.hostname,
            username=self.username,
            password=self.password,
            timeout=self.timeout,
            **self.eapi_kwargs
        )
        if self.device is None:
            self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
        # Creating the Node does not probe the device, so run a trivial
        # command to verify the eAPI endpoint is actually usable.
        self.device.run_commands(["show clock"], encoding="text")
    except ConnectionError as ce:
        # Raised either when the device is unreachable or when the HTTP(S)
        # agent is disabled ("show management api http-commands").
        raise ConnectionException(py23_compat.text_type(ce))
def parse_array(array_string):
    """Parse an array string as returned by clickhouse. For example:

        "['hello', 'world']" ==> ["hello", "world"]
        "[1,2,3]"            ==> ["1", "2", "3"]

    Note: unquoted values are returned as strings, not converted to
    numbers. Raises ValueError on malformed input (previously malformed
    strings could raise IndexError or AttributeError instead).
    """
    # Sanity check
    if len(array_string) < 2 or array_string[0] != '[' or array_string[-1] != ']':
        raise ValueError('Invalid array string: "%s"' % array_string)
    # Drop opening brace
    array_string = array_string[1:]
    # Go over the string, lopping off each value at the beginning until
    # nothing is left
    values = []
    while array_string:
        if array_string == ']':
            # End of array
            return values
        elif array_string[0] in ', ':
            # In between values
            array_string = array_string[1:]
        elif array_string[0] == "'":
            # Start of quoted value, find its end (a quote not preceded by
            # a backslash)
            match = re.search(r"[^\\]'", array_string)
            if match is None:
                raise ValueError('Missing closing quote: "%s"' % array_string)
            values.append(array_string[1:match.start() + 1])
            array_string = array_string[match.end():]
        else:
            # Start of non-quoted value, find its end
            match = re.search(r",|\]", array_string)
            if match is None:
                raise ValueError('Missing closing bracket: "%s"' % array_string)
            values.append(array_string[0:match.start()])
            array_string = array_string[match.end() - 1:]
    raise ValueError('Unterminated array string')
def get_task_config_fields(config_class):
    """Get all configuration Fields from a Config class.

    Parameters
    ----------
    config_class : ``lsst.pipe.base.Config``-type
        The configuration class (not an instance) corresponding to a Task.

    Returns
    -------
    config_fields : `dict`
        Mapping where keys are the config attribute names and values are
        subclasses of ``lsst.pex.config.Field``. The mapping is
        alphabetically ordered by attribute name.
    """
    from lsst.pex.config import Field

    def _member_is_field(member):
        return isinstance(member, Field)

    return _get_alphabetical_members(config_class, _member_is_field)
def _updateInferenceStats(self, statistics, objectName=None):
    """Updates the inference statistics.

    Appends, for each cortical column, the sizes of the current L4/L2
    representations and segment counts to the corresponding lists in
    `statistics`.  NOTE: uses `xrange`, so this is Python 2 code.

    Parameters:
    ----------------------------
    @param statistics (dict)
           Dictionary in which to write the statistics

    @param objectName (str)
           Name of the inferred object, if known. Otherwise, set to None.
    """
    L4Representations = self.getL4Representations()
    L4PredictedCells = self.getL4PredictedCells()
    L4PredictedActiveCells = self.getL4PredictedActiveCells()
    L2Representation = self.getL2Representations()
    for i in xrange(self.numColumns):
        # Per-column sizes of the current representations.
        statistics["L4 Representation C" + str(i)].append(len(L4Representations[i]))
        statistics["L4 Predicted C" + str(i)].append(len(L4PredictedCells[i]))
        statistics["L4 PredictedActive C" + str(i)].append(len(L4PredictedActiveCells[i]))
        statistics["L2 Representation C" + str(i)].append(len(L2Representation[i]))
        statistics["L4 Apical Segments C" + str(i)].append(len(self.L4Columns[i]._tm.getActiveApicalSegments()))
        statistics["L4 Basal Segments C" + str(i)].append(len(self.L4Columns[i]._tm.getActiveBasalSegments()))
        # add true overlap if objectName was provided
        if objectName in self.objectL2Representations:
            objectRepresentation = self.objectL2Representations[objectName]
            # Overlap between the stored object SDR and the current L2 SDR.
            statistics["Overlap L2 with object C" + str(i)].append(len(objectRepresentation[i] & L2Representation[i]))
def on(self, event, handler):
    """Attaches the handler to the specified event.

    @param event: event to attach the handler to. Any object can be passed
        as event, but string is preferable. If qcore.EnumBase instance is
        passed, its name is used as event key.
    @param handler: event handler.
    @return: self, so calls like this can be chained together.
    """
    self.get_or_create(event).subscribe(handler)
    return self
def make_pre_authed_request(self, env, method=None, path=None, body=None, headers=None):
    """Nearly the same as swift.common.wsgi.make_pre_authed_request
    except that this also always sets the 'swift.source' and user agent.

    Newer Swift code will support swift_source as a kwarg, but we do it
    this way so we don't have to have a newer Swift. Since we're doing
    this anyway, we may as well set the user agent too since we always
    do that.
    """
    if self.default_storage_policy:
        # Copy instead of mutating the caller's headers dict in place
        # (the previous headers.update(...) leaked the policy header back
        # into the caller's dict).
        headers = dict(headers) if headers else {}
        headers['X-Storage-Policy'] = self.default_storage_policy
    subreq = swift.common.wsgi.make_pre_authed_request(
        env, method=method, path=path, body=body, headers=headers,
        agent=self.agent)
    subreq.environ['swift.source'] = self.swift_source
    return subreq
def fill_edge_matrix(nsrcs, match_dict):
    """Create and fill a matrix with the graph 'edges' between sources.

    Parameters
    ----------
    nsrcs : int
        number of sources (used to allocate the size of the matrix)
    match_dict : dict((int, int): float)
        Each entry gives a pair of source indices, and the corresponding
        measure (either distance or sigma)

    Returns
    -------
    e_matrix : `~numpy.ndarray`
        numpy.ndarray((nsrcs, nsrcs)) filled with zeros except for the
        matches, which are filled with the edge measures
    """
    e_matrix = np.zeros((nsrcs, nsrcs))
    for (row, col), measure in match_dict.items():
        e_matrix[row, col] = measure
    return e_matrix
def CheckFile(self, filename):
    """Validates the artifacts definition in a specific file.

    Args:
        filename (str): name of the artifacts definition file.

    Returns:
        bool: True if the file contains valid artifacts definitions.
    """
    result = True
    artifact_reader = reader.YamlArtifactsReader()
    try:
        for artifact_definition in artifact_reader.ReadFile(filename):
            try:
                self._artifact_registry.RegisterDefinition(artifact_definition)
            except KeyError:
                # A definition with the same name was already registered.
                logging.warning('Duplicate artifact definition: {0:s} in file: {1:s}'.format(artifact_definition.name, filename))
                result = False
            # Definition-level OS support; used as a fallback when a source
            # does not declare its own supported_os.
            artifact_definition_supports_macos = (definitions.SUPPORTED_OS_DARWIN in (artifact_definition.supported_os))
            artifact_definition_supports_windows = (definitions.SUPPORTED_OS_WINDOWS in (artifact_definition.supported_os))
            for source in artifact_definition.sources:
                if source.type_indicator in (definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH):
                    if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or (artifact_definition_supports_macos and not source.supported_os)):
                        if not self._CheckMacOSPaths(filename, artifact_definition, source, source.paths):
                            result = False
                    elif (artifact_definition_supports_windows or definitions.SUPPORTED_OS_WINDOWS in source.supported_os):
                        for path in source.paths:
                            if not self._CheckWindowsPath(filename, artifact_definition, source, path):
                                result = False
                elif source.type_indicator == (definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
                    # Exempt the legacy file from duplicate checking because it has
                    # duplicates intentionally.
                    if (filename != self.LEGACY_PATH and self._HasDuplicateRegistryKeyPaths(filename, artifact_definition, source)):
                        result = False
                    for key_path in source.keys:
                        if not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_path):
                            result = False
                elif source.type_indicator == (definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
                    for key_value_pair in source.key_value_pairs:
                        if not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_value_pair['key']):
                            result = False
    except errors.FormatError as exception:
        # The file could not be parsed at all.
        logging.warning('Unable to validate file: {0:s} with error: {1!s}'.format(filename, exception))
        result = False
    return result
def from_list(commands):
    """Build a command tree from a flat DFS traversal.

    Given a list of command dicts (each with 'level', 'description' and
    'id' keys) that represents a DFS traversal of a command tree, returns
    a list of nested dicts ('description', 'children', 'id') representing
    the command tree.
    """
    def subtrees(commands, level):
        # Yield (parent, children) pairs: each command at this level
        # followed by the deeper commands that belong under it.
        if not commands:
            return
        acc = []
        parent, *commands = commands
        for command in commands:
            if command['level'] > level:
                acc.append(command)
            else:
                yield (parent, acc)
                parent = command
                # NOTE(review): this clears the list object just yielded.
                # It is safe only because the consumer (`walk`) fully
                # processes each pair before advancing the generator —
                # confirm before refactoring.
                acc.clear()
        yield (parent, acc)
    def walk(commands, level=0):
        # Recursively convert each (parent, subtree) pair into a dict node.
        return [{'description': key['description'], 'children': walk(subtree, level + 1), 'id': key['id']} for key, subtree in subtrees(commands, level)]
    return walk(commands)
def create_widget(self):
    """Create the underlying widget.

    A toast is not a subclass of view, hence we don't set name as widget
    or children will try to use it as their parent (which crashes).
    """
    d = self.declaration
    duration = 0 if d.duration else -2
    snackbar = Snackbar.make(self.parent_widget(), d.text, duration)
    snackbar.then(self.on_widget_created)
def view_links(obj):
    '''Link to performance data and duplicate overview.'''
    # Pass URLs as format_html() arguments so they are properly escaped,
    # instead of %-interpolating pre-built strings (which bypassed escaping
    # and would break on brace characters via str.format).
    result = format_html('<a href="{}" style="white-space: nowrap">Show duplicates</a><br/>',
                         reverse('duplicates', args=(obj.pk,)))
    result += format_html('<a href="{}" style="white-space: nowrap">Show submissions</a><br/>',
                          obj.grading_url())
    result += format_html('<a href="{}" style="white-space: nowrap">Download submissions</a>',
                          reverse('assarchive', args=(obj.pk,)))
    return result
def tournament(self, negative=False):
    """Tournament selection; when *negative* is True it performs negative
    tournament selection (picking the worst candidate instead of the best).
    """
    # Early generations (or disabled negative selection) fall back to
    # plain random selection.
    if self.generation <= self._random_generations and not negative:
        return self.random_selection()
    if negative and not self._negative_selection:
        return self.random_selection(negative=negative)
    candidates = self.random()
    scored = [(pos, self.population[idx].fitness) for pos, idx in enumerate(candidates)]
    chooser = min if negative else max
    winner_pos = chooser(scored, key=lambda item: item[1])[0]
    return candidates[winner_pos]
def root():
    """Placeholder root url for the PCI.

    Ideally this should never be called!
    """
    base = request.url
    links = {
        "message": "Welcome to the SIP Processing Controller Interface",
        "items": [
            {"href": "{}health".format(base)},
            {"href": "{}subarrays".format(base)},
            {"href": "{}scheduling_blocks".format(base)},
            {"href": "{}processing_blocks".format(base)},
        ],
    }
    return {"links": links}, HTTPStatus.OK
def int_global_to_local_stop(self, index, axis=0):
    """Calculate a local stop index from a global stop index.

    :param index: global index as integer
    :param axis: current axis to process
    :return: local index, clamped to the region length when past the end,
        or None when the index falls before the region (plus halo).
    """
    region = self.__mask[axis]
    lower_limit = region.start + self.__halos[0][axis]
    if index < lower_limit:
        return None
    if index > region.stop:
        return region.stop - region.start
    return index - region.start
def _bucket_key ( self ) :
"""Returns hash bucket key for the redis key""" | return "{}.size.{}" . format ( self . prefix , ( self . _hashed_key // 1000 ) if self . _hashed_key > 1000 else self . _hashed_key ) |
def load(fnames, tag='', sat_id=None):
    """Load the SuperMAG files.

    Parameters
    ----------
    fnames : (list)
        List of filenames
    tag : (str or NoneType)
        Denotes type of file to load. Accepted types are 'indices', 'all',
        'stations', and '' (for just magnetometer measurements). (default='')
    sat_id : (str or NoneType)
        Satellite ID for constellations, not used. (default=None)

    Returns
    -------
    data : (pandas.DataFrame)
        Object containing satellite data
    meta : (pysat.Meta)
        Object containing metadata such as column names and units
    """
    # Ensure that there are files to load
    if len(fnames) <= 0:
        return pysat.DataFrame(None), pysat.Meta(None)
    # Ensure that the files are in a list
    if isinstance(fnames, str):
        fnames = [fnames]
    # Initialise the output data
    data = pds.DataFrame()
    baseline = list()
    # Cycle through the files
    for fname in fnames:
        # Remove date index from end of filename
        fname = fname[:-11]
        file_type = path.splitext(fname)[1].lower()
        # Open and load the files for each file type
        if file_type == ".csv":
            if tag != "indices":
                temp = load_csv_data(fname, tag)
                # NOTE(review): when tag == "indices" and the file is CSV,
                # 'temp' is never assigned and the len(temp.columns) test
                # below raises NameError — confirm that combination cannot
                # occur upstream.
        else:
            # ASCII files may also carry baseline information.
            temp, bline = load_ascii_data(fname, tag)
            if bline is not None:
                baseline.append(bline)
        # Save the loaded data in the output data structure
        if len(temp.columns) > 0:
            data = pds.concat([data, temp], axis=0)
        del temp
    # If data was loaded, update the meta data
    if len(data.columns) > 0:
        meta = pysat.Meta()
        for cc in data.columns:
            meta[cc] = update_smag_metadata(cc)
        meta.info = {'baseline': format_baseline_list(baseline)}
    else:
        meta = pysat.Meta(None)
    return data, meta
def as_xml(self):
    """XML representation of the error to be used in HTTP response.

    This XML format follows the IIIF Image API v1.0 specification,
    see <http://iiif.io/api/image/1.0/#error>
    """
    # Newline separators are emitted only when pretty-printing is enabled.
    sep = "\n" if self.pretty_xml else ""
    root = Element('error', {'xmlns': I3F_NS})
    root.text = sep
    param_el = Element('parameter', {})
    param_el.text = self.parameter
    param_el.tail = sep
    root.append(param_el)
    # The human-readable text element is optional.
    if self.text:
        text_el = Element('text', {})
        text_el.text = self.text
        text_el.tail = sep
        root.append(text_el)
    # Serialize to UTF-8; Pythons older than 2.7 lack xml_declaration support.
    buf = io.BytesIO()
    tree = ElementTree(root)
    if sys.version_info < (2, 7):
        tree.write(buf, encoding='UTF-8')
    else:
        tree.write(buf, encoding='UTF-8', xml_declaration=True, method='xml')
    return buf.getvalue().decode('utf-8')
def element_count(self):
    """Retrieve the number of elements in this type.

    Returns an int.

    If the Type is not an array or vector, this raises.
    """
    # libclang reports a negative count for non-array/non-vector types.
    count = conf.lib.clang_getNumElements(self)
    if count < 0:
        raise Exception('Type does not have elements.')
    return count
def save(self):
    """Updates the list with changes.

    Collects pending row changes and deletions into a single
    UpdateListItems SOAP batch, posts it, and applies the per-row
    results (new IDs on success, UpdateFailedError on failure).
    """
    # Based on the documentation at
    # http://msdn.microsoft.com/en-us/library/lists.lists.updatelistitems%28v=office.12%29.aspx
    # Note, this ends up un-namespaced. SharePoint doesn't care about
    # namespaces on this XML node, and will bork if any of these elements
    # have a namespace prefix. Likewise Method and Field in
    # SharePointRow.get_batch_method().
    batches = E.Batch(ListVersion='1', OnError='Return')
    # Here's the root element of our SOAP request.
    xml = SP.UpdateListItems(SP.listName(self.id), SP.updates(batches))
    # rows_by_batch_id contains a mapping from new rows to their batch
    # IDs, so we can set their IDs when they are returned by SharePoint.
    rows_by_batch_id, batch_id = {}, 1
    # First, changed/new rows: each contributes one Method element.
    for row in self._rows:
        batch = row.get_batch_method()
        if batch is None:
            # Row has no pending changes.
            continue
        # Add the batch ID
        batch.attrib['ID'] = text_type(batch_id)
        rows_by_batch_id[batch_id] = row
        batches.append(batch)
        batch_id += 1
    # Then, deletions: a Delete Method keyed by the row's ID field.
    for row in self._deleted_rows:
        batch = E.Method(E.Field(text_type(row.id), Name='ID'), ID=text_type(batch_id), Cmd='Delete')
        rows_by_batch_id[batch_id] = row
        batches.append(batch)
        batch_id += 1
    # Nothing to send — avoid an empty SOAP round-trip.
    if len(batches) == 0:
        return
    response = self.opener.post_soap(LIST_WEBSERVICE, xml, soapaction='http://schemas.microsoft.com/sharepoint/soap/UpdateListItems')
    # Each Result element's ID attribute is "<batch_id>,<operation>".
    for result in response.xpath('.//sp:Result', namespaces=namespaces):
        batch_id, batch_result = result.attrib['ID'].split(',')
        row = rows_by_batch_id[int(batch_id)]
        error_code = result.find('sp:ErrorCode', namespaces=namespaces)
        error_text = result.find('sp:ErrorText', namespaces=namespaces)
        # 0x00000000 is SharePoint's success code.
        if error_code is not None and error_code.text != '0x00000000':
            raise UpdateFailedError(row, batch_result, error_code.text, error_text.text)
        if batch_result in ('Update', 'New'):
            # Refresh the row from the returned z:row (picks up new IDs).
            row._update(result.xpath('z:row', namespaces=namespaces)[0], clear=True)
        else:
            self._deleted_rows.remove(row)
    # Sanity checks: everything pending was acknowledged by the server.
    assert not self._deleted_rows
    assert not any(row._changed for row in self.rows)
def reverse(self):
    """Reverse an already integrated orbit in place, so it runs from end
    to beginning over t=0 to tend.

    INPUT: (none)

    OUTPUT: (none)

    HISTORY: 2011-04-13 - Written - Bovy (NYU)
    """
    # Drop cached interpolation state; it is invalid after reordering.
    for cached in ('_orbInterp', 'rs'):
        if hasattr(self, cached):
            delattr(self, cached)
    # Order the time indices from latest to earliest.
    order = sorted(range(len(self._orb.t)),
                   key=lambda idx: self._orb.t[idx],
                   reverse=True)
    # Reorder every phase-space column of the orbit accordingly.
    for col in range(self._orb.orbit.shape[1]):
        self._orb.orbit[:, col] = self._orb.orbit[order, col]
    return None
def vectorize(function):
    """Allow a method that only accepts scalars to accept vectors too.

    This decorator has two different behaviors depending on the
    dimensionality of the array passed as an argument:

    **1-d array**
        Assumes ``function(self, X, *args, **kwargs)`` where ``X`` is a
        scalar. Elements are fed one at a time; input and output both
        have shape ``(n,)``.

    **2-d array**
        Assumes ``function(self, X0, ..., Xj, *args, **kwargs)`` where
        each ``Xi`` is a scalar. Each row is unpacked per call. Input has
        shape ``(n, j)``, output shape ``(n,)``.

    The returned function is guaranteed to return a :class:`numpy.ndarray`
    for array input (scalars pass through unchanged).

    Args:
        function (callable): Function that only accepts and returns scalars.

    Returns:
        callable: Decorated function that can accept and return
        :class:`numpy.ndarray`.

    Raises:
        ValueError: If the input array has more than 2 dimensions.
    """
    from functools import wraps

    # functools.wraps preserves __name__, __module__ and __doc__ of the
    # wrapped function (the original code only copied __doc__ by hand).
    @wraps(function)
    def decorated(self, X, *args, **kwargs):
        # Scalars are passed straight through to the wrapped function.
        if not isinstance(X, np.ndarray):
            return function(self, X, *args, **kwargs)
        # Promote 1-d input to a single-column matrix so both cases share
        # the row-unpacking loop below.
        if X.ndim == 1:
            X = X.reshape([-1, 1])
        if X.ndim == 2:
            return np.fromiter(
                (function(self, *row, *args, **kwargs) for row in X),
                np.dtype('float64'))
        raise ValueError('Arrays of dimensionality higher than 2 are not supported.')

    return decorated
def save(self, folder_path='', configuration_type='running', vrf_management_name=None, return_artifact=False):
    """Backup 'startup-config' or 'running-config' from device to provided file_system [ftp|tftp].

    Also possible to backup config to localhost.

    :param folder_path: tftp/ftp server where file be saved
    :param configuration_type: type of configuration that will be saved (StartUp or Running)
    :param vrf_management_name: Virtual Routing and Forwarding management name
    :param return_artifact: when True, return an OrchestrationSavedArtifact
        describing the saved file instead of the file name
    :return: status message / exception
    :rtype: OrchestrationSavedArtifact or str
    """
    # Fall back to the resource-level VRF when the caller did not supply one.
    if hasattr(self.resource_config, "vrf_management_name"):
        vrf_management_name = vrf_management_name or self.resource_config.vrf_management_name
    self._validate_configuration_type(configuration_type)
    folder_path = self.get_path(folder_path)
    # Build a unique destination name: truncated system name + config type
    # + timestamp. Raw string fixes the invalid '\s' escape in the original.
    system_name = re.sub(r'\s+', '_', self.resource_config.name)[:23]
    time_stamp = time.strftime("%d%m%y-%H%M%S", time.localtime())
    destination_filename = '{0}-{1}-{2}'.format(system_name, configuration_type.lower(), time_stamp)
    full_path = join(folder_path, destination_filename)
    folder_path = self.get_path(full_path)
    self.save_flow.execute_flow(folder_path=folder_path, configuration_type=configuration_type.lower(), vrf_management_name=vrf_management_name)
    if return_artifact:
        # Split "scheme:rest" into artifact type and identifier.
        artifact_type = full_path.split(':')[0]
        identifier = full_path.replace("{0}:".format(artifact_type), "")
        return OrchestrationSavedArtifact(identifier=identifier, artifact_type=artifact_type)
    return destination_filename
def remove(name, **kwargs):
    '''Remove system rc configuration variables

    CLI Example:

    .. code-block:: bash

        salt '*' sysrc.remove name=sshd_enable
    '''
    # Assemble the sysrc invocation piece by piece, then join once.
    parts = ['sysrc -v']
    if 'file' in kwargs:
        parts.append('-f ' + kwargs['file'])
    if 'jail' in kwargs:
        parts.append('-j ' + kwargs['jail'])
    parts.append('-x ' + name)
    output = __salt__['cmd.run'](' '.join(parts))
    # sysrc reports unknown variables on stdout rather than via exit code.
    if "sysrc: unknown variable" in output:
        raise CommandExecutionError(output)
    return name + " removed"
def reshape(self, *shape, **kwargs):
    """Returns a **view** of this array with a new shape without altering any data.

    Parameters
    ----------
    shape : tuple of int, or n ints
        The new shape should not change the array size, namely
        ``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
        Some dimensions of the shape can take special values from the set
        {0, -1, -2, -3, -4}. The significance of each is explained below:

        - ``0`` copy this dimension from the input to the output shape. Example::

            input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2)
            input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4)

        - ``-1`` infers the dimension of the output shape by using the remainder
          of the input dimensions, keeping the size of the new array the same as
          that of the input array. At most one dimension of shape can be -1.
          Example::

            input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4)
            input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8)
            input shape = (2,3,4), shape = (-1,),    output shape = (24,)

        - ``-2`` copy all/remainder of the input dimensions to the output shape.
          Example::

            input shape = (2,3,4), shape = (-2,),    output shape = (2,3,4)
            input shape = (2,3,4), shape = (2,-2),   output shape = (2,3,4)
            input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1)

        - ``-3`` use the product of two consecutive dimensions of the input
          shape as the output dimension. Example::

            input shape = (2,3,4),   shape = (-3,4),   output shape = (6,4)
            input shape = (2,3,4,5), shape = (-3,-3),  output shape = (6,20)
            input shape = (2,3,4),   shape = (0,-3),   output shape = (2,12)
            input shape = (2,3,4),   shape = (-3,-2),  output shape = (6,4)

        - ``-4`` split one dimension of the input into two dimensions passed
          subsequent to -4 in shape (can contain -1). Example::

            input shape = (2,3,4), shape = (-4,1,2,-2),  output shape = (1,2,3,4)
            input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4)

        - If the argument ``reverse`` is set to 1, then the special values are
          inferred from right to left. Example::

            without reverse=1, for input shape = (10,5,4), shape = (-1,0),
            output shape would be (40,5).
            with reverse=1, output shape will be (50,4).

    reverse : bool, default False
        If true then the special values are inferred from right to left. Only
        supported as keyword argument.

    Returns
    -------
    NDArray
        An array with desired shape that shares data with this array.

    Examples
    --------
    >>> x = mx.nd.arange(0,6).reshape(2,3)
    >>> x.asnumpy()
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.]], dtype=float32)
    >>> y = x.reshape(3,2)
    >>> y.asnumpy()
    array([[ 0.,  1.],
           [ 2.,  3.],
           [ 4.,  5.]], dtype=float32)
    >>> y = x.reshape(3,-1)
    >>> y.asnumpy()
    array([[ 0.,  1.],
           [ 2.,  3.],
           [ 4.,  5.]], dtype=float32)
    >>> y = x.reshape(-3)
    >>> y.asnumpy()
    array([ 0.  1.  2.  3.  4.  5.], dtype=float32)
    >>> y[:] = -1
    >>> x.asnumpy()
    array([[-1., -1., -1.],
           [-1., -1., -1.]], dtype=float32)
    """
    # A single list/tuple positional argument is the shape itself.
    if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
        shape = shape[0]
    elif not shape:
        # No positional shape: it must have been passed as a keyword.
        shape = kwargs.get('shape')
        assert shape, "Shape must be provided."
    # Reject any keyword other than 'shape' and 'reverse'.
    if not all(k in ['shape', 'reverse'] for k in kwargs):
        raise TypeError("Got unknown keywords in reshape: {}. " "Accepted keyword arguments are 'shape' and 'reverse'.".format(', '.join([k for k in kwargs if k not in ['shape', 'reverse']])))
    reverse = kwargs.get('reverse', False)
    handle = NDArrayHandle()
    # Actual reshape: delegate to the C API. The returned handle shares the
    # underlying data buffer with this array (a view, not a copy).
    check_call(_LIB.MXNDArrayReshape64(self.handle, len(shape), c_array(ctypes.c_int64, shape), reverse, ctypes.byref(handle)))
    return NDArray(handle=handle, writable=self.writable)
def __set_checksum(self):
    """Sets the checksum on the last byte of buffer,
    based on values in the buffer

    :return: None
    """
    # Compute the checksum over the current raw contents, then write it
    # into the buffer at the checksum offset in place.
    raw = self.__out_buffer.raw
    self.STRUCT_CHECKSUM.pack_into(self.__out_buffer,
                                   self.OFFSET_CHECKSUM,
                                   self.__get_checksum(raw))
def _check_reads_hit(self, alignment_io, min_aligned_fraction):
    '''Given an alignment return a list of sequence names that are less
    than the min_aligned_fraction'''
    poorly_aligned = []
    alignment_length = None
    min_length = None
    for record in SeqIO.parse(alignment_io, "fasta"):
        if not alignment_length:
            # First sequence fixes the expected alignment width and the
            # minimum number of aligned (non-gap) positions.
            alignment_length = len(record.seq)
            min_length = int(min_aligned_fraction * alignment_length)
            logging.debug("Determined min number of aligned bases to be %s" % min_length)
        elif len(record.seq) != alignment_length:
            raise Exception("Alignment file appears to not be of uniform length")
        aligned = alignment_length - record.seq.count('-')
        logging.debug("Sequence %s has %d aligned positions" % (record.name, aligned))
        if aligned <= min_length:
            poorly_aligned.append(record.name)
    return poorly_aligned
def plotprofMulti(self, ini, end, delta, what_specie, xlim1, xlim2, ylim1, ylim2, symbol=None):
    '''Create a movie with mass fractions vs mass coordinate between
    xlim1 and xlim2, ylim1 and ylim2. Only works with instances of se.

    Parameters
    ----------
    ini : integer
        Initial model i.e. cycle.
    end : integer
        Final model i.e. cycle.
    delta : integer
        Sparsity factor of the frames.
    what_specie : list
        Array with species in the plot.
    xlim1, xlim2 : integer or float
        Mass coordinate range.
    ylim1, ylim2 : integer or float
        Mass fraction coordinate range.
    symbol : list, optional
        Array indicating which symbol you want to use. Must be of
        the same len of what_specie array. The default is None.
    '''
    plotType = self._classTest()
    if plotType != 'se':
        print('This method is not supported for ' + str(self.__class__))
        return
    for i in range(ini, end + 1, delta):
        step = int(i)
        for j, specie in enumerate(what_specie):
            # Identity comparison with None (fixes 'symbol == None');
            # default to a plain line when no symbols were given.
            symbol_dummy = '-' if symbol is None else symbol[j]
            self.plot_prof_1(step, specie, xlim1, xlim2, ylim1, ylim2, symbol_dummy)
        # One image per cycle, zero-padded for correct frame ordering.
        filename = str('%03d' % step) + '_test.png'
        pl.savefig(filename, dpi=400)
        print('wrote file ', filename)
        pl.clf()
def report_events(self, start_date, end_date, type="system"):
    """Create a report for all client events or all system events.

    Uses GET to /reports/events/{clients,system} interface

    :Args:
        * *start_date*: (datetime) Start time for report generation
        * *end_date*: (datetime) End time for report generation

    :Kwargs:
        * *type*: (str) Type of event report to create. "system" or "clients"

    :Returns: (list) List of events in the input range
    """
    start_str, end_str = self._format_input_dates(start_date, end_date)
    # Pick the endpoint matching the requested report flavour.
    if type == "clients":
        endpoint = url.reports_events_clients
    else:
        endpoint = url.reports_events_system
    response = self._get(endpoint,
                         params={"start_date": start_str, "end_date": end_str})
    self._check_response(response, 200)
    return self._create_response(response).get("events")
def validateMasterOption(master):
    """Validate master (-m, --master) command line option.

    Checks that the option is a string of the 'hostname:port' form,
    otherwise raises a UsageError exception.

    @type master: string
    @param master: master option
    @raise usage.UsageError: on invalid master option
    """
    try:
        # Must split into exactly two pieces, and the port must parse.
        _hostname, port_text = master.split(":")
        int(port_text)
    except (TypeError, ValueError):
        raise usage.UsageError("master must have the form 'hostname:port'")
def post(self, uri, data, **kwargs):
    """POST the provided data to the specified path.

    See :meth:`request` for additional details. The `data` parameter here is
    expected to be a string type.
    """
    # Forward the payload as the 'data' keyword of the generic request.
    kwargs["data"] = data
    return self.request("POST", uri, **kwargs)
def output(self, result):
    """Adapts the result of a function based on the returns definition."""
    if not self.returns:
        # No declared return spec: in strict mode only None may come back.
        if not self.strict:
            return result
        if result is not None:
            raise AnticipateErrors(message='Return value %r does not match anticipated value ' 'of None' % type(result), errors=None)
        return None
    try:
        return self._adapt_result(result)
    except AdaptErrors as e:
        collected = e.errors
    except AdaptError as e:
        collected = [e]
    # Adaptation failed — surface the accumulated errors to the caller.
    raise AnticipateErrors(message='Return value %r does not match anticipated type %r' % (type(result), self.returns), errors=collected)
def relative_bias(fm, scale_factor=1, estimator=None):
    """Computes the relative bias, i.e. the distribution of saccade angles
    and amplitudes.

    Parameters
    ----------
    fm : DataMat
        The fixation data to use
    scale_factor : double
        Scaling applied to the fixation displacements and image extents.
    estimator : callable, optional
        Alternative density estimator with signature
        ``estimator(samples, e_y, e_x)``; defaults to a 2D histogram.

    Returns
    -------
    2D probability distribution of saccade angles and amplitudes.
    """
    assert 'fix' in fm.fieldnames(), "Can not work without fixation numbers"
    # Exclude the first fixation of each trial (fixation number jump != 1).
    excl = fm.fix - np.roll(fm.fix, 1) != 1
    # Now calculate the direction where the NEXT fixation goes to
    diff_x = (np.roll(fm.x, 1) - fm.x)[~excl]
    diff_y = (np.roll(fm.y, 1) - fm.y)[~excl]
    # Make a histogram of diff values.
    # This specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included --> fixations are ceiled.
    ylim = np.round(scale_factor * fm.image_size[0])
    xlim = np.round(scale_factor * fm.image_size[1])
    # np.linspace requires an integer sample count — the original passed
    # floats from np.ceil, which modern NumPy rejects.
    x_steps = int(np.ceil(2 * xlim)) + 1
    if x_steps % 2 != 0:
        x_steps += 1
    y_steps = int(np.ceil(2 * ylim)) + 1
    if y_steps % 2 != 0:
        y_steps += 1
    e_x = np.linspace(-xlim, xlim, x_steps)
    e_y = np.linspace(-ylim, ylim, y_steps)
    samples = np.array(list(zip((scale_factor * diff_y), (scale_factor * diff_x))))
    if estimator is None:
        (hist, _) = np.histogramdd(samples, (e_y, e_x))
    else:
        hist = estimator(samples, e_y, e_x)
    return hist
def set_cellpydata(self, cellpydata, cycle):
    """Performing fit of the OCV steps in the cycles set by set_cycles()
    from the data set by set_data()

    r is found by calculating v0 / i_start --> err(r) = err(v0) + err(i_start).
    c is found from using tau / r --> err(c) = err(r) + err(tau)

    Args:
        cellpydata (CellpyData): data object from cellreader
        cycle (int): cycle number to get from CellpyData object

    Returns:
        None
    """
    self.data = cellpydata
    self.step_table = self.data.dataset
    # Extract the charge-direction OCV relaxation for the requested cycle
    # and keep time/voltage as plain numpy arrays for fitting.
    ocv_curve = self.data.get_ocv(direction='up', cycles=cycle)
    self.time = np.array(ocv_curve.Step_Time)
    self.voltage = np.array(ocv_curve.Voltage)
def points(self):
    """The center of each filled cell as a list of points.

    Returns
    -------
    points : (self.filled, 3) float, list of points
    """
    # Delegate the matrix-to-coordinates conversion entirely to the helper.
    return matrix_to_points(matrix=self.matrix,
                            pitch=self.pitch,
                            origin=self.origin)
def notify_command(command_format, mounter):
    """Command notification tool.

    This works similar to Notify, but will issue command instead of showing
    the notifications on the desktop. This can then be used to react to events
    from shell scripts.

    The command can contain modern pythonic format placeholders like:
    {device_file}. The following placeholders are supported:
    event, device_file, device_id, device_size, drive, drive_label, id_label,
    id_type, id_usage, id_uuid, mount_path, root

    :param str command_format: command to run when an event occurs.
    :param mounter: Mounter object
    """
    events = ('device_mounted', 'device_unmounted',
              'device_locked', 'device_unlocked',
              'device_added', 'device_removed',
              'job_failed')
    udisks = mounter.udisks
    # Register one background command handler per udisks event.
    for evt in events:
        udisks.connect(evt, run_bg(DeviceCommand(command_format, event=evt)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.