signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _run_prospector_on(filenames, tools, disabled_linters, show_lint_files, ignore_codes=None):
    """Run prospector on the given files, using the specified tools.

    This function enables us to run different tools on different
    classes of files, which is necessary in the case of tests.
    """
    from prospector.run import Prospector, ProspectorConfig

    assert tools
    active_tools = list(set(tools) - set(disabled_linters))
    ignore_codes = ignore_codes or []
    found = {}
    # Early return if all tools were filtered out.
    if not active_tools:
        return found
    base_argv = ["-F", "-D", "-M", "--no-autodetect", "-s", "veryhigh"]
    base_argv += ("-t " + " -t ".join(active_tools)).split(" ")
    for filename in filenames:
        _debug_linter_status("prospector", filename, show_lint_files)
    # pylint doesn't like absolute paths, so convert to relative.
    with _custom_argv(base_argv + [os.path.relpath(f) for f in filenames]):
        prospector = Prospector(ProspectorConfig())
        prospector.execute()
        for message in prospector.get_messages() or []:
            message.to_absolute_path(os.getcwd())
            loc = message.location
            code = message.code
            if code in ignore_codes:
                continue
            found[_Key(loc.path, loc.line, code)] = message
    return found
|
def validate_matrix(self, data):
    """Validate matrix data and create the config objects."""
    # Grid search is either explicitly requested, or the default when no
    # other search strategy (random_search / hyperband / bo) is present.
    no_other_search = (data.get('random_search') is None
                       and data.get('hyperband') is None
                       and data.get('bo') is None)
    is_grid_search = (data.get('grid_search') is not None
                      or (data.get('grid_search') is None and no_other_search))
    is_bo = data.get('bo') is not None
    validate_matrix(data.get('matrix'), is_grid_search=is_grid_search, is_bo=is_bo)
|
def cov2corr(cov):
    """Convert a covariance matrix into a correlation matrix.

    Parameters
    ----------
    cov : 2D array
        Covariance matrix.

    Returns
    -------
    corr : 2D array
        Correlation matrix converted from the covariance matrix.
    """
    assert cov.ndim == 2, 'covariance matrix should be 2D array'
    # Scale rows and columns by the reciprocal standard deviations.
    inv_sd = 1 / np.sqrt(np.diag(cov))
    return cov * inv_sd[None, :] * inv_sd[:, None]
|
def setAsSent(self, prekeyIds):
    """Mark the given prekeys as sent to the server.

    :param prekeyIds: identifiers of the prekeys to flag
    :type prekeyIds: list
    :return: None
    """
    # Hoist the query and cursor out of the loop and batch the updates:
    # one executemany plus a single commit, instead of rebuilding the
    # cursor and committing once per prekey id.
    q = "UPDATE prekeys SET sent_to_server = ? WHERE prekey_id = ?"
    cursor = self.dbConn.cursor()
    cursor.executemany(q, [(1, prekeyId) for prekeyId in prekeyIds])
    self.dbConn.commit()
|
def _get_state ( self ) :
"""Returns the VM state ( e . g . running , paused etc . )
: returns : state ( string )"""
|
result = yield from self . _execute ( "showvminfo" , [ self . _vmname , "--machinereadable" ] )
for info in result . splitlines ( ) :
if '=' in info :
name , value = info . split ( '=' , 1 )
if name == "VMState" :
return value . strip ( '"' )
return "unknown"
|
def current_rev_reg_id(base_dir: str, cd_id: str) -> str:
    """Return the current revocation registry identifier for a cred def id.

    Scans the tails file links in the input directory and selects the
    registry with the highest tag.

    :param base_dir: base directory for tails files, thereafter split by cred def id
    :param cd_id: credential definition identifier of interest
    :return: identifier for current revocation registry on the cred def id
    :raises AbsentTails: if no corresponding tails file exists
    """
    tags = [
        int(rev_reg_id2tag(basename(link)))
        for link in Tails.links(base_dir)
        if cd_id in basename(link)
    ]
    if not tags:
        raise AbsentTails('No tails files present for cred def id {}'.format(cd_id))
    return rev_reg_id(cd_id, str(max(tags)))
|
def create_payload(self):
    """Rename the payload key "prior_id" to "prior".

    For more information, see `Bugzilla #1238757
    <https://bugzilla.redhat.com/show_bug.cgi?id=1238757>`_.
    """
    payload = super(LifecycleEnvironment, self).create_payload()
    # Servers older than 6.1 expect the legacy "prior" key.
    needs_rename = _get_version(self._server_config) < Version('6.1')
    if needs_rename and 'prior_id' in payload:
        payload['prior'] = payload.pop('prior_id')
    return payload
|
def action(arguments):
    """Summarize one or more sequence files.

    Determines whether each file is an alignment, the maximum sequence
    length and the total number of sequences.  Provides different output
    formats including tab (tab-delimited), csv and align (aligned as if
    part of a borderless table).
    """
    # Ignore SIGPIPE, for head support
    common.exit_on_sigpipe()
    common.exit_on_sigint()
    handle = arguments.destination_file
    output_format = arguments.output_format
    if not output_format:
        # Default: aligned table on a TTY, tab-delimited otherwise.
        try:
            output_format = 'align' if handle.isatty() else 'tab'
        except AttributeError:
            output_format = 'tab'
    writer_cls = _WRITERS[output_format]
    ssf = partial(summarize_sequence_file, file_type=arguments.input_format)
    # With a single thread, skip multiprocessing so the parent process
    # can be terminated using ctrl+c.
    if arguments.threads > 1:
        pool = multiprocessing.Pool(processes=arguments.threads)
        summary = pool.imap(ssf, arguments.source_files)
    else:
        summary = (ssf(f) for f in arguments.source_files)
    with handle:
        writer_cls(arguments.source_files, summary, handle).write()
|
def _add_jitter ( self , vec ) :
""": param vec : array to jitter
: return : array , jittered version of arrays"""
|
if self . scatterchartdata . jitter == 0 or self . scatterchartdata . jitter is None :
return vec
return vec + np . random . rand ( 1 , len ( vec ) ) [ 0 ] * self . scatterchartdata . jitter
|
def _create_subscription_definition ( gg_client , group_type , config ) :
"""Configure routing subscriptions for a Greengrass group .
group _ type : either default or an overridden group type
config : GroupConfigFile object used for routing subscriptions"""
|
logging . info ( '[begin] Configuring routing subscriptions' )
sub_info = gg_client . create_subscription_definition ( Name = "{0}_routing" . format ( group_type . type_name ) )
logging . info ( 'Created subscription definition: {0}' . format ( sub_info ) )
subs = group_type . get_subscription_definition ( config = config )
if subs is None :
logging . warning ( "[end] No SubscriptionDefinition exists in GroupType:{0}" . format ( group_type . type_name ) )
return
subv = gg_client . create_subscription_definition_version ( SubscriptionDefinitionId = sub_info [ 'Id' ] , Subscriptions = subs )
sub_arn = subv [ 'Arn' ]
config [ 'subscription_def' ] = { "id" : sub_info [ 'Id' ] , "version_arn" : sub_arn }
logging . info ( '[end] Configured routing subscriptions' )
return sub_arn
|
def _image_loop ( self ) :
"""Retrieve an iterable of images either with , or without a progress bar ."""
|
if self . progress_bar and 'tqdm' in self . progress_bar . lower ( ) :
return tqdm ( self . imgs , desc = 'Saving PNGs as flat PDFs' , total = len ( self . imgs ) , unit = 'PDFs' )
else :
return self . imgs
|
def _getPFilename ( self , native , prompt ) :
"""Get p _ filename field for this parameter
Same as get for non - list params"""
|
return self . get ( native = native , prompt = prompt )
|
def make_sshable(c):
    """Set up passwordless SSH keypair & authorized_hosts access to localhost."""
    user = c.travis.sudo.user
    home = "~{0}".format(user)
    # Run sudo() as the new sudo user; means less chown'ing, etc.
    c.config.sudo.user = user
    ssh_dir = "{0}/.ssh".format(home)
    # TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
    c.sudo("mkdir {0}".format(ssh_dir))
    c.sudo("chmod 0700 {0}".format(ssh_dir))
    c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
    c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir))
|
def to_native(self, obj, name, value):  # pylint:disable=unused-argument
    """Transform the MongoDB value into a Marrow Mongo value."""
    from marrow.mongo import Document
    from marrow.mongo.trait import Derived

    kind = self._kind(obj.__class__)
    if isinstance(value, Document):
        # In debug mode, sanity-check that the document matches the
        # declared kind before passing it through untouched.
        if __debug__ and kind and issubclass(kind, Document) and not isinstance(value, kind):
            raise ValueError(
                "Not an instance of " + kind.__name__ + " or a sub-class: " + repr(value))
        return value
    if isinstance(kind, Field):
        return kind.transformer.native(value, (kind, obj))
    return (kind or Derived).from_mongo(value)
|
def _search ( self ) :
"""Returns all documents in the doc dict .
This function is not a part of the DocManager API , and is only used
to simulate searching all documents from a backend ."""
|
results = [ ]
for _id in self . doc_dict :
entry = self . doc_dict [ _id ]
if entry . doc is not None :
results . append ( entry . merged_dict )
return results
|
def deserialize(cls, data):
    """Given some data from the queue, deserialize it into a ``Task``
    instance.

    The data must be similar in format to what comes from
    ``Task.serialize`` (a JSON-serialized dictionary).  Required keys are
    ``task_id``, ``retries`` & ``async``.

    :param data: A JSON-serialized string of the task data
    :type data: string
    :returns: A populated task
    :rtype: A ``Task`` instance
    """
    data = json.loads(data)
    options = data.get('options', {})
    # 'async' became a reserved keyword in Python 3.7, so it can no longer
    # appear literally as a keyword argument; pass it via ** unpacking to
    # keep the Task constructor's interface unchanged.
    task = cls(task_id=data['task_id'], retries=data['retries'],
               **{'async': data['async']})
    func = import_attr(data['module'], data['callable'])
    task.to_call(func, *data.get('args', []), **data.get('kwargs', {}))
    # Hook callbacks are optional and stored as module/callable pairs.
    for hook in ('on_start', 'on_success', 'on_error'):
        info = options.get(hook)
        if info:
            setattr(task, hook, import_attr(info['module'], info['callable']))
    return task
|
def action_fluents(self) -> Dict[str, PVariable]:
    '''Return the action-fluent pvariables, keyed by their string form.'''
    fluents = {}
    for pvar in self.pvariables:
        if pvar.is_action_fluent():
            fluents[str(pvar)] = pvar
    return fluents
|
def basescript(line):
    """Parse one basescript JSON log line into a normalized record.

    The returned dict exposes the standard fields (``timestamp``, ``id``,
    ``type``, ``level``, ``event``) at the top level and the full parsed
    log entry under ``data``.
    """
    log = json.loads(line)
    return {
        'timestamp': log['timestamp'],
        'data': log,
        'id': log['id'],
        'type': log['type'],
        'level': log['level'],
        'event': log['event'],
    }
|
def linkify_h_by_realms(self, realms):
    """Link hosts with realms.

    :param realms: realms object to link with
    :type realms: alignak.objects.realm.Realms
    :return: None
    """
    default_realm = realms.get_default()
    for host in self:
        if not getattr(host, 'realm', None):
            # Apply the default realm to hosts that have none configured.
            host.realm = default_realm.uuid if default_realm else ''
            host.realm_name = default_realm.get_name() if default_realm else ''
            host.got_default_realm = True
        if host.realm in realms:
            realm = realms[host.realm]
        else:
            # host.realm holds a realm name rather than a uuid; resolve it.
            realm = realms.find_by_name(host.realm)
            if not realm:
                continue
            host.realm = realm.uuid
|
def delete_attr(self, name: str, axis: int = 0) -> None:
    """**DEPRECATED** - Use `del ds.ra.key` or `del ds.ca.key` instead, where `key` is replaced with the attribute name"""
    deprecated("'delete_attr' is deprecated. Use 'del ds.ra.key' or 'del ds.ca.key' instead")
    # Axis 0 addresses row attributes; anything else, column attributes.
    target = self.ra if axis == 0 else self.ca
    del target[name]
|
def to_bytes(self):
    """Pack the frame-control fields into their binary representation.

    :rtype: bytes
    """
    fields = (self.width, self.height, self.x_offset, self.y_offset,
              self.delay, self.delay_den, self.depose_op, self.blend_op)
    # Network byte order: four uint32, two uint16, two int8.
    return struct.pack("!IIIIHHbb", *fields)
|
def _call_api ( self , url , method = 'GET' , params = None , data = None ) :
"""Method used to call the API .
It returns the raw JSON returned by the API or raises an exception
if something goes wrong .
: arg url : the URL to call
: kwarg method : the HTTP method to use when calling the specified
URL , can be GET , POST , DELETE , UPDATE . . .
Defaults to GET
: kwarg params : the params to specify to a GET request
: kwarg data : the data to send to a POST request"""
|
req = self . session . request ( method = method , url = url , params = params , headers = self . header , data = data , verify = not self . insecure , )
output = None
try :
output = req . json ( )
except Exception as err :
LOG . debug ( req . text )
# TODO : use a dedicated error class
raise Exception ( 'Error while decoding JSON: {0}' . format ( err ) )
if req . status_code != 200 :
LOG . error ( output )
if 'error_code' in output :
raise APIError ( output [ 'error' ] )
return output
|
def rgb(self):
    """Same as raw() but with each channel scaled to the 0-255 range."""
    red, green, blue = self.raw

    def scale(value, max_value):
        # Clamp to 255 in case the raw reading exceeds its nominal max.
        return min(int((value * 255) / max_value), 255)

    return (scale(red, self.red_max),
            scale(green, self.green_max),
            scale(blue, self.blue_max))
|
def fill_borders(self, *args):
    """Extrapolate tiepoint lons and lats to fill in the border of the
    chunks.

    Accepted dimensions are "y" (row borders) and "x" (column borders);
    any other name raises ``NameError`` before any filling is done.
    """
    handlers = {"y": self._fill_row_borders, "x": self._fill_col_borders}
    to_run = []
    for dim in args:
        if dim not in handlers:
            raise NameError("Unrecognized dimension: " + str(dim))
        to_run.append(handlers[dim])
    for fill in to_run:
        fill()
|
def _call_parallel_target(self, name, cdata, low):
    '''Target function run by the parallel thread/process.

    Executes the state, times it, and serializes the result to a cache
    file keyed by the state tag so the parent can collect it.
    '''
    # We need to re-record start/end duration here because it is
    # impossible to correctly calculate further down the chain.
    utc_start_time = datetime.datetime.utcnow()
    tag = _gen_tag(low)
    try:
        ret = self.states[cdata['full']](*cdata['args'], **cdata['kwargs'])
    except Exception as exc:
        log.debug('An exception occurred in this state: %s', exc,
                  exc_info_on_loglevel=logging.DEBUG)
        trb = traceback.format_exc()
        ret = {
            'result': False,
            'name': name,
            'changes': {},
            'comment': 'An exception occurred in this state: {0}'.format(trb),
        }
    utc_finish_time = datetime.datetime.utcnow()
    delta = utc_finish_time - utc_start_time
    # Duration in milliseconds.microseconds
    duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
    ret['duration'] = duration
    troot = os.path.join(self.opts['cachedir'], self.jid)
    tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
    if not os.path.isdir(troot):
        try:
            os.makedirs(troot)
        except OSError:
            # Looks like the directory was created between the check and
            # the attempt; we are safe to pass.
            pass
    with salt.utils.files.fopen(tfile, 'wb+') as fp_:
        fp_.write(msgpack_serialize(ret))
|
def queue(self):
    """Yield a queue of notifications, registered as a listener.

    Intended for use as a context manager (``with``); the queue is
    removed from the listener set when the block exits.
    """
    notification_queue = NotificationQueue()
    self._listeners.add(notification_queue)
    yield notification_queue
    self._listeners.remove(notification_queue)
|
def _get_commands ( filename , class_name , language ) :
"""Generate the related compilation and
execution commands .
Parameters
: param filename : str
The used filename .
: param class _ name : str
The used class name .
: param language : { ' c ' , ' go ' , ' java ' , ' js ' , ' php ' , ' ruby ' }
The target programming language .
Returns
comp _ cmd , exec _ cmd : ( str , str )
The compilation and execution command ."""
|
cname = str ( class_name )
fname = str ( filename )
lang = str ( language )
# Compilation variants :
comp_vars = { # gcc brain . c - o brain
'c' : 'gcc {} -lm -o {}' . format ( fname , cname ) , # javac Brain . java
'java' : 'javac {}' . format ( fname ) , # go build - o brain brain . go
'go' : 'go build -o {} {}.go' . format ( cname , cname ) }
comp_cmd = comp_vars . get ( lang , None )
# Execution variants :
exec_vars = { # . / brain
'c' : os . path . join ( '.' , cname ) , # java - classpath . Brain
'java' : 'java -classpath . {}' . format ( cname ) , # node brain . js
'js' : 'node {}' . format ( fname ) , # php - f Brain . php
'php' : 'php -f {}' . format ( fname ) , # ruby brain . rb
'ruby' : 'ruby {}' . format ( fname ) , # . / brain
'go' : os . path . join ( '.' , cname ) , }
exec_cmd = exec_vars . get ( lang , None )
return comp_cmd , exec_cmd
|
def formatResults(self, op, results):
    """Format the results of the database operations for printing back
    to the caller.

    @param op: operation performed (add, remove, update, get)
    @type op: string
    @param results: results from db queries in perspective_commandline
    @type results: list
    @returns: string containing formatted results
    """
    lines = []
    if op == 'add':
        # results alternates identifier, uid
        lines.append("user(s) added:\n")
        for user in results:
            if isinstance(user, str):
                lines.append("identifier: %s\n" % user)
            else:
                lines.append("uid: %d\n\n" % user)
    elif op == 'remove':
        # list of dictionaries
        lines.append("user(s) removed:\n")
        for user in results:
            if user:
                lines.append("identifier: %s\n" % (user))
    elif op == 'update':
        # results alternates identifier, None
        lines.append("user(s) updated:\n")
        for user in results:
            if user:
                lines.append("identifier: %s\n" % (user))
    elif op == 'get':
        # list of dictionaries
        lines.append("user(s) found:\n")
        for user in results:
            if user:
                # Never echo the password hash back to the caller.
                for key in sorted(user.keys()):
                    if key != 'bb_password':
                        lines.append("%s: %s\n" % (key, user[key]))
                lines.append("\n")
            else:
                lines.append("no match found\n")
    return "".join(lines)
|
def add_last_closed_file(self, fname):
    """Add ``fname`` to the front of the last-closed-files list.

    Keeps the list free of duplicates and capped at 10 entries.
    """
    recent = self.last_closed_files
    if fname in recent:
        recent.remove(fname)
    recent.insert(0, fname)
    if len(recent) > 10:
        recent.pop(-1)
|
def get_user_shakes(self):
    """Get a list of Shake objects for the currently authenticated user.

    Returns:
        A list of Shake objects.
    """
    endpoint = '/api/shakes'
    data = self._make_request(verb="GET", endpoint=endpoint)
    return [Shake.NewFromJSON(shk) for shk in data['shakes']]
|
def parse_definition_docstring(obj, process_doc):
    """Get swag data from the docstring of a class-based definition."""
    doc_lines, swag = None, None
    swag_path = getattr(obj, 'swag_path', None)
    swag_type = getattr(obj, 'swag_type', 'yml')
    if swag_path is not None:
        full_doc = load_from_file(swag_path, swag_type)
    else:
        full_doc = inspect.getdoc(obj)
    if full_doc:
        if full_doc.startswith('file:'):
            # The docstring points at an external file; load it instead.
            if not hasattr(obj, 'root_path'):
                obj.root_path = get_root_path(obj)
            swag_path, swag_type = get_path_from_doc(full_doc)
            doc_filepath = os.path.join(obj.root_path, swag_path)
            full_doc = load_from_file(doc_filepath, swag_type)
        yaml_sep = full_doc.find('---')
        if yaml_sep == -1:
            doc_lines = process_doc(full_doc)
        else:
            doc_lines = process_doc(full_doc[:yaml_sep - 1])
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input and deprecated since PyYAML 5.1 — confirm
            # the docstrings are trusted, or switch to yaml.safe_load.
            swag = yaml.load(full_doc[yaml_sep:])
    return doc_lines, swag
|
def fit(self, sequences, y=None):
    """Fit a PCCA lumping model using a sequence of cluster assignments.

    Parameters
    ----------
    sequences : list(np.ndarray(dtype='int'))
        List of arrays of cluster assignments
    y : None
        Unused, present for sklearn compatibility only.

    Returns
    -------
    self
    """
    super(PCCA, self).fit(sequences, y=y)
    self._do_lumping()
    return self
|
def import_file(self, dataTypeList=None, defaultInteraction=None, delimiters=None,
                delimitersForDataList=None, afile=None, firstRowAsColumnNames=None,
                indexColumnSourceInteraction=None, indexColumnTargetInteraction=None,
                indexColumnTypeInteraction=None, NetworkViewRendererList=None,
                RootNetworkList=None, startLoadRow=None, TargetColumnList=None,
                verbose=False):
    """Import a new network from a tabular formatted file type (e.g. csv,
    tsv, Excel, etc.).  Use network load file to load network formatted
    files.  A new network collection is created if none is currently
    selected, otherwise the network is added to the current collection.
    The SUIDs of the new networks and views are returned.

    :param dataTypeList (string, optional): column data types ordered by
        column index (e.g. "string,int,long,double,boolean,intlist" or
        just "s,i,l,d,b,il")
    :param defaultInteraction (string, optional): default interaction type
        used when there is no interaction type column
    :param delimiters (string, optional): column delimiters, from
        ',', ' ', 'TAB' or ';'; TAB and ',' are the defaults
    :param delimitersForDataList (string, optional): delimiters separating
        entries within a list value, from '|', '\\', '/' or ','; '|' is
        the default
    :param afile (string): path to the file containing the table or
        network to import
    :param firstRowAsColumnNames (string, optional): if true, the first
        row holds the column names (startLoadRow must be set as well)
    :param indexColumnSourceInteraction (string): column index holding the
        source node identifiers
    :param indexColumnTargetInteraction (string, optional): column index
        holding the target node identifiers; without it the network has
        no edges
    :param indexColumnTypeInteraction (string, optional): column index
        holding the interaction type (not required)
    :param NetworkViewRendererList (string, optional): network view
        renderer to use (only useful when multiple renderers exist)
    :param RootNetworkList (string, optional): name of the network
        collection (root network) to import into; '--Create new network
        collection--' creates a fresh collection
    :param startLoadRow (string, optional): starting row of the import,
        used to skip comments and other non-data header rows
    :param TargetColumnList (string, optional): column in the existing
        root network to map input identifiers to
    :param verbose: print more
    :returns: {SUIDs of the new networks and views}
    """
    param_names = [
        "dataTypeList", "defaultInteraction", "delimiters",
        "delimitersForDataList", "file", "firstRowAsColumnNames",
        "indexColumnSourceInteraction", "indexColumnTargetInteraction",
        "indexColumnTypeInteraction", "NetworkViewRendererList",
        "RootNetworkList", "startLoadRow", "TargetColumnList",
    ]
    param_values = [
        dataTypeList, defaultInteraction, delimiters,
        delimitersForDataList, afile, firstRowAsColumnNames,
        indexColumnSourceInteraction, indexColumnTargetInteraction,
        indexColumnTypeInteraction, NetworkViewRendererList,
        RootNetworkList, startLoadRow, TargetColumnList,
    ]
    PARAMS = set_param(param_names, param_values)
    response = api(url=self.__url + "/import file", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
|
def addAsn1MibSource(self, *asn1Sources, **kwargs):
    """Add one or more repositories to search for ASN.1 MIB files.

    Parameters
    ----------
    *asn1Sources :
        one or more URLs in form of :py:obj:`str` identifying local or
        remote ASN.1 MIB repositories.  Path must include the *@mib@*
        component which will be replaced with the MIB module name at the
        time of search.

    Returns
    -------
    :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        reference to itself

    Notes
    -----
    Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
    :py:class:`~pysmi.reader.httpclient.HttpReader` and
    :py:class:`~pysmi.reader.ftpclient.FtpReader` classes for in-depth
    information on ASN.1 MIB lookup.
    """
    # Delegate to the wrapped ObjectIdentity and return self for chaining.
    self._args[0].addAsn1MibSource(*asn1Sources, **kwargs)
    return self
|
def write_packages(self, reqs_file):
    """Dump the catalog's packages into a requirements file."""
    lines = ('{}\n'.format(package) for package in self.packages)
    write_file_lines(reqs_file, lines)
|
def get(self, path_info):
    """Get the checksum for the specified path info.

    The checksum is retrieved from the state database when available.

    Args:
        path_info (dict): path info to get the checksum for.

    Returns:
        str or None: checksum for the specified path info, or None if it
        doesn't exist in the state database.
    """
    assert path_info["scheme"] == "local"
    path = path_info["path"]
    if not os.path.exists(path):
        return None
    actual_mtime, actual_size = get_mtime_and_size(path)
    actual_inode = get_inode(path)
    existing_record = self.get_state_record_for_inode(actual_inode)
    if not existing_record:
        return None
    mtime, size, checksum, _ = existing_record
    # A changed mtime or size invalidates the cached checksum.
    if self._file_metadata_changed(actual_mtime, mtime, actual_size, size):
        return None
    self._update_state_record_timestamp_for_inode(actual_inode)
    return checksum
|
def allpossibilities(self):
    """Return all possible outputtemplates that may occur (recursively applied)."""
    possibilities = []
    if isinstance(self.then, ParameterCondition):
        # Recursive parametercondition: expand it in turn.
        possibilities += self.then.allpossibilities()
    elif self.then:
        possibilities.append(self.then)
    if self.otherwise:
        if isinstance(self.otherwise, ParameterCondition):
            possibilities += self.otherwise.allpossibilities()
        else:
            possibilities.append(self.otherwise)
    return possibilities
|
def delete(self, cluster):
    """Delete the cluster from persistent state.

    :param cluster: cluster to delete from persistent state
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    path = self._get_cluster_storage_path(cluster.name)
    # Removing a cluster that was never stored is a no-op.
    if os.path.exists(path):
        os.unlink(path)
|
def upgradestep(upgrade_product, version):
    """Decorator that records ``version`` in the QuickInstaller on upgrade."""
    def wrap_func(fn):
        def wrap_func_args(context, *args):
            # Update the installed version before running the upgrade step.
            product = getToolByName(context, 'portal_quickinstaller').get(upgrade_product)
            setattr(product, 'installedversion', version)
            return fn(context, *args)
        return wrap_func_args
    return wrap_func
|
def read_pid_status(pid="self"):
    """Return selected fields from a process's /proc status file.

    :param pid: The process ID (or "self" for the current process).
    :returns: Mapping of VmRSS / Threads / FDSize to their values
        (int when the value parses as one, str otherwise).
    :rtype: dict
    """
    wanted = (b"VmRSS:", b"Threads:", b"FDSize:")
    data = {}
    with open("/proc/%s/status" % (pid,), "rb") as status_file:
        for row in status_file:
            fields = row.split()
            if not fields or fields[0] not in wanted:
                continue
            key = fields[0].decode("ascii")[:-1]  # strip the trailing ':'
            try:
                data[key] = int(fields[1])
            except ValueError:
                data[key] = fields[1].decode("ascii")
    return data
|
def move_dirty_lock_file(dirty_lock_file, sm_path):
    """Move the dirty lock file into the state-machine folder.

    There it is no longer found by the auto recovery of backup.
    """
    if dirty_lock_file is None:
        return
    target = os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])
    if dirty_lock_file == target:
        return
    logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}"
                 "".format(dirty_lock_file, target))
    os.rename(dirty_lock_file, target)
|
def is_log_format(value):
    u"""Validate that ``value`` is one of the supported log formats.

    Supported values: ['ltsv', 'combined'].
    """
    log_levels = ['ltsv', 'combined']
    if value not in log_levels:
        err_message = ('"log_format" supported following value: '
                       '{0}'.format(log_levels))
        raise validate.VdtValueError(err_message)
    return value
|
def _run_alg ( self , max_iter ) :
"""Run Algorithm
Run the update step of a given algorithm up to the maximum number of
iterations .
Parameters
max _ iter : int
Maximum number of iterations"""
|
if self . progress :
with ProgressBar ( redirect_stdout = True , max_value = max_iter ) as bar :
self . _iterations ( max_iter , bar = bar )
else :
self . _iterations ( max_iter )
|
def get_experiment_results():
    """Compute the results of all experiments, store them in redis, and
    print them out."""
    redis = oz.redis.create_connection()
    for experiment in oz.bandit.get_experiments(redis):
        experiment.compute_default_choice()
        csq, confident = experiment.confidence()
        print("%s:" % experiment.name)
        print("- creation date: %s" % experiment.metadata["creation_date"])
        print("- default choice: %s" % experiment.default_choice)
        print("- chi squared: %s" % csq)
        print("- confident: %s" % confident)
        print("- choices:")
        for choice in experiment.choices:
            print(" - %s: plays=%s, rewards=%s, performance=%s" % (
                choice.name, choice.plays, choice.rewards, choice.performance))
|
def ignore_stops_before_now(self):
    """Ignore any stops received before this point."""
    # A fresh unique object marks the cut-off position in the queue.
    sentinel = object()
    self._sentinel_stop = sentinel
    self._q.put(sentinel)
|
def frozen_price(self):
    """[float] Frozen price of the order."""
    price = self._frozen_price
    if np.isnan(price):
        raise RuntimeError(
            "Frozen price of order {} is not supposed to be nan.".format(self.order_id))
    return price
|
def setShowGridColumns(self, state):
    """Set whether or not columns should be rendered when drawing the grid.

    :param state: <bool>
    """
    delegate = self.itemDelegate()
    # Only XTreeWidgetDelegate instances know about grid columns.
    if isinstance(delegate, XTreeWidgetDelegate):
        delegate.setShowGridColumns(state)
|
def create ( self , obj , payload , async = False ) :
"""Function create
Create an new object
@ param obj : object name ( ' hosts ' , ' puppetclasses ' . . . )
@ param payload : the dict of the payload
@ param async : should this request be async , if true use
return . result ( ) to get the response
@ return RETURN : the server response"""
|
self . url = self . base_url + obj
self . method = 'POST'
self . payload = json . dumps ( payload )
if async :
self . method = 'POST(Async)'
session = FuturesSession ( )
self . resp = session . post ( url = self . url , auth = self . auth , headers = self . headers , data = self . payload , cert = self . ca_cert )
return self . resp
else :
self . resp = requests . post ( url = self . url , auth = self . auth , headers = self . headers , data = self . payload , cert = self . ca_cert )
return self . __process_resp__ ( obj )
|
def code_assist(project, source_code, offset, resource=None, templates=None,
                maxfixes=1, later_locals=True):
    """Return python code completions as a list of `CodeAssistProposal`\\ s.

    `resource` is a `rope.base.resources.Resource` object.  If provided,
    relative imports are handled.

    `maxfixes` is the maximum number of errors to fix if the code has
    errors in it.

    If `later_locals` is `False` names defined in this scope and after
    this line is ignored.
    """
    # `templates` is accepted only for backward compatibility.
    if templates is not None:
        warnings.warn('Codeassist no longer supports templates',
                      DeprecationWarning, stacklevel=2)
    assistant = _PythonCodeAssist(project, source_code, offset,
                                  resource=resource, maxfixes=maxfixes,
                                  later_locals=later_locals)
    return assistant()
|
def get_magicc7_to_openscm_variable_mapping(inverse=False):
    """Get the mappings from MAGICC7 to OpenSCM variables.

    Parameters
    ----------
    inverse : bool
        If True, return the inverse mappings i.e. OpenSCM to MAGICC7 mappings

    Returns
    -------
    dict
        Dictionary of mappings
    """

    def get_openscm_replacement(in_var):
        # Translate one raw MAGICC7 name (e.g. "CO2B_EMIS") into the OpenSCM
        # hierarchy form.  The trailing suffix picks the top-level category.
        # "_INVERSE_EMIS" must be tested before "_EMIS" since the latter is a
        # suffix of the former.
        if in_var.endswith("_INVERSE_EMIS"):
            prefix = "Inverse Emissions"
        elif in_var.endswith("_EMIS"):
            prefix = "Emissions"
        elif in_var.endswith("_CONC"):
            prefix = "Atmospheric Concentrations"
        elif in_var.endswith("_RF"):
            prefix = "Radiative Forcing"
        elif in_var.endswith("_OT"):
            prefix = "Optical Thickness"
        else:
            # All inputs are built from magicc7_suffixes below, so any other
            # suffix indicates a programming error.
            raise ValueError("This shouldn't happen")
        variable = in_var.split("_")[0]
        # I hate edge cases
        if variable.endswith("EQ"):
            variable = variable.replace("EQ", " Equivalent")
        if "GHG" in variable:
            variable = variable.replace("GHG", "Greenhouse Gases")
        if "BIOMASSAER" in variable:
            variable = variable.replace("BIOMASSAER", "Aerosols|MAGICC AFOLU")
        if "CO2CH4N2O" in variable:
            variable = variable.replace("CO2CH4N2O", "CO2, CH4 and N2O")
        # Aggregate "basket" prefixes get expanded to long names; any species
        # remainder is kept above the basket in the hierarchy.
        aggregate_indicators = {
            "KYOTO": "Kyoto Gases",
            "FGASSUM": "F Gases",
            "MHALOSUM": "Montreal Protocol Halogen Gases",
        }
        for agg_indicator, long_name in aggregate_indicators.items():
            if variable.startswith(agg_indicator):
                stripped_var = variable.replace(agg_indicator, "")
                if stripped_var:
                    variable = DATA_HIERARCHY_SEPARATOR.join([stripped_var, long_name])
                else:
                    variable = long_name
        # HCFC141b/HCFC142b end in "B" but are species names, not AFOLU
        # ("B"-suffixed) variables.
        edge_case_B = variable.upper() in ("HCFC141B", "HCFC142B")
        # Trailing "I" marks fossil & industrial, trailing "B" marks AFOLU.
        if variable.endswith("I"):
            variable = DATA_HIERARCHY_SEPARATOR.join([variable[:-1], "MAGICC Fossil and Industrial"])
        elif variable.endswith("B") and not edge_case_B:
            variable = DATA_HIERARCHY_SEPARATOR.join([variable[:-1], "MAGICC AFOLU"])
        # Restore conventional capitalisation of species names.
        case_adjustments = {
            "SOX": "SOx",
            "NOX": "NOx",
            "HFC134A": "HFC134a",
            "HFC143A": "HFC143a",
            "HFC152A": "HFC152a",
            "HFC227EA": "HFC227ea",
            "HFC236FA": "HFC236fa",
            "HFC245FA": "HFC245fa",
            "HFC365MFC": "HFC365mfc",
            "HCFC141B": "HCFC141b",
            "HCFC142B": "HCFC142b",
            "CH3CCL3": "CH3CCl3",
            "CCL4": "CCl4",
            "CH3CL": "CH3Cl",
            "CH2CL2": "CH2Cl2",
            "CHCL3": "CHCl3",
            "CH3BR": "CH3Br",
            "HALON1211": "Halon1211",
            "HALON1301": "Halon1301",
            "HALON2402": "Halon2402",
            "HALON1202": "Halon1202",
            "SOLAR": "Solar",
            "VOLCANIC": "Volcanic",
            "EXTRA": "Extra",
        }
        variable = apply_string_substitutions(variable, case_adjustments)
        return DATA_HIERARCHY_SEPARATOR.join([prefix, variable])

    # Build the cross-product of every base variable with every suffix, then
    # translate each one.
    magicc7_suffixes = ["_EMIS", "_CONC", "_RF", "_OT", "_INVERSE_EMIS"]
    magicc7_base_vars = MAGICC7_EMISSIONS_UNITS.magicc_variable.tolist() + [
        "SOLAR",
        "VOLCANIC",
        "CO2EQ",
        "KYOTOCO2EQ",
        "FGASSUMHFC134AEQ",
        "MHALOSUMCFC12EQ",
        "GHG",
        "KYOTOGHG",
        "FGASSUM",
        "MHALOSUM",
        "BIOMASSAER",
        "CO2CH4N2O",
        "EXTRA",
    ]
    magicc7_vars = [base_var + suffix for base_var in magicc7_base_vars for suffix in magicc7_suffixes]
    replacements = {m7v: get_openscm_replacement(m7v) for m7v in magicc7_vars}
    # Hand-maintained mappings that do not follow the suffix pattern above.
    replacements.update({
        "SURFACE_TEMP": "Surface Temperature",
        "TOTAL_INCLVOLCANIC_RF": "Radiative Forcing",
        "VOLCANIC_ANNUAL_RF": "Radiative Forcing|Volcanic",
        "TOTAL_ANTHRO_RF": "Radiative Forcing|Anthropogenic",
        "TOTAER_DIR_RF": "Radiative Forcing|Aerosols|Direct Effect",
        "CLOUD_TOT_RF": "Radiative Forcing|Aerosols|Indirect Effect",
        "MINERALDUST_RF": "Radiative Forcing|Mineral Dust",
        "STRATOZ_RF": "Radiative Forcing|Stratospheric Ozone",
        "TROPOZ_RF": "Radiative Forcing|Tropospheric Ozone",
        "CH4OXSTRATH2O_RF": "Radiative Forcing|CH4 Oxidation Stratospheric H2O",  # what is this
        "LANDUSE_RF": "Radiative Forcing|Land-use Change",
        "BCSNOW_RF": "Radiative Forcing|Black Carbon on Snow",
        "CO2PF_EMIS": "Land to Air Flux|CO2|MAGICC Permafrost",
        # "CH4PF_EMIS": "Land to Air Flux|CH4|MAGICC Permafrost",  # TODO: test and then add when needed
    })
    agg_ocean_heat_top = "Aggregated Ocean Heat Content"
    heat_content_aggreg_depths = {
        "HEATCONTENT_AGGREG_DEPTH{}".format(i): "{}{}Depth {}".format(
            agg_ocean_heat_top, DATA_HIERARCHY_SEPARATOR, i
        )
        for i in range(1, 4)
    }
    replacements.update(heat_content_aggreg_depths)
    replacements.update({"HEATCONTENT_AGGREG_TOTAL": agg_ocean_heat_top})
    # Generous upper bound on the number of ocean layers (1..998).
    ocean_temp_layer = {
        "OCEAN_TEMP_LAYER_{0:03d}".format(i): "Ocean Temperature{}Layer {}".format(
            DATA_HIERARCHY_SEPARATOR, i
        )
        for i in range(1, 999)
    }
    replacements.update(ocean_temp_layer)
    if inverse:
        return {v: k for k, v in replacements.items()}
    else:
        return replacements
|
def indentation_step(node):
    """Dirty little trick to get the difference between each indentation level.

    Implemented by finding the shortest indentation string (technically,
    the "least" of all of the indentation strings, but tabs and spaces
    mixed won't get this far, so those are synonymous).
    """
    r = find_root(node)
    # Collect all indentations into one set.
    all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT)
    if not all_indents:
        # Nothing is indented anywhere, so we get to pick what we want.
        # Four spaces is a popular convention (the fallback literal had
        # collapsed to a single space; restored to match the stated intent).
        return u"    "
    else:
        return min(all_indents)
|
def run(self, host: str = '0.0.0.0', port: int = 8080):
    """Start sirbot.

    Configure sirbot and start the aiohttp.web.Application.

    Args:
        host (str): host
        port (int): port
    """
    # Plugins must be fully configured before the web app starts serving.
    self._loop.run_until_complete(self._configure_plugins())
    web.run_app(self._app, host=host, port=port)
|
def build_conda_packages(self):
    """Run the Linux build and use converter to build OSX."""

    def upload(target):
        # Non-deploy runs go to the 'test' label and may overwrite.
        if not self.deploy:
            cmd = ["anaconda", "upload", target, "--label", "test", "--force"]
        else:
            cmd = ["anaconda", "upload", target]
        return subprocess.Popen(cmd).communicate()

    # temporary build directory for the converted packages
    bldir = "./tmp-bld"
    if not os.path.exists(bldir):
        os.makedirs(bldir)
    # iterate over builds
    for pybuild in ["2.7", "3"]:
        # build and upload Linux to anaconda.org
        build = api.build("conda-recipe/{}".format(self.package), python=pybuild)
        err = upload(build[0])
        # build OSX copies and upload them too
        api.convert(build[0], output_dir=bldir, platforms=["osx-64"])
        osxdir = os.path.join(bldir, "osx-64", os.path.basename(build[0]))
        err = upload(osxdir)
    # cleanup tmpdir
    shutil.rmtree(bldir)
|
def split_in_columns(filterform, fields_per_column=None):
    '''Return iterator that yields a column (iterator too).

    By default, flat field list is divided in columns with
    fields_per_column elements in each (fields_per_column is a
    class attribute).
    '''
    if fields_per_column is None:
        fields_per_column = filterform.fields_per_column
    total = len(filterform.fields)
    # ceiling division: one extra column for any remainder
    ncolumns = -(-total // fields_per_column)
    field_iter = iter(filterform)
    for _ in range(ncolumns):
        # Each column is a lazy slice over the single shared iterator.
        yield itertools.islice(field_iter, fields_per_column)
|
def receive_information_confirmation(self, message):
    """A InformationConfirmation is received.

    If :meth:`the api version is supported
    <AYABInterface.communication.Communication.api_version_is_supported>`,
    the communication object transitions into a
    :class:`InitializingMachine`, if unsupported, into a
    :class:`UnsupportedApiVersion`.
    """
    if message.api_version_is_supported():
        next_state = InitializingMachine
    else:
        next_state = UnsupportedApiVersion
    self._next(next_state)
    # Remember the controller that sent this confirmation.
    self._communication.controller = message
|
def get_categorical_features_to_sampling(examples, top_k):
    """Returns categorical features and a sampling of their most-common values.

    The results of this slow function are used by the visualization repeatedly,
    so the results are cached.

    Args:
        examples: Examples to read to get feature samples.
        top_k: Max number of samples to return per feature.

    Returns:
        A dict of feature_name -> {'samples': ['Married-civ-spouse',
        'Never-married', 'Divorced']}.
        There is one key for each categorical feature.
        Currently, the inner dict just has one key, but this structure leaves
        room for further expansion, and mirrors the structure used by
        `get_numeric_features_to_observed_range`.
    """
    # feature name -> list of every raw value observed for that feature
    values_by_feature = collections.defaultdict(list)
    for example in examples:
        for feature_name in get_categorical_feature_names(example):
            original_feature = parse_original_feature_from_example(example, feature_name)
            values_by_feature[feature_name].extend(original_feature.original_value)
    result = {}
    for feature_name, observed_values in sorted(iteritems(values_by_feature)):
        counts = collections.Counter(observed_values)
        # Keep only values seen more than once, most common first.
        samples = [value for value, count in counts.most_common(top_k) if count > 1]
        if samples:
            result[feature_name] = {'samples': samples}
    return result
|
def dict_to_obj(cls, dict_, columns, row_columns, tab='', key_on=None):
    """
    :param dict_: dict of dict or dict of list
    :param columns: list of strings to label the columns on print out
    :param row_columns: list of columns in the actually data
    :param tab: str of the tab to use before the row on printout
    :param key_on: str of the column to key each row on
    :return: SeabornTable
    """
    # Materialise the values view once; ``dict_.values()[0]`` is not
    # subscriptable on Python 3 (the original only did this for the
    # isinstance check, then crashed inside the dict branch).
    first_value = list(dict_.values())[0]
    if isinstance(first_value, dict):
        row_columns = row_columns or columns or cls._key_on_columns(
            key_on, cls._ordered_keys(first_value))
        column_index = cls._create_column_index(row_columns)
        if key_on is None:
            table = [SeabornRow(column_index, [row[c] for c in row_columns])
                     for row in dict_.values()]
        else:
            # Fall back to the row's key when the key_on column is missing.
            table = [SeabornRow(column_index,
                                [row.get(c, c == key_on and key or None)
                                 for c in row_columns])
                     for key, row in dict_.items()]
    elif isinstance(first_value, list):
        row_columns = row_columns or columns or cls._key_on_columns(
            key_on, sorted(dict_.keys()))
        column_index = cls._create_column_index(row_columns)
        # NOTE(review): in the original both key_on branches were identical
        # here — the key_on case was apparently never specialised for
        # dict-of-list input; behaviour preserved.
        table = [SeabornRow(column_index, [dict_[c][i] for c in columns])
                 for i in range(len(dict_[columns[0]]))]
    else:
        # Flat dict: one (key, value) row per entry.
        row_columns = row_columns or columns or ['KEY', 'VALUE']
        column_index = cls._create_column_index(row_columns)
        table = [SeabornRow(column_index, [k, v]) for k, v in dict_.items()]
    return cls(table, columns, row_columns, tab, key_on)
|
def add_name(self, name_attr, space_attr, new_schema):
    # type: (Text, Optional[Text], NamedSchema) -> Name
    """Add a new schema object to the name set.

    @arg name_attr: name value read in schema
    @arg space_attr: namespace value read in schema.
    @return: the Name that was just added.
    """
    to_add = Name(name_attr, space_attr, self.default_namespace)
    fullname = to_add.fullname
    # Reject reserved type names and duplicates before registering.
    if fullname in VALID_TYPES:
        raise SchemaParseException('%s is a reserved type name.' % fullname)
    if fullname in self.names:
        raise SchemaParseException('The name "%s" is already in use.' % fullname)
    self.names[fullname] = new_schema
    return to_add
|
def bk_black(cls):
    "Make the text background color black."
    attributes = cls._get_text_attributes()
    # Clearing all background bits leaves a black background, so no
    # BACKGROUND_BLACK flag needs to be set afterwards.
    attributes &= ~win32.BACKGROUND_MASK
    cls._set_text_attributes(attributes)
|
def readInstanceTag(instanceID, tagName="Name", connection=None):
    """Load a tag from EC2

    :param str instanceID: Instance ID to read the tag on
    :param str tagName: Name of tag to load
    :param connection: optional boto connection to use
    :returns: the tag's value
    :rtype: str
    """
    assert isinstance(instanceID, basestring), (
        "instanceID must be a string but is %r" % instanceID)
    assert isinstance(tagName, basestring), (
        "tagName must be a string but is %r" % tagName)
    if not connection:
        # Assume AWS credentials are in the environment or the instance is
        # using an IAM role
        connection = boto.ec2.connect_to_region(myRegion())
    # Filter the tag values for our instance_id
    # http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeTags.html
    tagData = connection.get_all_tags(filters={"resource-id": instanceID, "key": tagName})
    if not tagData:
        # The original used the Python-2-only ``raise E, msg`` statement,
        # which is a SyntaxError on Python 3; use call syntax instead.
        raise RuntimeError("%s: No such tag on %s" % (tagName, instanceID))
    return tagData[0].value
|
def send(self, timeout=None):
    """Returns an event or None if no events occur before timeout."""
    # SIGINT can only be intercepted from the main thread; elsewhere we
    # just poll without swapping the handler.
    if self.sigint_event and is_main_thread():
        with ReplacedSigIntHandler(self.sigint_handler):
            return self._send(timeout)
    return self._send(timeout)
|
def set(self, data_type, values):
    """Set the attribute value.

    Args::
        data_type: attribute data type (see constants HC.xxx)
        values: attribute value(s); specify a list to create
                a multi-valued attribute; a string valued
                attribute can be created by setting 'data_type'
                to HC.CHAR8 and 'values' to the corresponding
                string

                If the attribute already exists, it will be
                updated. However, it is illegal to try to change
                its data type or its order (number of values).

    Returns::
        None

    C library equivalent: VSsetattr
    """
    try:
        n_values = len(values)
    except TypeError:
        # Scalar value: wrap it so the buffer-filling loop below works.
        # (Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        values = [values]
        n_values = 1
    if data_type == HC.CHAR8:
        buf = _C.array_byte(n_values)
        # Allow values to be passed as a string.
        # Noop if a list is passed.
        values = list(values)
        for n in range(n_values):
            if not isinstance(values[n], int):
                values[n] = ord(values[n])
    elif data_type in [HC.UCHAR8, HC.UINT8]:
        buf = _C.array_byte(n_values)
    elif data_type == HC.INT8:
        # SWIG refuses negative values here. We found that if we
        # pass them as byte values, it will work.
        buf = _C.array_int8(n_values)
        values = list(values)
        for n in range(n_values):
            v = values[n]
            if v >= 0:
                v &= 0x7f
            else:
                v = abs(v) & 0x7f
                if v:
                    v = 256 - v
                else:
                    v = 128  # -128 in 2s complement
            values[n] = v
    elif data_type == HC.INT16:
        buf = _C.array_int16(n_values)
    elif data_type == HC.UINT16:
        buf = _C.array_uint16(n_values)
    elif data_type == HC.INT32:
        buf = _C.array_int32(n_values)
    elif data_type == HC.UINT32:
        buf = _C.array_uint32(n_values)
    elif data_type == HC.FLOAT32:
        buf = _C.array_float32(n_values)
    elif data_type == HC.FLOAT64:
        buf = _C.array_float64(n_values)
    else:
        raise HDF4Error("set: illegal or unimplemented data_type")
    # Copy the (possibly normalised) values into the C buffer.
    for n in range(n_values):
        buf[n] = values[n]
    status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
                          data_type, n_values, buf)
    _checkErr('attr', status, 'cannot execute')
    # Update the attribute index
    self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex, self._name)
    if self._index < 0:
        raise HDF4Error("set: error retrieving attribute index")
|
def save_bed(cls, query, filename=sys.stdout):
    """write a bed12 file of the query.

    Parameters
    ----------
    query : query
        a table or query to save to file
    filename : file
        string or filehandle to write output
    """
    # NOTE(review): the handle from _open is never closed/flushed here —
    # presumably _open passes filehandles (like the stdout default) through
    # unchanged; confirm before adding a close().
    out = _open(filename, 'w')
    for record in query:
        out.write(record.bed() + '\n')
|
def view(self, rec):
    '''View the page.'''
    kwd = {'pager': ''}
    self.render('wiki_page/page_view.html',
                postinfo=rec,
                kwd=kwd,
                author=rec.user_name,
                format_date=tools.format_date,
                userinfo=self.userinfo,
                cfg=CMS_CFG)
|
def _process_tensor_event(self, event, thresholds):
    """Converts a TensorEvent into a dict that encapsulates information on it.

    Args:
        event: The TensorEvent to convert.
        thresholds: An array of floats that ranges from 0 to 1 (in that
            direction and inclusive of 0 and 1).

    Returns:
        A JSON-able dictionary of PR curve data for 1 step.
    """
    data = tensor_util.make_ndarray(event.tensor_proto)
    return self._make_pr_entry(event.step, event.wall_time, data, thresholds)
|
def sils_cut(T, f, c, d, h):
    """solve_sils -- solve the lot sizing problem with cutting planes
    - start with a relaxed model
    - add cuts until there are no fractional setup variables

    Parameters:
        - T: number of periods
        - P: set of products
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs

    Returns the final model solved, with all necessary cuts added.
    """
    Ts = range(1, T + 1)
    model = sils(T, f, c, d, h)
    y, x, I = model.data
    # relax integer variables (y[t] are the binary setup decisions)
    for t in Ts:
        y[t].vtype = "C"
    # compute D[i,j] = sum_{t=i}^j d[t] (cumulative demand over [i, j])
    D = {}
    for t in Ts:
        s = 0
        for j in range(t, T + 1):
            s += d[j]
            D[t, j] = s
    # NOTE(review): EPS is defined but never used below — the separation
    # test compares exactly; presumably it was meant as a tolerance.
    EPS = 1.e-6
    cuts = True
    while cuts:
        # Re-solve the current relaxation, then look for violated
        # (l, S, L) valid inequalities; stop when none are found.
        model.optimize()
        cuts = False
        for ell in Ts:
            lhs = 0
            S, L = [], []
            # Partition periods 1..ell: S takes the setup term, L the
            # production term, whichever is smaller in the current solution.
            for t in range(1, ell + 1):
                yt = model.getVal(y[t])
                xt = model.getVal(x[t])
                if D[t, ell] * yt < xt:
                    S.append(t)
                    lhs += D[t, ell] * yt
                else:
                    L.append(t)
                    lhs += xt
            if lhs < D[1, ell]:
                # add cutting plane constraint
                model.addCons(quicksum([x[t] for t in L]) + quicksum(D[t, ell] * y[t] for t in S) >= D[1, ell])
                cuts = True
    model.data = y, x, I
    return model
|
def getFaxStatsCounters(self):
    """Query Asterisk Manager Interface for Fax Stats.

    CLI Command - fax show stats

    @return: Dictionary of fax stats keyed by section name (lowercased),
             or None when fax support is unavailable.
    """
    if not self.hasFax():
        return None
    info_dict = {}
    cmdresp = self.executeCommand('fax show stats')
    ctxt = 'general'
    # Skip the first section (command header); each remaining section starts
    # with its context name followed by "key : value" counter lines.
    for section in cmdresp.strip().split('\n\n')[1:]:
        i = 0
        for line in section.splitlines():
            mobj = re.match(r'(\S.*\S)\s*:\s*(\d+)\s*$', line)
            if mobj:
                # dict.has_key() was removed in Python 3; use ``in``.
                if ctxt not in info_dict:
                    info_dict[ctxt] = {}
                info_dict[ctxt][mobj.group(1).lower()] = int(mobj.group(2))
            elif i == 0:
                # First non-counter line of a section names the context.
                ctxt = line.strip().lower()
            i += 1
    return info_dict
|
def start(self):
    """Initiate the download."""
    log.info("Sending tftp download request to %s" % self.host)
    log.info(" filename -> %s" % self.file_to_transfer)
    log.info(" options -> %s" % self.options)
    # Record when the transfer started for throughput metrics.
    self.metrics.start_time = time.time()
    log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
    # FIXME: put this in a sendRRQ method?
    pkt = TftpPacketRRQ()
    pkt.filename = self.file_to_transfer
    pkt.mode = "octet"  # FIXME - shouldn't hardcode this
    pkt.options = self.options
    self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
    self.next_block = 1
    self.last_pkt = pkt
    # State machine: loop until a state transition sets self.state to None.
    self.state = TftpStateSentRRQ(self)
    while self.state:
        try:
            log.debug("State is %s" % self.state)
            self.cycle()
        except TftpTimeout as err:
            # Retransmit the last packet up to TIMEOUT_RETRIES times,
            # then give up and propagate the timeout.
            log.error(str(err))
            self.retry_count += 1
            if self.retry_count >= TIMEOUT_RETRIES:
                log.debug("hit max retries, giving up")
                raise
            else:
                log.warning("resending last packet")
                self.state.resendLast()
        except TftpFileNotFoundError as err:
            # If we received file not found, then we should not save the open
            # output file or we'll be left with a size zero file. Delete it,
            # if it exists.
            log.error("Received File not found error")
            if self.fileobj is not None and not self.filelike_fileobj:
                if os.path.exists(self.fileobj.name):
                    log.debug("unlinking output file of %s", self.fileobj.name)
                    os.unlink(self.fileobj.name)
            raise
|
def longitude(value):
    """
    :param value: input string
    :returns: longitude float, rounded to 5 digits, i.e. 1 meter maximum

    >>> longitude('0.123456')
    0.12346
    """
    # Round to 5 decimal places (~1 m resolution at the equator).
    lon = round(float_(value), 5)
    if lon > 180.:
        raise ValueError('longitude %s > 180' % lon)
    if lon < -180.:
        raise ValueError('longitude %s < -180' % lon)
    return lon
|
def build_vars(path=None):
    """Build initial vars."""
    init_vars = {
        "__name__": "__main__",
        "__package__": None,
        "reload": reload,
    }
    if path is not None:
        init_vars["__file__"] = fixpath(path)
    # put reserved_vars in for auto-completion purposes
    init_vars.update((var, None) for var in reserved_vars)
    return init_vars
|
def get_user(self, name):
    """Finds the user in this instance with the specified name, and
    returns a CloudDatabaseUser object. If no match is found, a
    NoSuchDatabaseUser exception is raised.
    """
    try:
        return self._user_manager.get(name)
    except exc.NotFound:
        # Translate the generic 404 into a database-specific exception.
        message = "No user by the name '%s' exists." % name
        raise exc.NoSuchDatabaseUser(message)
|
def _plot_figure ( self , idx , fig_format = 'json' ) :
"""Returns the figure in html format on the
first call and"""
|
self . plot . update ( idx )
if self . embed :
patch = self . renderer . diff ( self . plot , binary = False )
msg = serialize_json ( dict ( content = patch . content , root = self . plot . state . _id ) )
return msg
|
def get_app_key(self):
    """If app_key is not provided will look in environment
    variables for username.

    Raises:
        AppKeyError: when no (truthy) value is set for ``self.username``.
    """
    if self.app_key is None:
        # Single environment lookup (the original read the variable twice);
        # empty-string values are still treated as missing, as before.
        app_key = os.environ.get(self.username)
        if app_key:
            self.app_key = app_key
        else:
            raise AppKeyError(self.username)
|
def zforce(self, R, z, phi=0., t=0.):
    """
    NAME:
       zforce
    PURPOSE:
       evaluate the vertical force F_z(R,z,t)
    INPUT:
       R - Cylindrical Galactocentric radius (can be Quantity)
       z - vertical height (can be Quantity)
       phi - azimuth (optional; can be Quantity)
       t - time (optional; can be Quantity)
    OUTPUT:
       F_z(R,z,phi,t)
    HISTORY:
       2010-04-16 - Written - Bovy (NYU)
    """
    # Thin public wrapper; the undecorated implementation does the work.
    return self._zforce_nodecorator(R, z, phi=phi, t=t)
|
def match(self, models, results, relation):
    """Match the eagerly loaded results to their parents.

    :type models: list
    :type results: Collection
    :type relation: str
    """
    # Delegates directly to match_many.
    return self.match_many(models, results, relation)
|
def compute_jackson_cheby_coeff(filter_bounds, delta_lambda, m):
    r"""To compute the m+1 coefficients of the polynomial approximation of an ideal band-pass between a and b, between a range of values defined by lambda_min and lambda_max.

    Parameters
    ----------
    filter_bounds : list
        [a, b]
    delta_lambda : list
        [lambda_min, lambda_max]
    m : int

    Returns
    -------
    ch : ndarray
        Chebyshev coefficients.
    jch : ndarray
        Jackson-damped Chebyshev coefficients.

    References
    ----------
    :cite:`tremblay2016compressive`
    """
    # Parameters check
    if delta_lambda[0] > filter_bounds[0] or delta_lambda[1] < filter_bounds[1]:
        _logger.error("Bounds of the filter are out of the lambda values")
        # ``raise ()`` in the original attempted to raise a tuple, which is
        # itself a TypeError; raise a proper exception instead.
        raise ValueError("Bounds of the filter are out of the lambda values")
    elif delta_lambda[0] > delta_lambda[1]:
        _logger.error("lambda_min is greater than lambda_max")
        raise ValueError("lambda_min is greater than lambda_max")
    # Scaling and translating to standard cheby interval [-1, 1]
    a1 = (delta_lambda[1] - delta_lambda[0]) / 2
    a2 = (delta_lambda[1] + delta_lambda[0]) / 2
    # Scale the band-pass bounds; use locals so the caller's filter_bounds
    # list is no longer mutated in place (side-effect fix).
    lower = (filter_bounds[0] - a2) / a1
    upper = (filter_bounds[1] - a2) / a1
    # First compute cheby coeffs
    ch = np.empty(m + 1, dtype=float)
    ch[0] = (2 / (np.pi)) * (np.arccos(lower) - np.arccos(upper))
    for i in range(1, len(ch)):
        ch[i] = (2 / (np.pi * i)) * (np.sin(i * np.arccos(lower)) - np.sin(i * np.arccos(upper)))
    # Then compute jackson coeffs
    jch = np.empty(m + 1, dtype=float)
    alpha = (np.pi / (m + 2))
    for i in range(len(jch)):
        jch[i] = (1 / np.sin(alpha)) * (
            (1 - i / (m + 2)) * np.sin(alpha) * np.cos(i * alpha)
            + (1 / (m + 2)) * np.cos(alpha) * np.sin(i * alpha)
        )
    # Combine jackson and cheby coeffs
    jch = ch * jch
    return ch, jch
|
def predict(self, X):
    """Predict risk scores.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Data matrix.

    Returns
    -------
    risk_score : array, shape = (n_samples,)
        Predicted risk scores.
    """
    check_is_fitted(self, 'estimators_')
    if X.shape[1] != self.n_features_:
        raise ValueError('Dimensions of X are inconsistent with training data: '
                         'expected %d features, but got %s' % (self.n_features_, X.shape[1]))
    n_samples = X.shape[0]
    # Prepend an intercept column of ones.
    Xi = numpy.column_stack((numpy.ones(n_samples), X))
    pred = numpy.zeros(n_samples, dtype=float)
    for estimator in self.estimators_:
        pred += self.learning_rate * estimator.predict(Xi)
    # For these loss types the accumulated prediction is exponentiated
    # in place before being returned.
    if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)):
        numpy.exp(pred, out=pred)
    return pred
|
def db_putHex(self, db_name, key, value):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex

    DEPRECATED
    """
    warnings.warn('deprecated', DeprecationWarning)
    # Ensure the value carries a 0x prefix before sending it on.
    value = value if value.startswith('0x') else add_0x(value)
    return (yield from self.rpc_call('db_putHex', [db_name, key, value]))
|
def friedrich_coefficients(x, param):
    r"""Coefficients of polynomial :math:`h(x)`, which has been fitted to
    the deterministic dynamics of Langevin model

    .. math::
        \dot{x}(t) = h(x(t)) + \mathcal{N}(0,R)

    as described by [1]. For short time-series this method is highly
    dependent on the parameters.

    .. rubric:: References

    |  [1] Friedrich et al. (2000): Physics Letters A 271, p. 217-222
    |  *Extracting model equations from experimental data*

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"m": x, "r": y, "coeff": z} with x being positive integer, the order of polynom to fit for estimating fixed points of
                  dynamics, y positive float, the number of quantils to use for averaging and finally z, a positive integer corresponding to the returned
                  coefficient
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    # cache of already-computed coefficient vectors: {m: {r: coeffs}}
    calculated = {}
    # res holds the results, e.g. {"m_10__r_2__coeff_3": 15.43}
    res = {}
    for parameter_combination in param:
        m = parameter_combination['m']
        r = parameter_combination['r']
        coeff = parameter_combination["coeff"]
        assert coeff >= 0, "Coefficients must be positive or zero. Found {}".format(coeff)
        # Compute (and cache) the friedrich coefficients for (m, r) if needed.
        # BUG FIX: the original assigned ``calculated[m] = {r: ...}`` when m
        # was already present, discarding previously cached r-entries for the
        # same m and defeating the cache.
        if m not in calculated:
            calculated[m] = {}
        if r not in calculated[m]:
            calculated[m][r] = _estimate_friedrich_coefficients(x, m, r)
        key = "m_{}__r_{}__coeff_{}".format(m, r, coeff)
        try:
            res[key] = calculated[m][r][coeff]
        except IndexError:
            # Requested coefficient index beyond the fitted polynomial order.
            res[key] = np.NaN
    return list(res.items())
|
def d2BinaryRochedx2(r, D, q, F):
    """Computes second derivative of the potential with respect to x.

    @param r: relative radius vector (3 components)
    @param D: instantaneous separation
    @param q: mass ratio
    @param F: synchronicity parameter
    """
    x, y, z = r[0], r[1], r[2]
    # squared distance from the primary
    r2 = x * x + y * y + z * z
    # coordinates relative to the secondary, displaced by D along x
    dx = x - D
    s2 = dx * dx + y * y + z * z
    term_primary = (2 * x * x - y * y - z * z) / r2 ** 2.5
    term_secondary = q * (2 * dx * dx - y * y - z * z) / s2 ** 2.5
    return term_primary + term_secondary + F * F * (1 + q)
|
def _parse_phone_and_hash(self, phone, phone_hash):
    """Helper method to both parse and validate phone and its hash."""
    # Fall back to the last phone used when none is supplied.
    parsed_phone = utils.parse_phone(phone) or self._phone
    if not parsed_phone:
        raise ValueError('Please make sure to call send_code_request first.')
    resolved_hash = phone_hash or self._phone_code_hash.get(parsed_phone, None)
    if not resolved_hash:
        raise ValueError('You also need to provide a phone_code_hash.')
    return parsed_phone, resolved_hash
|
def comments(self, case_id=None, variant_id=None, username=None):
    """Return comments for a case or variant.

    Args:
        case_id (str): id for a related case
        variant_id (Optional[str]): id for a related variant
    """
    # NOTE(review): ``username`` is currently unused.
    logger.debug("Looking for comments")
    query = self.query(Comment)
    if case_id:
        query = query.filter_by(case_id=case_id)
    if variant_id:
        query = query.filter_by(variant_id=variant_id)
    elif case_id:
        # case-level comments only: exclude variant-specific ones
        query = query.filter_by(variant_id=None)
    return query
|
def json(self, branch='master', filename=''):
    """Retrieve _filename_ from GitLab.

    Args:
        branch (str): Git Branch to find file.
        filename (str): Name of file to retrieve.

    Returns:
        dict: Decoded JSON.

    Raises:
        SystemExit: Invalid JSON provided.
    """
    file_contents = self.get(branch=branch, filename=filename)
    try:
        json_dict = json.loads(file_contents)
    # TODO: Use json.JSONDecodeError when Python 3.4 has been deprecated
    except ValueError as error:
        # Include the offending filename in the message; the original
        # hard-coded "(unknown)" and never used the ``filename`` that it
        # passed to .format().
        msg = ('"{filename}" appears to be invalid json. '
               'Please validate it with http://jsonlint.com. '
               'JSON decoder error:\n'
               '{error}').format(filename=filename, error=error)
        raise SystemExit(msg)
    LOG.debug('JSON object:\n%s', json_dict)
    return json_dict
|
def load_project(cls, fname, auto_update=None, make_plot=True, draw=False,
                 alternative_axes=None, main=False, encoding=None,
                 enable_post=False, new_fig=True, clear=None, **kwargs):
    """Load a project from a file or dict.

    This classmethod allows to load a project that has been stored using
    the :meth:`save_project` method and reads all the data and creates the
    figures.

    Since the data is stored in external files when saving a project,
    make sure that the data is accessible under the relative paths
    as stored in the file `fname` or from the current working directory
    if `fname` is a dictionary. Alternatively use the `alternative_paths`
    parameter or the `pwd` parameter.

    Parameters
    ----------
    fname: str or dict
        The string might be the path to a file created with the
        :meth:`save_project` method, or it might be a dictionary from this
        method
    %(InteractiveBase.parameters.auto_update)s
    %(Project._add_data.parameters.make_plot)s
    %(InteractiveBase.start_update.parameters.draw)s
    alternative_axes: dict, None or list
        alternative axes instances to use

        - If it is None, the axes and figures from the saving point will
          be reproduced.
        - a dictionary should map from array names in the created
          project to matplotlib axes instances
        - a list should contain axes instances that will be used for
          iteration
    main: bool, optional
        If True, a new main project is created and returned.
        Otherwise (by default) the data is added to the current
        main project.
    encoding: str
        The encoding to use for loading the project. If None, it is
        automatically determined by pickle. Note: Set this to ``'latin1'``
        if using a project created with python2 on python3.
    enable_post: bool
        If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
        enabled and post processing scripts are allowed. Do only set this
        parameter to ``True`` if you know you can trust the information in
        `fname`
    new_fig: bool
        If True (default) and `alternative_axes` is None, new figures are
        created if the figure already exists
    %(Project._add_data.parameters.clear)s
    pwd: str or None, optional
        Path to the working directory from where the data can be imported.
        If None and `fname` is the path to a file, `pwd` is set to the
        directory of this file. Otherwise the current working directory is
        used.
    %(ArrayList.from_dict.parameters.no_d|pwd)s

    Other Parameters
    ----------------
    %(ArrayList.from_dict.parameters)s

    Returns
    -------
    Project
        The project in state of the saving point
    """
    from pkg_resources import iter_entry_points

    def get_ax_base(name, alternatives):
        # Find a reference axes for a shared-axis group: prefer the axes of
        # the named array, else any axes among the alternatives; arrays that
        # already live on the chosen axes are dropped from `alternatives`.
        ax_base = next(iter(obj(arr_name=name).axes), None)
        if ax_base is None:
            ax_base = next(iter(obj(arr_name=alternatives).axes), None)
        if ax_base is not None:
            alternatives.difference_update(obj(ax=ax_base).arr_names)
        return ax_base

    # Load the pickled project dict (or copy the given one) and resolve the
    # working directory for relative data paths.
    pwd = kwargs.pop('pwd', None)
    if isinstance(fname, six.string_types):
        with open(fname, 'rb') as f:
            pickle_kws = {} if not encoding else {'encoding': encoding}
            d = pickle.load(f, **pickle_kws)
        pwd = pwd or os.path.dirname(fname)
    else:
        d = dict(fname)
        pwd = pwd or getcwd()
    # check for patches of plugins
    for ep in iter_entry_points('psyplot', name='patches'):
        patches = ep.load()
        for arr_d in d.get('arrays').values():
            plotter_cls = arr_d.get('plotter', {}).get('cls')
            if plotter_cls is not None and plotter_cls in patches:
                # apply the patch
                patches[plotter_cls](arr_d['plotter'], d.get('versions', {}))
    # Recreate the figures from the saving point (mapping saved figure
    # numbers to the newly created ones), unless alternative axes are given.
    fig_map = {}
    if alternative_axes is None:
        for fig_dict in six.itervalues(d.get('figs', {})):
            orig_num = fig_dict.get('num') or 1
            fig_map[orig_num] = _ProjectLoader.load_figure(
                fig_dict, new_fig=new_fig).number
    elif not isinstance(alternative_axes, dict):
        alternative_axes = cycle(iter(alternative_axes))
    obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs)
    if main:
        # we create a new project with the project factory to make sure
        # that everything is handled correctly
        obj = project(None, obj)
    axes = {}
    arr_names = obj.arr_names
    # arr_name of the sharing base -> names of arrays sharing its x/y axis
    sharex = defaultdict(set)
    sharey = defaultdict(set)
    # Recreate one plotter per saved array (iteration orders of `obj` and
    # the filtered dict are assumed to line up).
    for arr, (arr_name, arr_dict) in zip(
            obj, filter(lambda t: t[0] in arr_names,
                        six.iteritems(d['arrays']))):
        if not arr_dict.get('plotter'):
            continue
        plot_dict = arr_dict['plotter']
        plotter_cls = getattr(
            import_module(plot_dict['cls'][0]), plot_dict['cls'][1])
        # Resolve the axes to plot on: explicit alternatives first, then the
        # saved axes description.
        ax = None
        if alternative_axes is not None:
            if isinstance(alternative_axes, dict):
                # NOTE(review): uses `arr.arr_name` here but `arr.psy.arr_name`
                # below -- presumably equivalent accessors; confirm.
                ax = alternative_axes.get(arr.arr_name)
            else:
                ax = next(alternative_axes, None)
        if ax is None and 'ax' in plot_dict:
            # Reuse an axes that another array of the same shared group has
            # already opened; otherwise create it from the saved description.
            already_opened = plot_dict['ax'].get(
                'shared', set()).intersection(axes)
            if already_opened:
                ax = axes[next(iter(already_opened))]
            else:
                plot_dict['ax'].pop('shared', None)
                plot_dict['ax']['fig'] = fig_map[
                    plot_dict['ax'].get('fig') or 1]
                if plot_dict['ax'].get('sharex'):
                    sharex[plot_dict['ax'].pop('sharex')].add(
                        arr.psy.arr_name)
                if plot_dict['ax'].get('sharey'):
                    sharey[plot_dict['ax'].pop('sharey')].add(
                        arr.psy.arr_name)
                axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes(
                    plot_dict['ax'])
        plotter_cls(arr, make_plot=False, draw=False, clear=False, ax=ax,
                    project=obj.main, enable_post=enable_post,
                    **plot_dict['fmt'])
    # handle shared x- and y-axes
    for key, names in sharex.items():
        ax_base = get_ax_base(key, names)
        if ax_base is not None:
            ax_base.get_shared_x_axes().join(
                ax_base, *obj(arr_name=names).axes)
            for ax in obj(arr_name=names).axes:
                ax._sharex = ax_base
    for key, names in sharey.items():
        ax_base = get_ax_base(key, names)
        if ax_base is not None:
            ax_base.get_shared_y_axes().join(
                ax_base, *obj(arr_name=names).axes)
            for ax in obj(arr_name=names).axes:
                ax._sharey = ax_base
    # Re-establish shared formatoptions between plotters.
    for arr in obj.with_plotter:
        shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {})
        for key, arr_names in six.iteritems(shared):
            arr.psy.plotter.share(
                obj(arr_name=arr_names).plotters, keys=[key])
    if make_plot:
        # NOTE(review): `plotter_cls` here is whatever the last loop
        # iteration left behind -- looks suspicious; confirm intended.
        for plotter in obj.plotters:
            plotter.reinit(draw=False, clear=clear or (
                clear is None and
                plotter_cls._get_sample_projection() is not None))
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        obj.draw()
        if rcParams['auto_show']:
            obj.show()
    if auto_update is None:
        auto_update = rcParams['lists.auto_update']
    if not main:
        # Register the loaded arrays with the current main project.
        obj._main = gcp(True)
        obj.main.extend(obj, new_name=True)
    obj.no_auto_update = not auto_update
    scp(obj)
    return obj
|
def load_images(url, format='auto', with_path=True, recursive=True,
                ignore_failure=True, random_order=False):
    """Loads images from a directory. JPEG and PNG images are supported.

    Parameters
    ----------
    url : str
        The string of the path where all the images are stored.
    format : {'PNG' | 'JPG' | 'auto'}, optional
        The format of the images in the directory. The default 'auto'
        parameter value tries to infer the image type from the file
        extension. If a format is specified, all images must be of that
        format.
    with_path : bool, optional
        Indicates whether a path column is added to the SFrame. If
        'with_path' is set to True, the returned SFrame contains a 'path'
        column, which holds a path string for each Image object.
    recursive : bool, optional
        Indicates whether 'load_images' should do recursive directory
        traversal, or a flat directory traversal.
    ignore_failure : bool, optional
        If true, prints warning for failed images and keep loading the rest
        of the images.
    random_order : bool, optional
        Load images in random order.

    Returns
    -------
    out : SFrame
        Returns an SFrame with either an 'image' column or both an 'image'
        and a 'path' column. The 'image' column is a column of Image
        objects. If with_path is True, there is also a 'path' column which
        contains the image path for each of each corresponding Image object.

    Examples
    --------
    >>> url = 'https://static.turi.com/datasets/images/nested'
    >>> image_sframe = turicreate.image_analysis.load_images(
    ...     url, "auto", with_path=False, recursive=True)
    """
    # All the real work happens in the native extension; this is a thin
    # pass-through wrapper. (Removed an unused `_make_internal_url` import
    # that was never applied to `url`.)
    from ... import extensions as _extensions
    return _extensions.load_images(url, format, with_path, recursive,
                                   ignore_failure, random_order)
|
def _record_last_active(self, host):
    """Move *host* to the front of ``self.hosts`` so it is tried first
    next time.

    The implementation of get_active_namenode relies on this reordering.
    Hosts not already in the list (e.g. a host passed at request time by
    the user) are ignored.
    """
    if host not in self.hosts:
        return
    reordered = [host]
    reordered.extend(h for h in self.hosts if h != host)
    # Keep this thread safe: publish the new list atomically and only then
    # update the timestamp.
    self.hosts = reordered
    self._last_time_recorded_active = time.time()
|
def is_chinese(name):
    """Return True if *name* is non-empty and consists only of Chinese
    (CJK ideograph) characters.

    Checked Unicode blocks: CJK Unified Ideographs (U+3400-U+9FFF), the
    Extension B..D area (U+20000-U+2CEAF), CJK Compatibility Ideographs
    (U+F900-U+FAFF) and the Compatibility Ideographs Supplement
    (U+2F800-U+2FA1F).

    Note
    ----
    Adapted from http://stackoverflow.com/questions/16441633/python-2-7-test-if-characters-in-a-string-are-all-chinese-characters

    Bug fix: the third range was written ``0xf900 <= ordch <= ordch``
    (always true for any ordch >= 0xf900), which wrongly classified
    characters such as U+FFFD as Chinese; the upper bound is 0xFAFF.
    """
    if not name:
        return False
    cjk_ranges = (
        (0x3400, 0x9fff),
        (0x20000, 0x2ceaf),
        (0xf900, 0xfaff),
        (0x2f800, 0x2fa1f),
    )
    return all(
        any(lo <= ord(ch) <= hi for lo, hi in cjk_ranges)
        for ch in name
    )
|
def Insert(self, index, rdfpathspec=None, **kwarg):
    """Insert a single component at index.

    If *rdfpathspec* is not given, one is constructed from **kwarg.
    Inserting at index 0 replaces this object's own data in place (so
    external references to ``self`` see the new head), pushing the old
    contents to the end of the inserted spec's chain; any other index
    splices the new spec into the nested_path linked list.
    """
    if rdfpathspec is None:
        rdfpathspec = self.__class__(**kwarg)
    if index == 0:
        # Copy ourselves to a temp copy.
        nested_proto = self.__class__()
        nested_proto.SetRawData(self.GetRawData())
        # Replace ourselves with the new object.
        self.SetRawData(rdfpathspec.GetRawData())
        # Append the temp copy to the end.
        self.last.nested_path = nested_proto
    else:
        # Splice after component index-1: the new spec's tail takes over
        # the previous component's nested chain.
        previous = self[index - 1]
        rdfpathspec.last.nested_path = previous.nested_path
        previous.nested_path = rdfpathspec
|
def update_quota(self, project_id, body=None):
    """Issue a PUT request updating the quotas of *project_id*.

    Returns whatever ``self.put`` returns for the request.
    """
    path = self.quota_path % (project_id)
    return self.put(path, body=body)
|
def append(self, name, value):
    """Appends the given value to the list variable with the given name.

    :type name: string
    :param name: The name of the variable.
    :type value: object
    :param value: The appended value.
    """
    # Lazily create the variable store, then grow (or create) the list.
    if self.vars is None:
        self.vars = {}
    self.vars.setdefault(name, []).append(value)
|
def build_request(self, input_data=None, *args, **kwargs):
    """Builds the ProcessData request for the configured user object.

    :param input_data: plaintext payload; if given it replaces
        ``self.input_data``
    :param args: unused, kept for interface compatibility
    :param kwargs: unused, kept for interface compatibility
    :return: the populated RequestHolder (also stored on ``self.request``)
    :raises ValueError: if no input data or no user object (UO) is set
    """
    if input_data is not None:
        self.input_data = input_data
    if self.input_data is None:
        raise ValueError('Input data is None')
    if self.uo is None:
        raise ValueError('UO is None')
    # Request envelope: fresh nonce, API object/endpoint derived from the UO.
    self.request = RequestHolder()
    self.request.nonce = get_random_vector(EBConsts.FRESHNESS_NONCE_LEN)
    self.request.api_object = EBUtils.build_api_object(self.uo)
    self.request.endpoint = self.uo.resolve_endpoint()
    self.request.configuration = self.configuration
    self.request.api_method = EBConsts.REQUEST_PROCESS_DATA
    # Build plaintext plain_buffer:
    # 1-byte tag 0x1f | 4-byte UO id | nonce | payload, then PKCS#7 pad.
    plain_buffer = to_bytes(31, 1) \
        + to_bytes(self.uo.uo_id, 4) \
        + to_bytes(self.request.nonce, EBConsts.FRESHNESS_NONCE_LEN) \
        + to_bytes(self.input_data)
    plain_buffer = PKCS7.pad(plain_buffer)
    # Encrypt-then-mac: AES encrypt, then CBC-MAC over the ciphertext.
    ciphertext = aes_enc(self.uo.enc_key, plain_buffer)
    mac = cbc_mac(self.uo.mac_key, ciphertext)
    # Result request body: hex-encoded ciphertext||mac in the Packet0 frame.
    self.request.body = {"data": "Packet0_%s_0000%s" % (
        EBUtils.get_request_type(self.uo), to_hex(ciphertext + mac))}
    return self.request
|
def value(self):
    """Return the current value for the metric: the mean of the ``'l'``
    entries of the buffered episodes, or 0 when the buffer is empty."""
    if not self.buffer:
        return 0
    return np.mean([ep['l'] for ep in self.buffer])
|
def magic_contract(*args, **kwargs):
    """Drop-in replacement for the ``pycontracts.contract`` decorator that
    also supports locally-visible types.

    Every public class visible in the decorated function's globals is first
    registered as a contract type, then the ordinary ``contract`` decorator
    is applied.

    :param args: Arguments to pass to the ``contract`` decorator
    :param kwargs: Keyword arguments to pass to the ``contract`` decorator
    :return: The contracted function
    """
    def decorate(func):
        for type_name, candidate in func.__globals__.items():
            if type_name.startswith('_') or not isinstance(candidate, type):
                continue
            safe_new_contract(type_name, candidate)
        return contract(*args, **kwargs)(func)
    return decorate
|
def registerStatsHandler(app, serverName, prefix='/status/'):
    """Register the stats handler with a Flask app, serving routes under
    *prefix*.

    The prefix defaults to '/status/', which is generally what you want;
    a trailing slash is appended when missing. Both the bare prefix and
    any sub-path below it are routed to the handler.
    """
    if prefix[-1] != '/':
        prefix += '/'
    handler = functools.partial(statsHandler, serverName)
    for rule in (prefix, prefix + '<path:path>'):
        app.add_url_rule(rule, 'statsHandler', handler, methods=['GET'])
|
def insert_file(self, file):
    """insert_file(file)

    Load resources entries from FILE, and insert them into the database.
    FILE can be a filename (a str or bytes path) or a file object.

    Bug fix: the path case previously checked ``type(file) is bytes`` only,
    so a plain str filename was never opened (and crashed on ``.read()``);
    the opened handle was also never closed.
    """
    if isinstance(file, (str, bytes)):
        # A path was given: open, read and close it ourselves.
        with open(file, 'r') as fh:
            self.insert_string(fh.read())
    else:
        # A file-like object: the caller owns it, just consume it.
        self.insert_string(file.read())
|
def _postback(self):
    """Perform PayPal Postback validation.

    POSTs the original IPN query back to PayPal prefixed with the
    validate command and returns the raw response body.
    """
    payload = b"cmd=_notify-validate&" + self.query.encode("ascii")
    response = requests.post(self.get_endpoint(), data=payload)
    return response.content
|
def wrap(cls, value):
    '''Some property types need to wrap their values in special
    containers, etc.

    Plain dicts are wrapped in PropertyValueColumnData; values that are
    already wrapped, and non-dict values, pass through unchanged.
    '''
    if not isinstance(value, dict):
        return value
    if isinstance(value, PropertyValueColumnData):
        return value
    return PropertyValueColumnData(value)
|
def all_announcements_view(request):
    '''The view of manager announcements.

    Renders the full announcement archive. Managers can post a new
    announcement; an announcement's own manager (or a superuser) can pin
    or unpin it. Successful POSTs redirect back to this view
    (POST-redirect-GET).
    '''
    page_name = "Archives - All Announcements"
    userProfile = UserProfile.objects.get(user=request.user)
    announcement_form = None
    manager_positions = Manager.objects.filter(incumbent=userProfile)
    if manager_positions:
        # Only managers get the posting form; bind it to POST data only
        # when this request actually submitted it.
        announcement_form = AnnouncementForm(
            request.POST if "post_announcement" in request.POST else None,
            profile=userProfile,
        )
        if announcement_form.is_valid():
            announcement_form.save()
            return HttpResponseRedirect(
                reverse('managers:all_announcements'))
    # A pseudo-dictionary, actually a list with items of form
    # (announcement, announcement_pin_form)
    announcements_dict = list()
    for a in Announcement.objects.all():
        pin_form = None
        if a.manager.incumbent == userProfile or request.user.is_superuser:
            # Each announcement gets its own pin form, keyed by pk so only
            # the submitted one binds to POST data.
            pin_form = PinForm(
                request.POST if "pin-{0}".format(a.pk) in request.POST
                else None,
                instance=a,
            )
            if pin_form.is_valid():
                pin_form.save()
                return HttpResponseRedirect(
                    reverse('managers:all_announcements'))
        announcements_dict.append((a, pin_form))
    return render_to_response('announcements.html', {
        'page_name': page_name,
        'manager_positions': manager_positions,
        'announcements_dict': announcements_dict,
        'announcement_form': announcement_form,
    }, context_instance=RequestContext(request))
|
def BulkLabel(label, hostnames, owner=None, token=None, client_index=None):
    """Assign a label to a group of clients based on hostname.

    Sets a label as an identifier to a group of clients. Removes the label
    from other clients.

    This can be used to automate labeling clients based on externally
    derived attributes, for example machines assigned to particular users,
    or machines fulfilling particular roles.

    Args:
        label: The label to apply.
        hostnames: The collection of hostnames that should have the label.
        owner: The owner for the newly created labels. Defaults to
            token.username.
        token: The authentication token.
        client_index: An optional client index to use. If not provided, use
            the default client index.
    """
    if client_index is None:
        client_index = CreateClientIndex(token=token)
    # Normalize the target hostnames to lowercase FQDNs.
    fqdns = set()
    for hostname in hostnames:
        fqdns.add(hostname.lower())
    labelled_urns = client_index.LookupClients(["+label:%s" % label])
    # If a labelled client fqdn isn't in the set of target fqdns remove the
    # label. Labelled clients with a target fqdn need no action and are
    # removed from the set of target fqdns.
    for client in aff4.FACTORY.MultiOpen(
            labelled_urns, token=token, aff4_type=aff4_grr.VFSGRRClient,
            mode="rw"):
        fqdn = utils.SmartStr(client.Get("FQDN")).lower()
        if fqdn not in fqdns:
            client_index.RemoveClientLabels(client)
            client.RemoveLabel(label, owner=owner)
            client.Flush()
            # Re-index so the label removal is searchable.
            client_index.AddClient(client)
        else:
            fqdns.discard(fqdn)
    # The residual set of fqdns needs labelling.
    # Get the latest URN for these clients and open them to add the label.
    urns = []
    keywords = ["+host:%s" % fqdn for fqdn in fqdns]
    # NOTE(review): dict.itervalues() is Python 2 only -- this block appears
    # to target py2; would need .values() on py3.
    for client_list in client_index.ReadClientPostingLists(
            keywords).itervalues():
        for client_id in client_list:
            urns.append(rdfvalue.RDFURN(client_id))
    for client in aff4.FACTORY.MultiOpen(
            urns, token=token, aff4_type=aff4_grr.VFSGRRClient, mode="rw"):
        client.AddLabel(label, owner=owner)
        client.Flush()
        client_index.AddClient(client)
|
def withRange(cls, minimum, maximum):
    """Creates a subclass with value range constraint.

    The subclass keeps the base class's name but extends its subtypeSpec
    with a ValueRangeConstraint over [minimum, maximum].
    """
    bounded_spec = cls.subtypeSpec + constraint.ValueRangeConstraint(
        minimum, maximum)

    class X(cls):
        subtypeSpec = bounded_spec

    # Present the subclass under the base class's name to callers.
    X.__name__ = cls.__name__
    return X
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.