signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
async def start_task(self, task: Task) -> None:
    """Run *task* to completion and track its lifecycle.

    Initializes the task, records timing counters, registers it in
    ``running_tasks`` while it executes, and always cleans up the
    bookkeeping state — even on cancellation or unhandled error.

    Args:
        task: The Task to execute.
    """
    # BUG FIX: take the start timestamp *before* entering the try block.
    # In the original, `before` was assigned inside `try`; if the task
    # was cancelled (or Log.debug raised) before that assignment, the
    # `finally` clause crashed with an UnboundLocalError.
    before = time.time()
    try:
        Log.debug('task %s starting', task.name)
        task.counters['last_run'] = before
        task.running = True
        self.running_tasks.add(task)
        await task.run_task()
        Log.debug('task %s completed', task.name)
    except CancelledError:
        # Cancellation is a normal shutdown path, not an error.
        Log.debug('task %s cancelled', task.name)
    except Exception:
        # Never let a task exception escape the runner loop.
        Log.exception('unhandled exception in task %s', task.name)
    finally:
        # Always detach bookkeeping, regardless of how the task ended.
        self.running_tasks.discard(task)
        task.running = False
        task.task = None
        after = time.time()
        task.counters['last_completed'] = after
        task.counters['duration'] = after - before
|
def df_routes(self, value):
    """Set the routes dataframe and notify listeners.

    .. versionadded:: 0.11.3

    Stores *value* on the instance and emits a ``'routes-set'`` event
    carrying a copy of it; a TypeError raised during emission (or while
    copying) is deliberately swallowed.
    """
    self._df_routes = value
    try:
        payload = self._df_routes.copy()
        self.emit('routes-set', payload)
    except TypeError:
        # Best-effort notification only.
        pass
|
def generate_section_signatures(self, pe, name, sig_length=512):
    """Generate signatures for all the sections in a PE file.

    A signature is created for every section holding at least
    ``sig_length`` bytes of raw data. Each signature is named
    ``"<name> Section(<index>/<total>,<section name>)"``.
    """
    total = len(pe.sections)
    signatures = []
    for index, section in enumerate(pe.sections, start=1):
        if section.SizeOfRawData < sig_length:
            # Not enough raw data to build a signature from.
            continue
        # offset = pe.get_offset_from_rva(section.VirtualAddress)
        offset = section.PointerToRawData
        printable_name = ''.join(c for c in section.Name if c in string.printable)
        sig_name = '%s Section(%d/%d,%s)' % (name, index, total, printable_name)
        signatures.append(
            self.__generate_signature(
                pe, offset, sig_name,
                ep_only=False, section_start_only=True, sig_length=sig_length))
    return '\n'.join(signatures) + '\n'
|
def load_settings_file(self, settings_file=None):
    """Load the zappa settings file (YAML, TOML or JSON) into ``self.zappa_settings``.

    Args:
        settings_file: Path to the settings file; when falsy it is
            discovered via ``self.get_json_or_yaml_settings()``.

    Raises:
        ClickException: If the settings file does not exist.
        ValueError: If the file exists but cannot be parsed.
    """
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file or call `zappa init`.")
    path, ext = os.path.splitext(settings_file)
    if ext == '.yml' or ext == '.yaml':
        with open(settings_file) as yaml_file:
            try:
                # SECURITY/BUG FIX: yaml.load without an explicit Loader is
                # deprecated and can execute arbitrary constructors from a
                # config file; safe_load only builds plain Python objects.
                self.zappa_settings = yaml.safe_load(yaml_file)
            except yaml.YAMLError:  # pragma: no cover
                # BUG FIX: malformed YAML raises yaml.YAMLError, not
                # ValueError, so the friendly message was unreachable.
                raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.")
    elif ext == '.toml':
        with open(settings_file) as toml_file:
            try:
                # toml.TomlDecodeError subclasses ValueError, so this catch
                # covers malformed TOML.
                self.zappa_settings = toml.load(toml_file)
            except ValueError:  # pragma: no cover
                raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.")
    else:
        with open(settings_file) as json_file:
            try:
                # json.JSONDecodeError subclasses ValueError.
                self.zappa_settings = json.load(json_file)
            except ValueError:  # pragma: no cover
                raise ValueError("Unable to load the Zappa settings JSON. It may be malformed.")
|
def RGB_color_picker(obj):
    """Deterministically derive a ``Color`` from ``str(obj)``.

    The SHA-384 digest of the object's string representation divides
    evenly into three equal hex chunks, which become the red, green and
    blue components, each scaled down to the 0..1 range. Identical string
    representations therefore always yield identical colors, and
    different representations yield different ones.
    """
    digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
    # Three equal-size hex chunks, one per RGB channel.
    chunk = len(digest) // 3
    # Largest value a chunk can encode; used to normalize to [0.0, 1.0].
    scale = float(int('f' * chunk, 16))
    channels = tuple(
        int(digest[i * chunk:(i + 1) * chunk], 16) / scale
        for i in range(3)
    )
    return Color(rgb2hex(channels))
|
def kill_application(self, app_id, user=""):
    """Kill an application.

    Parameters
    ----------
    app_id : str
        The id of the application to kill.
    user : str, optional
        The user to kill the application as. Requires the current user to
        have permissions to proxy as ``user``. Default is the current user.
    """
    # Pure delegation to the RPC layer; no local state is touched.
    self._call('kill', proto.KillRequest(id=app_id, user=user))
|
def enc(self, byts, asscd=None):
    """Encrypt *byts* and return a msgpack-encoded envelope dict.

    Args:
        byts (bytes): The message to be encrypted.
        asscd (bytes): Extra data that is authenticated but not encrypted.

    Returns:
        bytes: A msgpacked dictionary containing the IV, ciphertext and
        associated data.
    """
    nonce = os.urandom(16)
    ciphertext = AESGCM(self.ekey).encrypt(nonce, byts, asscd)
    envelope = {'iv': nonce, 'data': ciphertext, 'asscd': asscd}
    return s_msgpack.en(envelope)
|
def histogram(self, axis=None, **kargs):
    """Compute and draw the histogram of the image.

    Useful for choosing a proper scale for the output, or for clipping
    some values. If *axis* is None, the current matplotlib axis is used.

    Parameters
    ----------
    axis : matplotlib axis, optional
        Axis to plot into; defaults to ``plt.gca()``.
    **kargs :
        Forwarded to :meth:`matplotlib.axes.Axes.hist` — e.g. *bins*,
        *range*, *normed*, *weights*, *cumulative*, *histtype*
        ('bar' | 'barstacked' | 'step' | 'stepfilled'), *align*,
        *orientation*, *rwidth*, *log*, *color*, *label* — plus any
        :class:`matplotlib.patches.Patch` property (alpha, edgecolor,
        facecolor, linestyle, linewidth, hatch, fill, zorder, ...).
        See the matplotlib ``hist`` documentation for full details.
    """
    # Idiom fix: identity test for None instead of '== None'.
    if axis is None:
        axis = plt.gca()
    # Flatten the image so hist() receives a 1-D sample of pixel values.
    axis.hist(self.__image.ravel(), **kargs)
|
def next_batch(self, _):
    """Handler for the 'Next Batch' button: delete flagged images, render next batch.

    Deletes every image in the current batch whose delete button is
    flagged, records the deleted file paths, advances the image window
    by one batch, empties the widgets and re-renders.
    """
    for img_widget, delete_btn, _fp in self._batch:
        # The button carries the authoritative path; the tuple's third
        # element was unused in the original (immediately overwritten).
        fp = delete_btn.file_path
        if delete_btn.flagged_for_delete:  # idiom fix: no '== True'
            self.delete_image(fp)
            self._deleted_fns.append(fp)
    # Drop the batch we just processed from the head of the image list.
    self._all_images = self._all_images[self._batch_size:]
    self.empty_batch()
    self.render()
|
def get_job_asset_url(self, job_id, filename):
    """Return the REST URL of a static asset collected for *job_id*."""
    template = 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'
    return template.format(self.client.sauce_username, job_id, filename)
|
def from_config(cls, config: dict):
    """Build an event object from an event configuration dictionary.

    Args:
        config (dict): Event configuration dictionary. Missing keys fall
            back to None (or an empty dict for ``data``).
    """
    get = config.get
    return cls(
        get('id'),
        get('type'),
        get('data', {}),
        get('origin'),
        get('timestamp'),
        get('object_type'),
        get('object_id'),
        get('object_key'),
    )
|
def export_features(self, delimiter='\\'):
    """Export this namespace's features as prefixed feature tokens.

    Each feature becomes ``'<namespace name><delimiter><feature>'``;
    e.g. a namespace 'name1' with features ['feature1', 'feature2']
    exports ['name1\\feature1', 'name1\\feature2'].
    """
    prefix = '{}{}'.format(self.name, delimiter)
    return [prefix + feature for feature in self.features]
|
def get_fields_class(self, class_name):
    """Return all fields belonging to a specific class.

    :param class_name: the class name
    :type class_name: string
    :rtype: a list with :class:`EncodedField` objects
    """
    return [
        field
        for klass in self.get_classes()
        for field in klass.get_fields()
        if field.get_class_name() == class_name
    ]
|
def get_unknown_barcodes(self, lane_unknown_barcode):
    """Return the unknown barcodes as an ``OrderedDict`` sorted by count, descending.

    Python 2 dictionaries are unsorted, hence the explicit OrderedDict.
    Returns None when *lane_unknown_barcode* is not a mapping (i.e. has
    no ``items`` attribute).
    """
    try:
        ranked = sorted(lane_unknown_barcode.items(),
                        key=operator.itemgetter(1),
                        reverse=True)
    except AttributeError:
        return None
    return OrderedDict(ranked)
|
def cleanup(self):
    """Remove all members without triggering a reconfig.

    Deletes every member via ``member_del(reconfig=False)`` and then
    clears the server map.
    """
    # BUG FIX: iterate over a snapshot. member_del() plausibly removes
    # the entry from server_map; mutating a dict while iterating it
    # raises "dictionary changed size during iteration".
    for item in list(self.server_map):
        self.member_del(item, reconfig=False)
    self.server_map.clear()
|
def get_init_container(self,
                       init_command,
                       init_args,
                       env_vars,
                       context_mounts,
                       persistence_outputs,
                       persistence_data):
    """Build the pod init container that prepares the experiment outputs path.

    Returns a one-element list with a ``V1Container``, or an empty list
    when no init step is required (resumed experiments reuse their
    outputs as-is).

    NOTE(review): ``persistence_data`` is accepted but never used here —
    presumably kept for interface symmetry with sibling builders; confirm
    with callers.
    """
    env_vars = to_list(env_vars, check_none=True)
    # Resuming a previous run: its outputs already exist, skip init.
    if self.original_name is not None and self.cloning_strategy == CloningStrategy.RESUME:
        return []
    if self.original_name is not None and self.cloning_strategy == CloningStrategy.COPY:
        # Cloning by copy: seed the new outputs from the original run's.
        command = InitCommands.COPY
        original_outputs_path = stores.get_experiment_outputs_path(
            persistence=persistence_outputs,
            experiment_name=self.original_name)
    else:
        # Fresh experiment: just create an empty outputs directory.
        command = InitCommands.CREATE
        original_outputs_path = None
    outputs_path = stores.get_experiment_outputs_path(
        persistence=persistence_outputs,
        experiment_name=self.experiment_name)
    _, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
    volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True)
    # Explicit command/args take precedence; otherwise run the generated
    # outputs-preparation command through a shell.
    init_command = init_command or ["/bin/sh", "-c"]
    init_args = init_args or to_list(get_output_args(command=command,
                                                     outputs_path=outputs_path,
                                                     original_outputs_path=original_outputs_path))
    init_args += to_list(get_auth_context_args(entity='experiment',
                                               entity_name=self.experiment_name))
    # The args become one shell string executed by `sh -c`.
    # NOTE(review): ''.join inserts no separator between fragments —
    # assumes each fragment carries its own separator; confirm against
    # get_output_args/get_auth_context_args.
    return [client.V1Container(name=self.init_container_name,
                               image=self.init_docker_image,
                               image_pull_policy=self.init_docker_image_pull_policy,
                               command=init_command,
                               args=[''.join(init_args)],
                               env=env_vars,
                               volume_mounts=volume_mounts)]
|
def hagen_poiseuille(target,
                     pore_area='pore.area',
                     throat_area='throat.area',
                     pore_viscosity='pore.viscosity',
                     throat_viscosity='throat.viscosity',
                     conduit_lengths='throat.conduit_lengths',
                     conduit_shape_factors='throat.flow_shape_factors'):
    r"""Calculate the hydraulic conductance of conduits in the network.

    A conduit is (1/2 pore - full throat - 1/2 pore).

    Parameters
    ----------
    target : OpenPNM Object
        The object this model is associated with; controls the length of
        the calculated array and provides access to other properties.
    pore_area, throat_area : string
        Dictionary keys of the pore/throat area values.
    pore_viscosity, throat_viscosity : string
        Dictionary keys of the pore/throat viscosity values.
    conduit_lengths : string
        Dictionary key of the conduit length values.
    conduit_shape_factors : string
        Dictionary key of the conduit FLOW shape factor values.

    Returns
    -------
    g : ndarray
        Hydraulic conductance values for the conduits in the geometry
        attached to the given physics object.

    Notes
    -----
    (1) All necessary phase properties must already be calculated.
    (2) The property is computed for the *entire* network, then the
        values for the appropriate throats are extracted.
    (3) Cylindrical throats with constant cross-section are assumed;
        pass a proper flow_shape_factor to correct for other shapes.
    """
    # Thin adapter: viscosity plays the role of diffusivity in the
    # generic conductance model.
    kwargs = dict(
        target=target,
        transport_type='flow',
        pore_area=pore_area,
        throat_area=throat_area,
        pore_diffusivity=pore_viscosity,
        throat_diffusivity=throat_viscosity,
        conduit_lengths=conduit_lengths,
        conduit_shape_factors=conduit_shape_factors,
    )
    return generic_conductance(**kwargs)
|
def _column_type(strings, has_invisible=True):
    """Return the least generic type that every column value converts to.

    Folds the per-value types with ``_more_generic``, starting from
    ``int`` (the most specific): e.g. ["1", "2"] -> int,
    ["1", "2.3"] -> float, ["1", "four"] -> text.
    """
    inferred = (_type(value, has_invisible) for value in strings)
    return reduce(_more_generic, inferred, int)
|
def view_for_image_named(image_name):
    """Create an ImageView for the named image, or None if it cannot be loaded."""
    image = resource.get_image(image_name)
    # A zero rect lets the view size itself from the image.
    return ImageView(pygame.Rect(0, 0, 0, 0), image) if image else None
|
def setup():
    """Create the shared, backup and upload directories, then fire setup recipes."""
    init_tasks()
    run_hook("before_setup")
    # (path getter, permissions) for each directory to provision.
    for get_path, perms in ((paths.get_shared_path, "755"),
                            (paths.get_backup_path, "750"),
                            (paths.get_upload_path, "775")):
        env.run("mkdir -p %s" % (get_path()))
        env.run("chmod %s %s" % (perms, get_path()))
    run_hook("setup")
    run_hook("after_setup")
|
def exists_course_list(curriculum_abbr, course_number, section_id, quarter, year, joint=False):
    """Return True if the corresponding mailman list exists for the course."""
    list_name = get_course_list_name(
        curriculum_abbr, course_number, section_id, quarter, year, joint)
    return exists(list_name)
|
def sub_for(expr, substitutions):
    """Substitute subexpressions in *expr* via an expression-to-expression mapping.

    Parameters
    ----------
    expr : ibis.expr.types.Expr
        An Ibis expression.
    substitutions : List[Tuple[Expr, Expr]]
        Pairs ``(old, new)``; wherever a subexpression of *expr* equals
        ``old``, it is replaced by ``new``.

    Returns
    -------
    ibis.expr.types.Expr
        The rewritten Ibis expression.
    """
    # The substitutor matches on operation nodes, so key by .op().
    op_mapping = {old.op(): new for old, new in substitutions}
    return Substitutor().substitute(expr, op_mapping)
|
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None):
    """Calculate motif enrichment based on the hypergeometric distribution.

    Args:
        sample: object exposing a ``motifs`` dict (motif -> matches) and
            ``seqn()`` (number of sequences).
        background: same interface as *sample*; serves as the null model.
        mtc: multiple-testing correction — None, "None", "Bonferroni" or
            "Benjamini-Hochberg".
        len_sample: number of sample sequences; ``sample.seqn()`` when falsy.
        len_back: number of background sequences; ``background.seqn()``
            when falsy.

    Returns:
        tuple of dicts keyed by motif:
        (significance score, p-value, sample count, background count).

    Raises:
        RuntimeError: if *mtc* is not a recognized correction method.
    """
    INF = "Inf"
    if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]:
        raise RuntimeError("Unknown correction: %s" % mtc)
    sig = {}
    p_value = {}
    n_sample = {}
    n_back = {}
    if not (len_sample):
        len_sample = sample.seqn()
    if not (len_back):
        len_back = background.seqn()
    for motif in sample.motifs.keys():
        p = "NA"
        s = "NA"
        q = len(sample.motifs[motif])  # matches observed in the sample
        m = 0
        if (background.motifs.get(motif)):
            m = len(background.motifs[motif])  # matches in the background
            n = len_back - m                   # background non-matches
            k = len_sample                     # number of draws
            # Upper-tail hypergeometric p-value: P(X >= q).
            p = phyper(q - 1, m, n, k)
            if p != 0:
                # Significance score is -log10(p).
                s = -(log(p) / log(10))
            else:
                s = INF
        else:
            # Motif absent from background: treated as maximally enriched.
            s = INF
            p = 0.0
        sig[motif] = s
        p_value[motif] = p
        n_sample[motif] = q
        n_back[motif] = m
    if mtc == "Bonferroni":
        # Scale each p-value by the number of tests, capped at 1.
        for motif in p_value.keys():
            if p_value[motif] != "NA":
                p_value[motif] = p_value[motif] * len(p_value.keys())
                if p_value[motif] > 1:
                    p_value[motif] = 1
    elif mtc == "Benjamini-Hochberg":
        # Step-up FDR: walk p-values in decreasing order, scaling by l/c
        # where c is the (descending) rank.
        motifs = sorted(p_value.keys(), key=lambda x: -p_value[x])
        l = len(p_value)
        c = l
        for m in motifs:
            if p_value[m] != "NA":
                p_value[m] = p_value[m] * l / c
            c -= 1
    return (sig, p_value, n_sample, n_back)
|
def get_properties(self, packet, bt_addr):
    """Return beacon properties appropriate for the packet type."""
    eddystone_frames = [EddystoneTLMFrame, EddystoneURLFrame,
                        EddystoneEncryptedTLMFrame, EddystoneEIDFrame]
    if is_one_of(packet, eddystone_frames):
        # Eddystone frames don't carry namespace/instance themselves;
        # look them up from the mapping keyed by bluetooth address.
        return self.properties_from_mapping(bt_addr)
    return packet.properties
|
def wgs84_to_utm(lng, lat, utm_crs=None):
    """Convert WGS84 coordinates to UTM, auto-detecting the UTM CRS when not given.

    :param lng: longitude in WGS84 system
    :type lng: float
    :param lat: latitude in WGS84 system
    :type lat: float
    :param utm_crs: UTM coordinate reference system enum constant, or
        None to derive it from the coordinates
    :type utm_crs: constants.CRS or None
    :return: east, north coordinates in UTM system
    :rtype: float, float
    """
    crs = get_utm_crs(lng, lat) if utm_crs is None else utm_crs
    return transform_point((lng, lat), CRS.WGS84, crs)
|
def energy_prof(step):
    """Radial profile of the energy flux (diffusion plus advection).

    Sphericity is taken into account by the underlying profile helpers
    if necessary.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.

    Returns:
        tuple of :class:`numpy.array`: the energy flux and the radial
        positions at which it is evaluated.
    """
    diff_flux, radius = diffs_prof(step)
    adv_flux = advts_prof(step)[0]
    # Advection profile is one entry short; pad with a trailing zero.
    return (diff_flux + np.append(adv_flux, 0)), radius
|
def call_release(self, arborted=False):
    """DEV: must be called when the object becomes ready to read.

    Releases the lock of ``_wait_non_ressources``. A missing or
    already-released trigger lock is silently ignored.
    """
    self.was_ended = arborted
    try:
        self.trigger.release()
    except (threading.ThreadError, AttributeError):
        # No trigger, or it was not held — nothing to release.
        pass
|
def dumps(ms, single=False, pretty_print=False, **kwargs):
    """Serialize Xmrs object(s) to the Prolog representation.

    Args:
        ms: an iterator of Xmrs objects to serialize (or a single Xmrs
            object when *single* is True).
        single: if True, treat *ms* as one Xmrs object rather than an
            iterator.
        pretty_print: if True, add newlines and indentation.

    Returns:
        The Prolog string representation of a corpus of Xmrs.
    """
    corpus = [ms] if single else ms
    return serialize(corpus, pretty_print=pretty_print, **kwargs)
|
def inherit(name, objectType, clear_existing_acl=False):
    '''Ensure an object is inheriting ACLs from its parent.

    Salt state function: checks current inheritance via the win_dacl
    execution module and enables it when missing, honoring test mode.

    Args:
        name: Path/name of the securable object.
        objectType: The win_dacl object type (e.g. file, registry).
        clear_existing_acl: When enabling inheritance, also remove the
            existing explicit ACEs.

    Returns:
        dict: Standard Salt state return (name/result/changes/comment).
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
    # Query the current inheritance state.
    tRet = __salt__['win_dacl.check_inheritance'](name, objectType)
    if tRet['result']:
        if not tRet['Inheritance']:
            # Inheritance is disabled and needs to be turned on.
            if __opts__['test']:
                # Test mode: report the pending change without applying it.
                ret['result'] = None
                ret['changes']['Inheritance'] = "Enabled"
                ret['comment'] = 'Inheritance is set to be enabled.'
                ret['changes']['Existing ACLs'] = ('Are set to be removed' if clear_existing_acl else 'Are set to be kept')
                return ret
            eRet = __salt__['win_dacl.enable_inheritance'](name, objectType, clear_existing_acl)
            if eRet['result']:
                ret['result'] = True
                # Merge the execution module's reported changes into ours.
                ret['changes'] = dict(ret['changes'], **eRet['changes'])
            else:
                ret['result'] = False
                ret['comment'] = ' '.join([ret['comment'], eRet['comment']])
        else:
            # Already inheriting; nothing to change.
            if __opts__['test']:
                ret['result'] = None
            ret['comment'] = 'Inheritance is enabled.'
    else:
        # The inheritance check itself failed; surface its comment.
        ret['result'] = False
        ret['comment'] = tRet['comment']
    return ret
|
def reg_add(self, reg, value):
    """Add a value to a register.

    The value can be another :class:`Register`, an :class:`Offset`, a
    :class:`Buffer`, an integer or ``None``.

    Arguments:
        reg(pwnypack.shellcode.types.Register): The register to add the
            value to.
        value: The value to add to the register.

    Returns:
        list: A list of mnemonics that will add ``value`` to ``reg``.

    Raises:
        ValueError: If an immediate does not fit the register, or the
            value type is unsupported.
    """
    if value is None:
        return []
    elif isinstance(value, Register):
        return self.reg_add_reg(reg, value)
    elif isinstance(value, (Buffer, six.integer_types)):
        # BUG FIX: the original tested ``isinstance(reg, Buffer)``, which
        # can never hold (reg is a Register), so a Buffer value was never
        # resolved even though ``value.offset`` is what's needed below.
        # The check must apply to *value*.
        if isinstance(value, Buffer):
            # Translate the buffer into its absolute data-section offset.
            # NOTE(review): assumes the keys of ``self.data`` are the data
            # blobs whose lengths precede the buffer — confirm iterkeys vs
            # itervalues against the class's data layout.
            value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset
        if not value:
            # Adding zero is a no-op.
            return []
        reg_width = self.REGISTER_WIDTH[reg]
        # Immediate must fit: signed lower bound, unsigned upper bound.
        if value < -2 ** (reg_width - 1):
            raise ValueError('%d does not fit %s' % (value, reg))
        elif value >= 2 ** reg_width:
            raise ValueError('%d does not fit %s' % (value, reg))
        if value > 0:
            return self.reg_add_imm(reg, value)
        else:
            return self.reg_sub_imm(reg, -value)
    else:
        raise ValueError('Invalid argument type "%s"' % repr(value))
|
def _find_conferences(self, year):
    """Retrieve the conferences and teams for the requested season.

    Populates ``self._conferences`` (abbreviation -> {'name', 'teams'})
    and ``self._team_conference`` (team -> conference abbreviation).

    Parameters
    ----------
    year : string
        A string of the requested year to pull conferences from; when
        falsy, defaults to the current NCAAF season.

    Raises
    ------
    ValueError
        If the conference page cannot be retrieved.
    """
    if not year:
        year = utils._find_year_for_season('ncaaf')
    page = self._pull_conference_page(year)
    if not page:
        # Adjacent string literals concatenate into one message line.
        output = ("Can't pull requested conference page. Ensure the " "following URL exists: %s" % (CONFERENCES_URL % year))
        raise ValueError(output)
    # One table row per conference on the page.
    conferences = page('table#conferences tbody tr').items()
    for conference in conferences:
        conference_abbreviation = self._get_conference_id(conference)
        conference_name = conference('td[data-stat="conf_name"]').text()
        # Pull the conference's team roster for this year.
        teams_dict = Conference(conference_abbreviation, year).teams
        conference_dict = {'name': conference_name, 'teams': teams_dict}
        # Index every team back to its conference for fast lookup.
        for team in teams_dict.keys():
            self._team_conference[team] = conference_abbreviation
        self._conferences[conference_abbreviation] = conference_dict
|
def rename_retract_ar_transition(portal):
    """Rename the 'retract_ar' workflow transition to 'invalidate'.

    Creates/configures the 'invalidate' transition on the AR workflow,
    rewires every state that offered 'retract_ar' to offer 'invalidate'
    instead, and finally deletes the old transition.
    """
    logger.info("Renaming 'retract_ar' transition to 'invalidate'")
    wf_tool = api.get_tool("portal_workflow")
    workflow = wf_tool.getWorkflowById("bika_ar_workflow")
    # Create the replacement transition if it is not there yet.
    if "invalidate" not in workflow.transitions:
        workflow.transitions.addTransition("invalidate")
    transition = workflow.transitions.invalidate
    transition.setProperties(
        title="Invalidate",
        new_state_id="invalid",
        after_script_name="",
        actbox_name="Invalidate",
    )
    guard = transition.guard or Guard()
    guard_props = {
        "guard_permissions": "BIKA: Retract",
        "guard_roles": "",
        "guard_expr": "python:here.guard_cancelled_object()",
    }
    guard.changeFromProperties(guard_props)
    transition.guard = guard
    for state in workflow.states.values():
        if 'retract_ar' in state.transitions:
            # BUG FIX: on Python 3, filter() returns a lazy iterator, and
            # iterator += tuple raises TypeError; build the tuple directly.
            trans = tuple(tid for tid in state.transitions if tid != 'retract_ar')
            trans += ('invalidate',)
            state.transitions = trans
    if "retract_ar" in workflow.transitions:
        workflow.transitions.deleteTransitions(["retract_ar"])
|
def set_audiorenderer(self, renderer):
    """Attach the SoundRenderer that will process frames from the audio queue.

    Parameters
    ----------
    renderer : soundrenderers.SoundRenderer
        A SoundRenderer subclass instance that takes care of the audio
        rendering.

    Raises
    ------
    RuntimeError
        If no information about the audiostream is available: no video
        loaded yet, no embedded audiostream detected, or play_sound was
        set to False.
    TypeError
        If *renderer* is not a SoundRenderer subclass instance.
    """
    queue = getattr(self, 'audioqueue', None)
    if queue is None:
        raise RuntimeError("No video has been loaded, or no audiostream was detected.")
    if not isinstance(renderer, SoundRenderer):
        raise TypeError("Invalid renderer object. Not a subclass of SoundRenderer")
    self.soundrenderer = renderer
    self.soundrenderer.queue = queue
|
def copy(self, deep=True):
    """Make a copy of this SparseDataFrame.

    Parameters
    ----------
    deep : bool, default True
        Passed through to the parent class ``copy``.

    Returns
    -------
    The copied frame, with the sparse-specific defaults carried over.
    """
    result = super().copy(deep=deep)
    # The base-class copy does not preserve these sparse attributes,
    # so propagate them onto the result explicitly.
    result._default_fill_value = self._default_fill_value
    result._default_kind = self._default_kind
    return result
|
def gen_random_name(family_name=None, gender=None, length=None):
    """Generate a random Chinese full name.

    Args:
        family_name (str): The surname to use; when None a random
            single-character surname is chosen.
        gender (str): "01" selects the male given-name pool, "00" the
            female pool; None (or any other value) picks one at random.
        length (int): Total name length, an integer from 2 to 10; when
            None or out of range, randomly 2 or 3.

    Returns:
        str: The generated full name (surname + given name).

    Example::

        print('--- gen_random_name demo ---')
        print(gen_random_name())
        print(gen_random_name("赵", "01", 3))
        print('---')

    Possible output::

        --- gen_random_name demo ---
        师艺
        赵群腾
    """
    family_word = ("赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜戚谢邹喻柏水窦章云苏潘葛"
                   "奚范彭郎鲁韦昌马苗凤花方俞任袁柳酆鲍史唐费廉岑薛雷贺倪汤滕殷罗毕郝邬安常乐于时傅皮卞齐康"
                   "伍余元卜顾孟平黄和穆萧尹姚邵湛汪祁毛禹狄米贝明臧计伏成戴谈宋茅庞熊纪舒屈项祝董梁杜阮蓝闵"
                   "席季麻强贾路娄危江童颜郭梅盛林刁钟徐邱骆高夏蔡田樊胡凌霍虞万支柯咎管卢莫经房裘缪干解应宗"
                   "宣丁贲邓郁单杭洪包诸左石崔吉钮龚程嵇邢滑裴陆荣翁荀羊於惠甄魏加封芮羿储靳汲邴糜松井段富巫"
                   "乌焦巴弓牧隗山谷车侯宓蓬全郗班仰秋仲伊宫宁仇栾暴甘钭厉戎祖武符刘姜詹束龙叶幸司韶郜黎蓟薄"
                   "印宿白怀蒲台从鄂索咸籍赖卓蔺屠蒙池乔阴郁胥能苍双闻莘党翟谭贡劳逄姬申扶堵冉宰郦雍却璩桑桂"
                   "濮牛寿通边扈燕冀郏浦尚农温别庄晏柴瞿阎充慕连茹习宦艾鱼容向古易慎戈廖庚终暨居衡步都耿满弘"
                   "匡国文寇广禄阙东殴殳沃利蔚越夔隆师巩厍聂晁勾敖融冷訾辛阚那简饶空曾毋沙乜养鞠须丰巢关蒯相"
                   "查后江红游竺权逯盖益桓公万俟司马上官欧阳夏侯诸葛闻人东方赫连皇甫尉迟公羊澹台公冶宗政濮阳"
                   "淳于仲孙太叔申屠公孙乐正轩辕令狐钟离闾丘长孙慕容鲜于宇文司徒司空亓官司寇仉督子车颛孙端木"
                   "巫马公西漆雕乐正壤驷公良拓拔夹谷宰父谷粱晋楚阎法汝鄢涂钦段干百里东郭南门呼延归海羊舌微生"
                   "岳帅缑亢况后有琴梁丘左丘东门西门商牟佘佴伯赏南宫墨哈谯笪年爱阳佟第五言福百家姓续")
    name_dict = {"00": ("秀娟英华慧巧美娜静淑惠珠翠雅芝玉萍红娥玲芬芳燕彩春菊兰凤洁梅琳素云莲真环雪荣爱妹霞"
                        "香月莺媛艳瑞凡佳嘉琼勤珍贞莉桂娣叶璧璐娅琦晶妍茜秋珊莎锦黛青倩婷姣婉娴瑾颖露瑶怡婵"
                        "雁蓓纨仪荷丹蓉眉君琴蕊薇菁梦岚苑婕馨瑗琰韵融园艺咏卿聪澜纯毓悦昭冰爽琬茗羽希宁欣飘"
                        "育滢馥筠柔竹霭凝晓欢霄枫芸菲寒伊亚宜可姬舒影荔枝思丽"),
                 "01": ("伟刚勇毅俊峰强军平保东文辉力明永健世广志义兴良海山仁波宁贵福生龙元全国胜学祥才发武"
                        "新利清飞彬富顺信子杰涛昌成康星光天达安岩中茂进林有坚和彪博诚先敬震振壮会思群豪心邦"
                        "承乐绍功松善厚庆磊民友裕河哲江超浩亮政谦亨奇固之轮翰朗伯宏言若鸣朋斌梁栋维启克伦翔"
                        "旭鹏泽晨辰士以建家致树炎德行时泰盛雄琛钧冠策腾楠榕风航弘")}
    if family_name is None:
        family_name = random.choice(family_word)
    # None or any unrecognized value falls back to a random gender
    # (the original's `is None or` test was redundant: None is never in
    # the list anyway).
    if gender not in ['00', '01']:
        gender = random.choice(['00', '01'])
    # None or an out-of-range length falls back to the common 2/3 length.
    if length not in [2, 3, 4, 5, 6, 7, 8, 9, 10]:
        length = random.choice([2, 3])
    # The given name is (length - 1) characters from the gender's pool.
    name = "".join([random.choice(name_dict[gender]) for _ in range(length - 1)])
    full_name = "{family_name}{name}".format(family_name=family_name, name=name)
    return full_name
|
def write_file ( path : str , contents , mode : str = 'w' , retry_count : int = 3 , offset : int = 0 ) -> typing . Tuple [ bool , typing . Union [ None , Exception ] ] :
    """Writes the specified contents to a file, with retry attempts if the write
    operation fails. This is useful to prevent OS related write collisions with
    files that are regularly written to and read from quickly.

    :param path: The path to the file that will be written
    :param contents: The contents of the file to write
    :param mode: The mode in which the file will be opened when written
    :param retry_count: The number of attempts to make before giving up and
        returning a failed write.
    :param offset: The byte offset in the file where the contents should be
        written. If zero, the offset is ignored and the operation writes
        entirely based on mode. An append mode combined with a non-zero
        offset is forced to write mode.
    :return: ``(succeeded, error)`` — error is None on success, otherwise the
        exception raised by the last failed write attempt.
    """
    error = None
    for attempt in range ( retry_count ) :
        error = attempt_file_write ( path , contents , mode , offset )
        if error is None :
            return True , None
        # Back off briefly before retrying, but skip the sleep after the
        # final failed attempt — there is no retry left to wait for.
        if attempt < retry_count - 1 :
            time . sleep ( 0.2 )
    return False , error
|
def bvlpdu_contents ( self , use_dict = None , as_class = dict ) :
    """Return the contents of this object as a dict."""
    # Render every broadcast distribution table entry as a string.
    bdt_strings = [ str ( entry ) for entry in self . bvlciBDT ]
    return key_value_contents ( use_dict = use_dict , as_class = as_class , key_values = ( ( 'function' , 'ReadBroadcastDistributionTableAck' ) , ( 'bdt' , bdt_strings ) , ) )
|
def add_review ( self , review ) :
    """Append a review object unless one with the same id is already present."""
    if review . id not in ( existing . id for existing in self . reviews ) :
        self . reviews . append ( review )
|
def plot ( self , figure_list ) :
    '''When each subscript is called, uses its standard plotting

    Args:
        figure_list: list of figures passed from the gui
    '''
    # TODO : be smarter about how we plot ScriptIterator
    if self . _current_subscript_stage is not None :
        if self . _current_subscript_stage [ 'current_subscript' ] is not None :
            self . _current_subscript_stage [ 'current_subscript' ] . plot ( figure_list )
    if ( self . is_running is False ) and not ( self . data == { } or self . data is None ) :
        script_names = list ( self . settings [ 'script_order' ] . keys ( ) )
        script_indices = [ self . settings [ 'script_order' ] [ name ] for name in script_names ]
        _ , sorted_script_names = list ( zip ( * sorted ( zip ( script_indices , script_names ) ) ) )
        # Use the plot function of the last script in the execution order.
        last_script = self . scripts [ sorted_script_names [ - 1 ] ]
        # since we use the last script plot function we force it to refresh
        last_script . force_update ( )
        axes_list = last_script . get_axes_layout ( figure_list )
        # catch error if _plot function doesn't take optional data argument
        try :
            last_script . _plot ( axes_list , self . data )
        except TypeError as err :
            print ( ( warnings . warn ( 'can\'t plot average script data because script.plot function doens\'t take data as optional argument. Plotting last data set instead' ) ) )
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute;
            # ``err.message`` raised AttributeError and masked the real error.
            print ( err )
            last_script . plot ( figure_list )
|
def bots_get ( self , bot ) :
    """Fetch and fill a Bot object.

    :param bot: empty bot object carrying the name to look up
    :type bot: Bot
    :rtype: Bot
    :return: filled bot object
    """
    # Deliberately call __getattr__ directly so the dynamic-endpoint lookup
    # on the API client is used even if the name shadows a real attribute.
    endpoint = self . client . bots . __getattr__ ( bot . name )
    data = endpoint ( )
    return Bot ( data )
|
def get_all ( self , ** kwargs ) :
    """Get all keys currently stored in etcd.

    :param keys_only: if True, retrieve only the keys, not the values
    :returns: sequence of (value, metadata) tuples
    """
    response = self . get_all_response ( ** kwargs )
    header = response . header
    for kv in response . kvs :
        yield kv . value , KVMetadata ( kv , header )
|
def start ( self ) -> None :
    """Start the internal control loop.

    Potentially blocking, depending on the value of ``_run_control_loop``
    set by the initializer.
    """
    self . _setup ( )
    if not self . _run_control_loop :
        self . _logger . debug ( ' run_control_loop == False' )
        return
    # Give this thread its own event loop before starting the heartbeat.
    asyncio . set_event_loop ( asyncio . new_event_loop ( ) )
    self . _heartbeat_reciever . start ( )
    self . _logger . info ( ' Start Loop' )
    return self . loop . start ( )
|
def make_timebar ( progress = 0 , duration = 0 ) :
    """Makes a new time bar string.

    Args:
        progress: How far through the current song we are (in seconds)
        duration: The duration of the current song (in seconds)

    Returns:
        timebar (str): The time bar string, or "---" when duration is not positive.
    """
    duration_string = api_music . duration_to_string ( duration )
    if duration <= 0 :
        return "---"
    # Number of filled cells, clamped to the bar length.
    time_counts = int ( round ( ( progress / duration ) * TIMEBAR_LENGTH ) )
    if time_counts > TIMEBAR_LENGTH :
        time_counts = TIMEBAR_LENGTH
    # NOTE: the original had an unreachable ``else`` branch here —
    # duration is always > 0 after the early return above, so the bar
    # is always built.
    bar = "│" + ( TIMEBAR_PCHAR * time_counts ) + ( TIMEBAR_ECHAR * ( TIMEBAR_LENGTH - time_counts ) ) + "│"
    return "{} {}" . format ( bar , duration_string )
|
def _init_norm ( self , weights ) :
    """Compute the per-unit norm of the weight tensor.

    Flattens ``weights`` to (-1, layer_depth), takes the column-wise norm,
    and returns it reshaped to (layer_depth,).
    """
    with tf . variable_scope ( "init_norm" ) :
        flattened = tf . reshape ( weights , [ - 1 , self . layer_depth ] )
        column_norms = tf . norm ( flattened , axis = 0 )
        return tf . reshape ( column_norms , ( self . layer_depth , ) )
|
def remove_cpds ( self , * cpds ) :
    """Remove the given CPDs from the model.

    Parameters
    ----------
    *cpds : array-like
        CPD objects to remove; a tuple/list entry is first resolved to its
        CPD instance via ``get_cpds``.

    Examples
    --------
    >>> from pgmpy.models import DynamicBayesianNetwork as DBN
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> dbn = DBN()
    >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
    >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
    ...                                     [0.4,0.25,0.8,0.03],
    ...                                     [0.3,0.7,0.02,0.2]], [('I',0),('D',0)], [2,2])
    >>> dbn.add_cpds(grade_cpd)
    >>> dbn.remove_cpds(grade_cpd)
    >>> dbn.get_cpds()
    """
    for entry in cpds :
        # A (node, time-slice) tuple/list names a CPD; resolve it first.
        target = self . get_cpds ( entry ) if isinstance ( entry , ( tuple , list ) ) else entry
        self . cpds . remove ( target )
|
def add_basic_block ( self , basic_block ) :
    """Append the given basic block to this function's block list."""
    # Guard against anything that is not a BasicBlock.
    assert isinstance ( basic_block , BasicBlock )
    self . basic_block_list . append ( basic_block )
|
def _from_string ( cls , serialized ) :
    """Deserialize the course part via CourseLocator, then parse the
    block-specific parts locally."""
    # Allow access to _from_string protected method
    course_key = CourseLocator . _from_string ( serialized )  # pylint: disable=protected-access
    parsed = cls . parse_url ( serialized )
    block_id = parsed . get ( 'block_id' )
    if block_id is None :
        raise InvalidKeyError ( cls , serialized )
    return cls ( course_key , parsed . get ( 'block_type' ) , block_id )
|
def update_branding ( self , branding_id , params ) :
    """Update an existing branding.

    @branding_id: Id of the branding to update
    @params: Same params as method create_branding
    @return: A dict with updated branding data
    """
    conn = Connection ( self . token )
    conn . add_header ( 'Content-Type' , 'application/json' )
    conn . set_url ( self . production , self . BRANDINGS_ID_URL % branding_id )
    conn . add_params ( params )
    return conn . patch_request ( )
|
async def find_deleted ( self , seq_set : SequenceSet , selected : SelectedMailbox ) -> Sequence [ int ] :
    """Return all the active message UIDs that carry the ``\\Deleted`` flag.

    Args:
        seq_set: The sequence set of the possible messages.
        selected: The selected mailbox session.
    """
    session_flags = selected . session_flags
    deleted_uids = [ ]
    async for _ , msg in self . find ( seq_set , selected ) :
        if Deleted in msg . get_flags ( session_flags ) :
            deleted_uids . append ( msg . uid )
    return deleted_uids
|
def stop ( self ) :
    """Stop the service process and release its resources.

    Closes the log file (unless it is a pipe or a native devnull handle),
    sends the remote shutdown command, closes the child's standard streams,
    and finally terminates/kills the process. Failures are swallowed so that
    stop() is safe to call on an already-dead service.
    """
    # Close the log file only when we own a real file object ( not a PIPE ,
    # and not a native devnull handle managed by subprocess itself ) .
    if self . log_file != PIPE and not ( self . log_file == DEVNULL and _HAS_NATIVE_DEVNULL ) :
        try :
            self . log_file . close ( )
        except Exception :
            # Best-effort close ; a failure here must not block shutdown .
            pass
    if self . process is None :
        # Never started , or already stopped : nothing to do .
        return
    try :
        # Ask the service to shut itself down over its remote interface .
        self . send_remote_shutdown_command ( )
    except TypeError :
        # NOTE(review): only TypeError is swallowed here — presumably raised
        # when the remote end is already gone ; confirm against callers .
        pass
    try :
        if self . process :
            # Close the child's standard streams before terminating it .
            for stream in [ self . process . stdin , self . process . stdout , self . process . stderr ] :
                try :
                    stream . close ( )
                except AttributeError :
                    # Stream is None when it was not redirected .
                    pass
            # Escalate : polite terminate , wait for exit , then hard kill .
            self . process . terminate ( )
            self . process . wait ( )
            self . process . kill ( )
            self . process = None
    except OSError :
        # Process already exited ; nothing left to clean up .
        pass
|
def add_item ( self , path , name , icon = None , url = None , order = None , permission = None , active_regex = None ) :
    """Add a new menu item to the menu, creating intermediate nodes as needed.

    :param path: Path of menu
    :param name: Display name
    :param icon: CSS icon
    :param url: link to page
    :param order: Sort order
    :param permission: permission required to see the item
    :param active_regex: regex marking the item active
    :return: None
    """
    if self . root_item is None :
        self . root_item = MenuItem ( 'ROOT' , 'ROOT' )
    parent = self . root_item
    walked_path = ''
    segments = path . split ( '/' )
    # Walk every intermediate segment, creating placeholder items on the way.
    for segment in segments [ : - 1 ] :
        if not segment :
            continue
        walked_path = '/' + '{}/{}' . format ( walked_path , segment ) . strip ( '/' )
        child = parent . child_by_code ( segment )
        if not child :
            # Create menu item if not exists
            child = MenuItem ( walked_path , name = str ( segment ) . capitalize ( ) )
            parent . add_child ( child )
        parent = child
    # Attach (or merge into) the leaf item itself.
    fresh_item = MenuItem ( path , name , icon , url , order , permission , active_regex )
    existing_item = parent . child_by_code ( segments [ - 1 ] )
    if existing_item :
        existing_item . merge ( fresh_item )
    else :
        parent . add_child ( fresh_item )
|
def login ( self , username , password ) :
    """Login to the TS3 Server.

    @param username: Username
    @type username: str
    @param password: Password
    @type password: str
    @return: True on success, False otherwise
    """
    result = self . send_command ( 'login' , keys = { 'client_login_name' : username , 'client_login_password' : password } )
    if result != 0 :
        return False
    self . _log . info ( 'Login Successful' )
    return True
|
def _get_user ( self , user : Union [ User , str ] ) -> User :
    """Return a ``User`` for ``user``, which may be a ``User`` or a user-id string.

    As all users are supposed to be in the discovery room, its members dict
    is used as a cache. When a ``User`` instance with a displayname is passed
    in, the cached entry's displayname is refreshed from it.
    """
    # Accept either a User object ( has .user_id ) or a plain id string .
    user_id : str = getattr ( user , 'user_id' , user )
    discovery_room = self . _global_rooms . get ( make_room_alias ( self . network_id , DISCOVERY_DEFAULT_ROOM ) , )
    if discovery_room and user_id in discovery_room . _members :
        # Cache hit : prefer the cached User instance .
        duser = discovery_room . _members [ user_id ]
        # if handed a User instance with displayname set , update the discovery room cache
        if getattr ( user , 'displayname' , None ) :
            assert isinstance ( user , User )
            duser . displayname = user . displayname
        user = duser
    elif not isinstance ( user , User ) :
        # Cache miss with only an id available : build a fresh User object .
        user = self . _client . get_user ( user_id )
    return user
|
def _exec_loop ( self , a , bd_all , xyz , mask , n_withdrifts , spec_drift_grids ) :
    """Solves the kriging system by looping over all specified points.

    Less memory-intensive than the vectorized variant, but involves a
    Python-level loop. For each unmasked point j it assembles the RHS
    vector b (variogram terms plus drift rows, plus the unbiasedness row
    when UNBIAS is set), solves against the pre-inverted kriging matrix,
    and accumulates the kriged value and variance.
    """
    npt = bd_all . shape [ 0 ]
    n = self . X_ADJUSTED . shape [ 0 ]
    kvalues = np . zeros ( npt )
    sigmasq = np . zeros ( npt )
    # Invert the kriging matrix once ; it is shared by every point .
    a_inv = scipy . linalg . inv ( a )
    for j in np . nonzero ( ~ mask ) [ 0 ] : # Note that this is the same thing as range ( npt ) if mask is not defined ,
        bd = bd_all [ j ]
        # otherwise it takes the non - masked elements .
        # A ( near - ) zero distance means the point coincides with a datum .
        if np . any ( np . absolute ( bd ) <= self . eps ) :
            zero_value = True
            zero_index = np . where ( np . absolute ( bd ) <= self . eps )
        else :
            zero_value = False
            zero_index = None
        if self . UNBIAS :
            b = np . zeros ( ( n_withdrifts + 1 , 1 ) )
        else :
            b = np . zeros ( ( n_withdrifts , 1 ) )
        b [ : n , 0 ] = - self . variogram_function ( self . variogram_model_parameters , bd )
        if zero_value :
            # Exact hit : force the corresponding variogram entries to zero .
            b [ zero_index [ 0 ] , 0 ] = 0.0
        # Drift rows start right after the n data rows .
        i = n
        if self . regional_linear_drift :
            b [ i , 0 ] = xyz [ j , 2 ]
            i += 1
            b [ i , 0 ] = xyz [ j , 1 ]
            i += 1
            b [ i , 0 ] = xyz [ j , 0 ]
            i += 1
        if self . specified_drift :
            for spec_vals in spec_drift_grids :
                # NOTE(review): indexing the flattened grid with the drift - row
                # counter ``i`` rather than the point index ``j`` looks
                # suspicious — confirm against the reference implementation .
                b [ i , 0 ] = spec_vals . flatten ( ) [ i ]
                i += 1
        if self . functional_drift :
            for func in self . functional_drift_terms :
                b [ i , 0 ] = func ( xyz [ j , 2 ] , xyz [ j , 1 ] , xyz [ j , 0 ] )
                i += 1
        if i != n_withdrifts :
            warnings . warn ( "Error in setting up kriging system. " "Kriging may fail." , RuntimeWarning )
        if self . UNBIAS :
            # Lagrange multiplier row enforcing unbiasedness .
            b [ n_withdrifts , 0 ] = 1.0
        x = np . dot ( a_inv , b )
        kvalues [ j ] = np . sum ( x [ : n , 0 ] * self . VALUES )
        sigmasq [ j ] = np . sum ( x [ : , 0 ] * - b [ : , 0 ] )
    return kvalues , sigmasq
|
def bytes2human ( n : Union [ int , float ] , format : str = '%(value).1f %(symbol)s' , symbols : str = 'customary' ) -> str :
    """Convert a number of bytes into a human-readable string.

    From http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.

    Args:
        n: number of bytes (floats are truncated to int)
        format: a %-format string with ``value`` and ``symbol`` keys
        symbols: one of ``"customary"``, ``"customary_ext"``, ``"iec"`` or
            ``"iec_ext"``; see http://goo.gl/kTQMs

    Returns:
        the formatted number

    Examples:
        >>> bytes2human(0)
        '0.0 B'
        >>> bytes2human(1024)
        '1.0 K'
        >>> bytes2human(9856, symbols="iec")
        '9.6 Ki'
        >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
        '9.8 K/sec'
    """
    # noqa
    n = int ( n )
    if n < 0 :
        raise ValueError ( "n < 0" )
    suffixes = SYMBOLS [ symbols ]
    # Threshold for each suffix: 2**10, 2**20, ... in suffix order.
    thresholds = { s : 1 << ( ( i + 1 ) * 10 ) for i , s in enumerate ( suffixes [ 1 : ] ) }
    for symbol in reversed ( suffixes [ 1 : ] ) :
        threshold = thresholds [ symbol ]
        if n >= threshold :
            return format % { 'value' : float ( n ) / threshold , 'symbol' : symbol }
    # Below the smallest threshold: report raw bytes with the base symbol.
    return format % { 'value' : n , 'symbol' : suffixes [ 0 ] }
|
def canonicalization ( self , method , node ) :
    """Canonicalize a node following the given method.

    :param method: Method identification
    :type method: str
    :param node: object to canonicalize
    :return: Canonicalized node as a string
    """
    try :
        c14n_method = constants . TransformUsageC14NMethod [ method ]
    except KeyError :
        raise Exception ( 'Method not allowed: ' + method )
    return etree . tostring ( node , method = c14n_method [ 'method' ] , with_comments = c14n_method [ 'comments' ] , exclusive = c14n_method [ 'exclusive' ] )
|
def get_slot ( handler_input , slot_name ) : # type: (HandlerInput, str) -> Optional[Slot]
    """Return the slot information from an intent request.

    Retrieves the :py:class:`ask_sdk_model.slot.Slot` for ``slot_name`` from
    the input intent request. Returns ``None`` when the slot is absent.

    :param handler_input: The handler input instance passed in the sdk's
        request and exception components
    :param slot_name: Name of the slot to retrieve
    :return: Slot information for the provided slot name, or ``None``
    :raises: TypeError if the input is not an IntentRequest
    """
    request = handler_input . request_envelope . request
    if not isinstance ( request , IntentRequest ) :
        raise TypeError ( "The provided request is not an IntentRequest" )
    slots = request . intent . slots
    if slots is None :
        return None
    return slots . get ( slot_name , None )
|
def add_reverse_arcs ( graph , capac = None ) :
    """Ensure that for every arc (u, v) the reverse arc (v, u) exists,
    creating it with zero capacity when missing.

    :param graph: adjacency lists (listlist) or adjacency dicts (listdict);
        in the dict case ``capac`` is ignored
    :param capac: optional arc capacity matrix for the listlist case
    :complexity: linear
    :returns: nothing; ``graph`` (and ``capac``) are modified in place
    """
    for u , successors in enumerate ( graph ) :
        for v in successors :
            if u in graph [ v ] :
                continue
            # type() check kept deliberately: only exact lists take this path.
            if type ( graph [ v ] ) is list :
                graph [ v ] . append ( u )
                if capac :
                    capac [ v ] [ u ] = 0
            else :
                assert type ( graph [ v ] ) is dict
                graph [ v ] [ u ] = 0
|
def _take_screenshot ( self , screenshot = False , name_prefix = 'unknown' ) :
    """This is different from _save_screenshot.
    The return value may be None or the screenshot path.

    Args:
        screenshot: bool or PIL image
    """
    if isinstance ( screenshot , bool ) :
        if not screenshot :
            return
        return self . _save_screenshot ( name_prefix = name_prefix )
    if isinstance ( screenshot , Image . Image ) :
        return self . _save_screenshot ( screen = screenshot , name_prefix = name_prefix )
    # BUG FIX: concatenating str + type raised its own TypeError ("can only
    # concatenate str ...") and hid the intended message; stringify the type.
    raise TypeError ( "invalid type for func _take_screenshot: " + str ( type ( screenshot ) ) )
|
def edges2nodes ( edges ) :
    """Collect the distinct nodes appearing in a list of edges.

    Nodes are deduplicated (first occurrence preserved) and then sorted by
    the string form of each node's first element.
    """
    seen = { }
    for endpoint_a , endpoint_b in edges :
        seen [ endpoint_a ] = None
        seen [ endpoint_b ] = None
    return sorted ( seen . keys ( ) , key = lambda node : str ( node [ 0 ] ) )
|
def _chooseBestSegmentPerCell ( cls , connections , cells , allMatchingSegments , potentialOverlaps ) :
    """For each specified cell, choose its matching segment with the largest
    number of active potential synapses; ties go to the first segment.

    @param connections (SparseMatrixConnections)
    @param cells (numpy array)
    @param allMatchingSegments (numpy array)
    @param potentialOverlaps (numpy array)
    @return (numpy array) one segment per cell
    """
    perCellCandidates = connections . filterSegmentsByCell ( allMatchingSegments , cells )
    # Narrow down to the best candidate per cell via argmax within groups.
    bestPerCell = np2 . argmaxMulti ( potentialOverlaps [ perCellCandidates ] , connections . mapSegmentsToCells ( perCellCandidates ) )
    return perCellCandidates [ bestPerCell ]
|
def describe ( self , pid , vendorSpecific = None ) :
    """Describe an object via a HEAD request.

    Note: if the server returns a status other than 200 OK, a ServiceFailure
    is raised, since a HEAD request cannot carry exception information.
    """
    head_response = self . describeResponse ( pid , vendorSpecific = vendorSpecific )
    return self . _read_header_response ( head_response )
|
def capture_moves ( self , position ) :
    """Yield all possible capture moves (right diagonal first, then left).

    :rtype: generator
    """
    # IndexError from either the diagonal lookup or the capture-square
    # scan simply means that side has no capture, so it is swallowed.
    for shift in ( self . location . shift_right , self . location . shift_left ) :
        try :
            diagonal = self . square_in_front ( shift ( ) )
            yield from self . _one_diagonal_capture_square ( diagonal , position )
        except IndexError :
            pass
|
def read_input_file ( path , sep = '\t' , noquote = False ) :
    """Read a delimited input file and return a matrix (list of lists).

    :param path: the complete path to the input file to read
    :param sep: the field separator (tab by default)
    :param noquote: when True, strip double quotes from every row
    :return: list of rows, each a list of fields; empty on read failure
    """
    output = [ ]
    try :
        # ``with`` guarantees the stream is closed, replacing the manual
        # try/finally close of the original.
        with open ( path , 'r' ) as stream :
            for row in stream :
                row = row . strip ( )
                if noquote :
                    row = row . replace ( '"' , '' )
                output . append ( row . split ( sep ) )
    except IOError as err :  # pragma: no cover
        LOG . info ( "Something wrong happend while reading the file %s " % path )
        LOG . debug ( "ERROR: %s" % err )
    return output
|
def rotateCD ( self , orient ) :
    """Rotate the WCS CD matrix to the new orientation given by 'orient'."""
    # How far must the current orientation rotate to reach the target?
    delta = self . get_orient ( ) - orient
    if delta == 0. :
        # Already aligned; nothing to do.
        return
    # Build the rotation matrix, apply it to the CD matrix, store results.
    rotation = fileutil . buildRotMatrix ( delta )
    cd_matrix = N . array ( [ [ self . cd11 , self . cd12 ] , [ self . cd21 , self . cd22 ] ] , dtype = N . float64 )
    rotated = N . dot ( cd_matrix , rotation )
    self . cd11 = rotated [ 0 ] [ 0 ]
    self . cd12 = rotated [ 0 ] [ 1 ]
    self . cd21 = rotated [ 1 ] [ 0 ]
    self . cd22 = rotated [ 1 ] [ 1 ]
    self . orient = orient
|
def get_grist ( value ) :
    """Return the grist of a string.

    If value is a sequence, does it for every value and returns the result
    as a sequence.
    """
    # NOTE(review): ``basestring`` is a Python 2 name; under Python 3 this
    # assert raises NameError unless a compatibility alias is defined
    # elsewhere in the module — confirm before porting.
    assert is_iterable_typed ( value , basestring ) or isinstance ( value , basestring )
    def get_grist_one ( name ) :
        # ``__re_grist_and_value`` is a module-level compiled pattern;
        # group(1) holds the grist prefix when the string matches.
        split = __re_grist_and_value . match ( name )
        if not split :
            return ''
        else :
            return split . group ( 1 )
    if isinstance ( value , str ) :
        return get_grist_one ( value )
    else :
        return [ get_grist_one ( v ) for v in value ]
|
import math
def calculate_standard_deviation ( inputs ) :
    """Return the sample standard deviation of a list of numbers.

    Computed as the square root of the sum of squared deviations from the
    mean divided by (n - 1) (Bessel's correction). Returns 0.0 for fewer
    than two values.

    Examples:
        >>> calculate_standard_deviation([4, 2, 5, 8, 6])
        2.23606797749979
        >>> calculate_standard_deviation([1, 2, 3, 4, 5, 6, 7])
        2.160246899469287

    Args:
        inputs: A list of numbers.

    Returns:
        Standard deviation of the numbers in the list.
    """
    n = len ( inputs )
    if n <= 1 :
        # Not enough data for a sample deviation.
        return 0.0
    mean = sum ( inputs ) / n
    variance = sum ( ( value - mean ) ** 2 for value in inputs ) / ( n - 1 )
    return math . sqrt ( variance )
|
def get ( self , * args , ** kwargs ) :
    """Get an element from the iterable by an arg or kwarg.

    Args can be a single positional argument that is an index value to
    retrieve. If the specified index is out of range, None is returned.
    Otherwise use kwargs to provide a key/value; the key is expected to be
    a valid attribute of the iterated class. For example, to get an element
    with attribute name equal to 'foo', pass name='foo'.

    :raises ValueError: An argument was missing
    :return: the specified item; type is whatever this iterable yields,
        may be None
    """
    # NOTE(review): when the collection is empty (falsy) this returns None
    # without raising, even if no args/kwargs were given.
    if self :
        if args :
            index = args [ 0 ]
            # Upper-bound check only; negative indices fall through to
            # normal Python negative indexing below.
            if index <= len ( self ) - 1 :
                return self [ args [ 0 ] ]
            return None
        elif kwargs :
            # Only one key/value pair is consumed (popitem); no match
            # falls through to an implicit None.
            key , value = kwargs . popitem ( )
            for item in self . items :
                if getattr ( item , key , None ) == value :
                    return item
        else :
            raise ValueError ( 'Missing argument. You must provide an ' 'arg or kwarg to fetch an element from the collection.' )
|
def save ( filepath , makedirs = True , title = u'IPyVolume Widget' , all_states = False , offline = False , scripts_path = 'js' , drop_defaults = False , template_options = ( ( "extra_script_head" , "" ) , ( "body_pre" , "" ) , ( "body_post" , "" ) ) , devmode = False , offline_cors = False , ) :
    """Save the current container to an HTML file.

    By default the HTML file is not standalone and requires an internet
    connection to fetch a few javascript libraries. Use offline=True to
    download these and make the HTML work without an internet connection.

    :param str filepath: The file to write the HTML output to.
    :param bool makedirs: whether to create missing directories in the path
    :param str title: title for the html page
    :param bool all_states: include the state of all widgets known to the
        widget manager, not just those in widgets
    :param bool offline: use local urls for required js/css packages and
        download them if not already available
    :param str scripts_path: folder for required js/css packages (relative
        to the filepath)
    :param bool drop_defaults: drop default values from the widget states
    :param template_options: list or dict of additional template options
    :param bool devmode: attempt to get index.js from the local js/dist folder
    :param bool offline_cors: set crossorigin="anonymous" on script tags
    """
    # Collect the keyword options once and forward them to the embedder.
    options = dict ( makedirs = makedirs , title = title , all_states = all_states , offline = offline , scripts_path = scripts_path , drop_defaults = drop_defaults , template_options = template_options , devmode = devmode , offline_cors = offline_cors )
    ipyvolume . embed . embed_html ( filepath , current . container , ** options )
|
def set_face_values ( self , front_face_value , side_face_value , top_face_value ) :
    """Record integer values for the front, side and top faces.

    Raises NullArgument when any value is None.
    """
    values = ( front_face_value , side_face_value , top_face_value )
    if any ( v is None for v in values ) :
        raise NullArgument ( )
    for label , value in zip ( ( 'frontFaceValue' , 'sideFaceValue' , 'topFaceValue' ) , values ) :
        self . add_integer_value ( value = int ( value ) , label = label )
|
def _process_json ( data ) :
    """Build a list of GradPetition objects from decoded JSON items.

    :param data: iterable of dicts as returned by the petitions endpoint
    :return: list of GradPetition objects
    """
    requests = [ ]
    for item in data :
        petition = GradPetition ( )
        petition . description = item . get ( 'description' )
        petition . submit_date = parse_datetime ( item . get ( 'submitDate' ) )
        petition . decision_date = parse_datetime ( item . get ( 'decisionDate' ) )
        # Fetch each optional field once; non-empty truthiness replaces the
        # redundant ``x and len(x)`` double lookup of the original.
        dept_recommend = item . get ( 'deptRecommend' )
        if dept_recommend :
            petition . dept_recommend = dept_recommend . lower ( )
        gradschool_decision = item . get ( 'gradSchoolDecision' )
        if gradschool_decision :
            petition . gradschool_decision = gradschool_decision . lower ( )
        requests . append ( petition )
    return requests
|
def get_requirements ( * args ) :
    """Collect requirement specifiers from a pip requirements file.

    Comments and whitespace are stripped; the result is sorted and unique.
    """
    found = set ( )
    with open ( get_absolute_path ( * args ) ) as handle :
        for raw_line in handle :
            # Strip full-line and trailing comments.
            cleaned = re . sub ( r'^#.*|\s#.*' , '' , raw_line )
            # Skip blank lines; collapse internal whitespace away.
            if cleaned and not cleaned . isspace ( ) :
                found . add ( re . sub ( r'\s+' , '' , cleaned ) )
    return sorted ( found )
|
def _to_dict ( self ) :
    """Return a json dictionary representing this model.

    Only fields that exist and are not None are included.
    """
    output = { }
    for attr in ( 'normalized_text' , 'start_time' , 'end_time' , 'confidence' ) :
        # getattr with a None default collapses "missing" and "None" into
        # the same skip condition, matching the hasattr/not-None pairs.
        value = getattr ( self , attr , None )
        if value is not None :
            output [ attr ] = value
    return output
|
def idx_sequence ( self ) :
    """Indices of sentences when enumerating the data set from batches.

    Useful for retrieving the correct order of sentences.

    Returns
    -------
    list
        List of ids ranging from 0 to #sent - 1
    """
    # Pair each record value with its original position, order by value,
    # and read back the original positions.
    ordered_pairs = sorted ( zip ( self . _record , range ( len ( self . _record ) ) ) )
    return [ original_index for _ , original_index in ordered_pairs ]
|
def _measure ( self , weighted ) :
    """Return the primary measure for this cube.

    If the cube response includes a means measure, it is returned.
    Otherwise counts are returned — weighted or unweighted depending on
    *weighted*; weighted counts are provided on an "as-available" basis,
    so an unweighted cube yields unweighted counts even when *weighted*
    is True.
    """
    measures = self . _measures
    if measures . means is not None :
        return measures . means
    if weighted :
        return measures . weighted_counts
    return measures . unweighted_counts
|
def script_exists ( self , digest , * digests ) :
    """Check existence of scripts in the script cache."""
    # Assemble the full command before dispatching it.
    command = ( b'SCRIPT' , b'EXISTS' , digest ) + digests
    return self . execute ( * command )
|
def getLCDType ( self ) :
    """Return the LCD type as a string: 'monochrome' or 'rgb'."""
    settings = self . sendCommand ( '$GE' )
    # The third field is a hex flag word; bit 8 marks a monochrome panel.
    flags = int ( settings [ 2 ] , 16 )
    return 'monochrome' if flags & 0x0100 else 'rgb'
|
def p_preprocessor_line_line ( p ) :
    """preproc _ line : _ LINE INTEGER"""
    # PLY parser rule: the docstring above IS the grammar production and
    # must not be edited freely.
    # NOTE(review): the spacing inside the docstring looks like extraction
    # garbling of ``preproc_line : _LINE INTEGER`` — confirm against the
    # repository before relying on it.
    # Rebase the lexer's line counter so subsequent tokens report positions
    # relative to the #line directive's argument.
    p . lexer . lineno = int ( p [ 2 ] ) + p . lexer . lineno - p . lineno ( 2 )
|
def upsert ( cls , name , ** fields ) :
    """Insert a new instance or update an existing one, keyed by name."""
    instance = cls . get ( name )
    if instance :
        # Existing row: refresh its fields in place.
        instance . _set_fields ( fields )
    else :
        # No row yet: create one with the supplied fields.
        instance = cls ( name = name , ** fields )
    return failsafe_add ( cls . query . session , instance , name = name )
|
def setup_multiifo_interval_coinc_inj ( workflow , hdfbank , full_data_trig_files , inj_trig_files , stat_files , background_file , veto_file , veto_name , out_dir , pivot_ifo , fixed_ifo , tags = None ) :
    """This function sets up exact match multiifo coincidence for injections.

    Pairs injection triggers against full-data triggers per ifo, builds the
    injinj / injfull / fullinj coincidence jobs (split into parallel slices
    by the workflow's parallelization-factor), and hands the collected
    background files on to setup_multiifo_statmap_inj.
    """
    if tags is None :
        tags = [ ]
    make_analysis_dir ( out_dir )
    logging . info ( 'Setting up coincidence for injections' )
    if len ( hdfbank ) != 1 :
        raise ValueError ( 'Must use exactly 1 bank file for this coincidence ' 'method, I got %i !' % len ( hdfbank ) )
    hdfbank = hdfbank [ 0 ]
    # Wall time knob and memory knob
    factor = int ( workflow . cp . get_opt_tags ( 'workflow-coincidence' , 'parallelization-factor' , tags ) )
    # Index the single trigger file of each ifo , for full data and injections .
    ffiles = { }
    ifiles = { }
    for ifo , ffi in zip ( * full_data_trig_files . categorize_by_attr ( 'ifo' ) ) :
        ffiles [ ifo ] = ffi [ 0 ]
    for ifo , ifi in zip ( * inj_trig_files . categorize_by_attr ( 'ifo' ) ) :
        ifiles [ ifo ] = ifi [ 0 ]
    injinj_files = FileList ( )
    injfull_files = FileList ( )
    fullinj_files = FileList ( )
    # For the injfull and fullinj separation we take the pivot _ ifo on one side ,
    # and the rest that are attached to the fixed _ ifo on the other side
    for ifo in ifiles : # ifiles is keyed on ifo
        if ifo == pivot_ifo :
            injinj_files . append ( ifiles [ ifo ] )
            injfull_files . append ( ifiles [ ifo ] )
            fullinj_files . append ( ffiles [ ifo ] )
        else :
            injinj_files . append ( ifiles [ ifo ] )
            injfull_files . append ( ffiles [ ifo ] )
            fullinj_files . append ( ifiles [ ifo ] )
    combo = [ ( injinj_files , "injinj" ) , ( injfull_files , "injfull" ) , ( fullinj_files , "fullinj" ) , ]
    bg_files = { 'injinj' : [ ] , 'injfull' : [ ] , 'fullinj' : [ ] }
    # One executable per combination ; one node per parallelization slice .
    for trig_files , ctag in combo :
        findcoinc_exe = PyCBCFindMultiifoCoincExecutable ( workflow . cp , 'multiifo_coinc' , ifos = ifiles . keys ( ) , tags = tags + [ ctag ] , out_dir = out_dir )
        for i in range ( factor ) :
            group_str = '%s/%s' % ( i , factor )
            coinc_node = findcoinc_exe . create_node ( trig_files , hdfbank , stat_files , veto_file , veto_name , group_str , pivot_ifo , fixed_ifo , tags = [ veto_name , str ( i ) ] )
            bg_files [ ctag ] += coinc_node . output_files
            workflow . add_node ( coinc_node )
    logging . info ( '...leaving coincidence for injections' )
    return setup_multiifo_statmap_inj ( workflow , ifiles . keys ( ) , bg_files , background_file , out_dir , tags = tags + [ veto_name ] )
|
def _randomized_hands ( ) :
    """Deal the 28 dominoes of a double-six set into 4 random hands.

    :return: 4 hands, obtained by shuffling the 28 dominoes used in
             this variation of the game, and distributing them evenly
    """
    # Enumerate every unique domino (low, high) with 0 <= low <= high <= 6.
    deck = []
    for low in range(7):
        for high in range(low, 7):
            deck.append(dominoes.Domino(low, high))
    random.shuffle(deck)
    # Slice the shuffled deck into four consecutive groups of seven.
    return [dominoes.Hand(deck[start:start + 7]) for start in (0, 7, 14, 21)]
|
def get_completions ( self , candidates ) :
    """Map each candidate block key in the course to its completion value.

    Given an iterable collection of block_keys in the course, returns a
    mapping of the block_keys to the present completion values of their
    associated blocks.

    If a completion is not found for a given block in the current course,
    0.0 is returned.  The service does not attempt to verify that the block
    exists within the course.

    Parameters:
        candidates: collection of BlockKeys within the current course.
            Note: usage keys may not have the course run filled in for old
            mongo courses.  Completion records are checked against the
            candidates with the course run filled in from self._course_key.

    Return value:
        dict[BlockKey] -> float: Mapping blocks to their completion value.
    """
    matching = BlockCompletion.user_course_completion_queryset(
        self._user, self._course_key
    ).filter(block_key__in=candidates)
    result = BlockCompletion.completion_by_block_key(matching)
    # Normalize each candidate's course run to match the stored records,
    # then default any block without a record to 0.0.
    for block_key in (c.replace(course_key=self._course_key) for c in candidates):
        result.setdefault(block_key, 0.0)
    return result
|
def set_n ( self , value ) :
    """Setter for ``n``.

    :param value: the new value of ``n``; must be an int
    :raises TypeError: if ``value`` is not an int
    """
    if not isinstance(value, int):
        raise TypeError("The type of n must be int.")
    self.__n = value
|
def get_mapping ( session , table , candidates , generator , key_map ) :
    """Generate map of keys and values for the candidate from the generator.

    :param session: The database session.
    :param table: The table we will be inserting into (i.e. Feature or Label).
    :param candidates: The candidates to get mappings for.
    :param generator: A generator yielding (candidate_id, key, value) tuples.
    :param key_map: A mutable dict which values will be added to as
        {key: [relations]}.
    :type key_map: Dict
    :return: Generator of dictionaries of
        {"candidate_id": _, "keys": _, "values": _}
    :rtype: generator of dict
    """
    for candidate in candidates:
        # Start from whatever key/value pairs are already stored in the DB
        # for this candidate, falling back to an empty mapping.
        try:
            existing = session.query(table).filter(table.candidate_id == candidate.id).one()
            merged = dict(zip(existing.keys, existing.values))
        except NoResultFound:
            merged = {}
        # Overlay freshly generated values, dropping zeros.
        for _cid, key, value in generator(candidate):
            if value != 0:
                merged[key] = value
        # Record which candidate class each key was observed on.
        tablename = candidate.__class__.__tablename__
        for key in merged:
            key_map.setdefault(key, set()).add(tablename)
        yield {
            "candidate_id": candidate.id,
            "keys": list(merged.keys()),
            "values": list(merged.values()),
        }
|
def check_key ( self , key , key_extra_len = 0 ) :
    """Checks sanity of key.

    Fails if:
        Key length is > SERVER_MAX_KEY_LENGTH (raises MemcachedKeyLengthError).
        Contains control characters (raises MemcachedKeyCharacterError).
        Is a unicode string (raises MemcachedStringEncodingError).
        Is not a str (raises MemcachedKeyTypeError).
        Is None or empty (raises MemcachedKeyNoneError).

    :param key: the key to validate; may be a (hash, key) tuple, in which
        case only the key half is checked.
    :param key_extra_len: extra bytes the caller will append to the key
        (e.g. a prefix), counted toward the length limit.
    """
    # A (hash, key) tuple carries the actual key in its second slot.
    if isinstance ( key , tuple ) :
        key = key [ 1 ]
    # NOTE(review): this is a truthiness check, so empty-string keys are
    # also rejected with the "Key is None" error.
    if not key :
        raise Client . MemcachedKeyNoneError ( "Key is None" )
    # Python 2 semantics: unicode keys must be explicitly encoded by the
    # caller before being used.
    if isinstance ( key , unicode ) :
        raise Client . MemcachedStringEncodingError ( "Keys must be str()'s, not unicode. Convert your unicode " "strings using mystring.encode(charset)!" )
    if not isinstance ( key , str ) :
        raise Client . MemcachedKeyTypeError ( "Key must be str()'s" )
    if isinstance ( key , basestring ) :
        # server_max_key_length == 0 disables the length check entirely.
        if self . server_max_key_length != 0 and len ( key ) + key_extra_len > self . server_max_key_length :
            raise Client . MemcachedKeyLengthError ( "Key length is > %s" % self . server_max_key_length )
        # The memcached protocol forbids control characters in keys.
        if not valid_key_chars_re . match ( key ) :
            raise Client . MemcachedKeyCharacterError ( "Control characters not allowed" )
|
def write ( url , content , ** args ) :
    """Put the object/collection into the resource at *url*.

    NOTE(review): despite the original mention of a "file URL", the body
    opens an HTTPResource, so this presumably targets HTTP(S) URLs --
    confirm against HTTPResource's definition.

    :param url: URL of the resource to write to.
    :param content: the object/collection to store.
    :param args: extra keyword arguments forwarded to HTTPResource.
    """
    with HTTPResource ( url , ** args ) as resource :
        resource . write ( content )
|
def construct_headline ( need_data , app ) :
    """Constructs the node-structure for the headline/title container.

    :param need_data: need_info container
    :param app: the Sphinx application (its builder is used to create the
        reference node for the need id)
    :return: a docutils line node holding type, title and id inline nodes
    """
    # Textual pieces of the headline.
    prefix = '{}: '.format(need_data["type_name"])
    title = need_data["title"]
    need_id = "{}".format(need_data["id"])
    spacer = " "

    type_node = nodes.inline(prefix, prefix, classes=["needs-type"])
    title_node = nodes.inline(title, title, classes=["needs-title"])

    # The id is rendered as a reference pointing back at the need itself.
    id_node = nodes.inline(classes=["needs-id"])
    id_text = nodes.Text(need_id, need_id)
    id_node += make_refnode(
        app.builder,
        fromdocname=need_data['docname'],
        todocname=need_data['docname'],
        targetid=need_data['id'],
        child=id_text.deepcopy(),
        title=need_id,
    )

    # A single spacer node instance is reused between the parts, matching
    # the original construction.
    spacer_node = nodes.inline(spacer, spacer, classes=["needs-spacer"])

    headline = nodes.line(classes=["headline"])
    for part in (type_node, spacer_node, title_node, spacer_node, id_node):
        headline.append(part)
    return headline
|
def arrayCast ( source , dtype ) :
    """Casts a NumPy array to the specified datatype.

    The copy is stored in memory if there is sufficient available space,
    or else a buffer from ``arrayFactory`` (which may be backed by a
    memory-mapped temporary file) provides the underlying storage.

    :param source: the NumPy array to cast
    :param dtype: the target datatype
    :return: a new array of ``dtype`` holding the cast data
    """
    # If the cast copy fits in currently-available RAM, let NumPy allocate it.
    needed = _requiredSize(source.shape, dtype)
    if psutil.virtual_memory().available > needed:
        return source.astype(dtype, subok=False)
    # Otherwise obtain a buffer via arrayFactory and cast into it in place.
    target = arrayFactory(source.shape, dtype)
    np.copyto(target, source, casting='unsafe')
    return target
|
def add_service_group ( self , lb_id , allocation = 100 , port = 80 , routing_type = 2 , routing_method = 10 ) :
    """Adds a new service group to the load balancer.

    :param int lb_id: The id of the load balancer where the service resides
    :param int allocation: percent of connections to allocate toward the
        group
    :param int port: the port of the service group
    :param int routing_type: the routing type to set on the service group
    :param int routing_method: The routing method to set on the group
    :return: the result of editing the load balancer via the API
    """
    # Fetch the virtual servers (with nested service groups) so the new
    # group can be appended to the existing configuration.
    mask = 'virtualServers[serviceGroups[services[groupReferences]]]'
    load_balancer = self . lb_svc . getObject ( id = lb_id , mask = mask )
    service_template = { 'port' : port , 'allocation' : allocation , 'serviceGroups' : [ { 'routingTypeId' : routing_type , 'routingMethodId' : routing_method } ] }
    load_balancer [ 'virtualServers' ] . append ( service_template )
    # Persist the modified configuration back to the load balancer.
    return self . lb_svc . editObject ( load_balancer , id = lb_id )
|
def complete ( self ) :
    """Mark the task complete.

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> project = user.get_project('PyTodoist')
    >>> task = project.add_task('Install PyTodoist')
    >>> task.complete()
    """
    # 'item_close' is the API command that closes (completes) an item;
    # it is issued on behalf of the owner of this task's project.
    args = { 'id' : self . id }
    _perform_command ( self . project . owner , 'item_close' , args )
|
def get_balance ( self ) :
    """Retrieves the balance for the configured account.

    :return: the balance string as reported by the site, or None when the
        success pattern is not found in the response.
    :raises InvalidAccountException: if the account appears unregistered.
    :raises AuthDeniedException: if the PIN is rejected.
    :raises RequestErrorException: if the site reports an error.
    """
    self.br.open(self.MOBILE_WEB_URL % {'accountno': self.account})
    try:
        # The presence of a 'Register' link indicates a new (unregistered)
        # account, which cannot have a balance.
        self.br.find_link(text='Register')
        raise InvalidAccountException
    except mechanize.LinkNotFoundError:
        pass
    self.br.follow_link(text='My sarafu')
    self.br.follow_link(text='Balance Inquiry')
    self.br.select_form(nr=0)
    self.br['pin'] = self.pin
    r = self.br.submit().read()
    # Pin valid?
    if re.search(r'Invalid PIN', r):
        raise AuthDeniedException
    # An error could occur for other reasons
    if re.search(r'Error occured', r):
        raise RequestErrorException
    # If it was successful, extract the balance.  Search once and reuse the
    # match object instead of running the identical regex twice as before.
    match = re.search(r'Your balance is TSH (?P<balance>[\d\.]+)', r)
    if match:
        return match.group('balance')
|
def main ( ) :
"""Purge a single fastly url"""
|
parser = OptionParser ( description = "Purge a single url from fastly." )
parser . add_option ( "-k" , "--key" , dest = "apikey" , default = "" , help = "fastly api key" )
parser . add_option ( "-H" , "--host" , dest = "host" , help = "host to purge from" )
parser . add_option ( "-p" , "--path" , dest = "path" , help = "path to purge" )
( options , args ) = parser . parse_args ( )
for val in options . __dict__ . values ( ) :
if val is None :
print "Missing required options"
parser . print_help ( )
sys . exit ( 1 )
client = fastly . connect ( options . apikey )
purge = client . purge_url ( options . host , options . path )
print purge
|
def make_schema_from ( value , env ) :
    """Make a Schema object from the given spec.

    The input and output types of this function are super unclear, and are
    held together by ponies, wishes, duct tape, and a load of tests.  See
    the comments for horrific entertainment.

    :param value: a spec -- possibly an unevaluated framework.Thunk, an
        already-resolved schema.Schema, a tuple, or a list of specs.
    :param env: the evaluation environment used to force Thunks.
    :return: a schema.Schema instance.
    :raises exceptions.EvaluationError: if the value cannot be turned into
        a schema.
    """
    # So this thing may not need to evaluate anything [0]
    if isinstance ( value , framework . Thunk ) :
        value = framework . eval ( value , env )
    # We're a bit messy. In general, this has evaluated to a Schema object,
    # but not necessarily: for tuples and lists, we still need to treat the
    # objects as specs.
    if isinstance ( value , schema . Schema ) :
        return value
    if framework . is_tuple ( value ) :
        # If it so happens that the thing is a tuple, we need to pass in the
        # data in a bit of a different way into the schema factory (in a
        # dictionary with {fields, required} keys).
        return schema_spec_from_tuple ( value )
    if framework . is_list ( value ) :
        # [0] This list may contain tuples, which oughta be treated as specs,
        # or already-resolved schema objects (as returned by 'int' and
        # 'string' literals).  The recursive call deals with both.
        return schema . from_spec ( [ make_schema_from ( x , env ) for x in value ] )
    raise exceptions . EvaluationError ( 'Can\'t make a schema from %r' % value )
|
def check_url ( url ) :
    """Check whether the given URL is dead or alive.

    Returns a dict with four keys:

        "url": The URL that was checked (string)
        "alive": Whether the URL was working, True or False
        "status": The HTTP status code of the response from the URL,
            e.g. 200, 401, 500 (int)
        "reason": The reason for the success or failure of the check,
            e.g. "OK", "Unauthorized", "Internal Server Error" (string)

    The "status" may be None if we did not get a valid HTTP response,
    e.g. in the event of a timeout, DNS failure or invalid HTTP response.

    The "reason" will always be a string, but may be a requests library
    exception string rather than an HTTP reason string if we did not get a
    valid HTTP response.
    """
    result = {"url": url}
    try:
        # NOTE(review): no timeout is set, so a hung server can block this
        # call indefinitely -- consider requests.get(url, timeout=...).
        response = requests.get(url)
        result["status"] = response.status_code
        result["reason"] = response.reason
        # Raise if status_code is not OK.
        response.raise_for_status()
        result["alive"] = True
    except AttributeError as err:
        # requests seems to throw these for some invalid URLs.  Inspect
        # err.args[0] rather than the Python-2-only err.message attribute
        # (which no longer exists on Python 3) -- for single-argument
        # exceptions the two are equivalent.
        if err.args and err.args[0] == "'NoneType' object has no attribute 'encode'":
            result["alive"] = False
            result["reason"] = "Invalid URL"
            result["status"] = None
        else:
            raise
    except requests.exceptions.RequestException as err:
        result["alive"] = False
        if "reason" not in result:
            result["reason"] = str(err)
        if "status" not in result:
            # This can happen if the response is invalid HTTP, if we get a
            # DNS failure, or a timeout, etc.
            result["status"] = None
    # We should always have these four fields in the result.
    assert "url" in result
    assert result.get("alive") in (True, False)
    assert "status" in result
    assert "reason" in result
    return result
|
def clean_super_features ( self ) :
    """Removes any null & non-integer values from the super feature list.

    Leaves ``self.super_features`` untouched when it is empty or None.
    """
    if not self.super_features:
        return
    cleaned = []
    for feature in self.super_features:
        if feature is not None and is_valid_digit(feature):
            cleaned.append(int(feature))
    self.super_features = cleaned
|
def delete_translations_for_item_and_its_children ( self , item , languages = None ) :
    """Deletes the pending translation tasks of an item and its children.

    Used when a model is not enabled anymore.

    :param item: the translatable model instance whose tasks are removed
    :param languages: optional list of language codes; when given, only
        tasks for those languages are deleted
    :return: None
    """
    self . log ( '--- Deleting translations ---' )
    if not self . master :
        self . set_master ( item )
    object_name = '{} - {}' . format ( item . _meta . app_label . lower ( ) , item . _meta . verbose_name )
    object_class = item . __class__ . __name__
    object_pk = item . pk
    # Only pending (done=False) tasks are targeted for deletion.
    filter_by = { 'object_class' : object_class , 'object_name' : object_name , 'object_pk' : object_pk , 'done' : False }
    if languages :
        filter_by . update ( { 'language__code__in' : languages } )
    TransTask . objects . filter ( ** filter_by ) . delete ( )
    # then process child objects from main
    children = self . get_translatable_children ( item )
    for child in children :
        self . delete_translations_for_item_and_its_children ( child , languages )
|
def remove ( self , indices ) :
    """Remove the fragments corresponding to the given list of indices.

    :param indices: the list of indices to be removed
    :type  indices: list of int
    :raises ValueError: if one of the indices is not valid
    """
    if not self._is_valid_index(indices):
        self.log_exc(u"The given list of indices is not valid", None, True, ValueError)
    # Keep every fragment whose position is not marked for removal;
    # duplicate indices naturally collapse into a single removal.
    doomed = set(indices)
    self.__fragments = [
        fragment
        for position, fragment in enumerate(self)
        if position not in doomed
    ]
|
def unset_config_value ( self , name , quiet = False ) :
    """Unset (remove) a configuration value.

    Parameters:
        name: the name of the value to unset (remove key in dictionary)
        quiet: disable verbose output if True (default is False)
    """
    config_data = self._read_config_file()
    # Only rewrite the file when the key actually existed.
    if name in config_data:
        config_data.pop(name)
        self._write_config_file(config_data)
    if not quiet:
        self.print_config_value(name, separator=' is now set to: ')
|
def setup_proxies_or_exit (
        config : Dict [ str , Any ] ,
        tokennetwork_registry_contract_address : str ,
        secret_registry_contract_address : str ,
        endpoint_registry_contract_address : str ,
        user_deposit_contract_address : str ,
        service_registry_contract_address : str ,
        blockchain_service : BlockChainService ,
        contracts : Dict [ str , Any ] ,
        routing_mode : RoutingMode ,
        pathfinding_service_address : str ,
        pathfinding_eth_address : str ,
) -> Proxies :
    """Initialize and setup the contract proxies.

    Depending on the provided contract addresses via the CLI, the routing mode,
    the environment type and the network id try to initialize the proxies.
    Returns the initialized proxies or exits the application with an error if
    there is a problem.

    Also depending on the given arguments populate config with PFS related
    settings.
    """
    node_network_id = config['chain_id']
    environment_type = config['environment_type']
    contract_addresses_given = (
        tokennetwork_registry_contract_address is not None and
        secret_registry_contract_address is not None and
        endpoint_registry_contract_address is not None
    )
    # Without explicit CLI addresses we must have deployment data for this
    # network/environment, otherwise we cannot construct any proxy.
    if not contract_addresses_given and not bool(contracts):
        click.secho(
            f"There are no known contract addresses for network id '{node_network_id}'. and "
            f"environment type {environment_type}. Please provide them on the command line or "
            f"in the configuration file.",
            fg='red',
        )
        sys.exit(1)
    try:
        token_network_registry = blockchain_service.token_network_registry(
            tokennetwork_registry_contract_address or to_canonical_address(
                contracts[CONTRACT_TOKEN_NETWORK_REGISTRY]['address'],
            ),
        )
    except ContractVersionMismatch as e:
        handle_contract_version_mismatch(e)
    except AddressWithoutCode:
        handle_contract_no_code('token network registry', tokennetwork_registry_contract_address)
    except AddressWrongContract:
        handle_contract_wrong_address(
            'token network registry',
            tokennetwork_registry_contract_address,
        )
    try:
        secret_registry = blockchain_service.secret_registry(
            secret_registry_contract_address or to_canonical_address(
                contracts[CONTRACT_SECRET_REGISTRY]['address'],
            ),
        )
    except ContractVersionMismatch as e:
        handle_contract_version_mismatch(e)
    except AddressWithoutCode:
        handle_contract_no_code('secret registry', secret_registry_contract_address)
    except AddressWrongContract:
        handle_contract_wrong_address('secret registry', secret_registry_contract_address)
    # If services contracts are provided via the CLI use them instead
    if user_deposit_contract_address is not None:
        contracts[CONTRACT_USER_DEPOSIT] = user_deposit_contract_address
    if service_registry_contract_address is not None:
        contracts[CONTRACT_SERVICE_REGISTRY] = (service_registry_contract_address)
    user_deposit = None
    # The user deposit contract is only wired up in development environments
    # (and never for smoketests).
    should_use_user_deposit = (
        environment_type == Environment.DEVELOPMENT and
        ID_TO_NETWORKNAME.get(node_network_id) != 'smoketest' and
        CONTRACT_USER_DEPOSIT in contracts
    )
    if should_use_user_deposit:
        try:
            user_deposit = blockchain_service.user_deposit(
                user_deposit_contract_address or to_canonical_address(
                    contracts[CONTRACT_USER_DEPOSIT]['address'],
                ),
            )
        except ContractVersionMismatch as e:
            handle_contract_version_mismatch(e)
        except AddressWithoutCode:
            handle_contract_no_code('user deposit', user_deposit_contract_address)
        except AddressWrongContract:
            # Label normalized from 'user_deposit' to match sibling handlers.
            handle_contract_wrong_address('user deposit', user_deposit_contract_address)
    service_registry = None
    if CONTRACT_SERVICE_REGISTRY in contracts or service_registry_contract_address:
        try:
            service_registry = blockchain_service.service_registry(
                service_registry_contract_address or to_canonical_address(
                    contracts[CONTRACT_SERVICE_REGISTRY]['address'],
                ),
            )
        except ContractVersionMismatch as e:
            handle_contract_version_mismatch(e)
        except AddressWithoutCode:
            handle_contract_no_code('service registry', service_registry_contract_address)
        except AddressWrongContract:
            # Fixed copy-paste bug: this previously reported 'secret registry'.
            handle_contract_wrong_address('service registry', service_registry_contract_address)
    if routing_mode == RoutingMode.PFS:
        if environment_type == Environment.PRODUCTION:
            click.secho(
                'Requested production mode and PFS routing mode. This is not supported',
                fg='red',
            )
            sys.exit(1)
        if not service_registry and not pathfinding_service_address:
            click.secho(
                'Requested PFS routing mode but no service registry or no specific pathfinding '
                ' service address is provided. Please provide it via either the '
                '--service-registry-contract-address or the --pathfinding-service-address '
                'argument',
                fg='red',
            )
            sys.exit(1)
        pfs_config = configure_pfs_or_exit(
            pfs_address=pathfinding_service_address,
            pfs_eth_address=pathfinding_eth_address,
            routing_mode=routing_mode,
            service_registry=service_registry,
        )
        msg = 'Eth address of selected pathfinding service is unknown.'
        assert pfs_config.eth_address is not None, msg
        config['services']['pathfinding_service_address'] = pfs_config.url
        config['services']['pathfinding_eth_address'] = pfs_config.eth_address
        config['services']['pathfinding_fee'] = pfs_config.fee
    else:
        config['services']['pathfinding_service_address'] = None
        config['services']['pathfinding_eth_address'] = None
    proxies = Proxies(
        token_network_registry=token_network_registry,
        secret_registry=secret_registry,
        user_deposit=user_deposit,
        service_registry=service_registry,
    )
    return proxies
|
def _copy_scratch_to_state ( args : Dict [ str , Any ] ) -> None :
    """Copies scratch shards to state shards.

    :param args: mapping holding the shard buffers; the scratch shard is
        copied element-wise into the state shard in place via np.copyto.
    """
    np . copyto ( _state_shard ( args ) , _scratch_shard ( args ) )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.