signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def save_method_args(method):
    """Decorator that records the most recent call arguments on the instance.

    Each call stores a namedtuple with ``args`` and ``kwargs`` fields on the
    instance under the attribute ``_saved_<method name>``, then invokes the
    wrapped method normally. Because the attribute lives on the instance,
    different instances keep independent saved arguments.

    >>> class MyClass:
    ...     @save_method_args
    ...     def method(self, a, b):
    ...         print(a, b)
    >>> my_ob = MyClass()
    >>> my_ob.method(1, 2)
    1 2
    >>> my_ob._saved_method.args
    (1, 2)
    >>> my_ob._saved_method.kwargs
    {}
    >>> my_ob.method(a=3, b='foo')
    3 foo
    >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
    True
    """
    args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Record the call on the instance before delegating to the method.
        saved = args_and_kwargs(args, kwargs)
        setattr(self, '_saved_' + method.__name__, saved)
        return method(self, *args, **kwargs)

    return wrapper
|
def add_task_group(self, task_group, project):
    """Create a task group (preview API).

    :param task_group: :class:`<TaskGroupCreateParameter>
        <azure.devops.v5_0.task_agent.models.TaskGroupCreateParameter>`
        task group object to create.
    :param str project: Project ID or project name.
    :rtype: :class:`<TaskGroup> <azure.devops.v5_0.task_agent.models.TaskGroup>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    body = self._serialize.body(task_group, 'TaskGroupCreateParameter')
    response = self._send(
        http_method='POST',
        location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
        version='5.0-preview.1',
        route_values=route_values,
        content=body,
    )
    return self._deserialize('TaskGroup', response)
|
def debug_reduce(self, rule, tokens, parent, i):
    """Custom format-and-print for our kind of tokens.

    Called while debugging grammar reduce rules. When the parent token
    carries position information, the output is prefixed with its
    line/column; otherwise a plain space prefix is used.
    """
    prefix = ' '
    if parent and tokens:
        tok = tokens[parent]
        if hasattr(tok, 'line'):
            prefix = 'L.%3d.%03d: ' % (tok.line, tok.column)
    print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
|
def write(self, source=None, **kwargs):
    '''Wrapper to call the writer's write method if present.

    Args:
        source (pandasdmx.model.Message, iterable): stuff to be written.
            If a :class:`pandasdmx.model.Message` is given, the writer
            itself must determine what to write unless specified in the
            keyword arguments. If an iterable is given, the writer should
            write each item. Keyword arguments may specify what to do with
            the output depending on the writer's API. Defaults to self.msg.

    Returns:
        type: anything the writer returns.
    '''
    # BUGFIX: use an identity check so that an explicitly-passed empty
    # iterable (e.g. []) is still forwarded to the writer instead of being
    # silently replaced by self.msg; only a missing argument defaults.
    if source is None:
        source = self.msg
    return self._writer.write(source=source, **kwargs)
|
def start(config, args):
    """Start Glances.

    Selects the Glances operating mode (standalone, client, client browser,
    server or web server) by querying ``core``, imports the matching class,
    instantiates it and runs its main loop until it terminates.
    """
    # Load mode: pick the class matching the requested operating mode.
    # Imports are done lazily so only the selected mode's dependencies load.
    global mode
    if core.is_standalone():
        from glances.standalone import GlancesStandalone as GlancesMode
    elif core.is_client():
        if core.is_client_browser():
            from glances.client_browser import GlancesClientBrowser as GlancesMode
        else:
            from glances.client import GlancesClient as GlancesMode
    elif core.is_server():
        from glances.server import GlancesServer as GlancesMode
    elif core.is_webserver():
        from glances.webserver import GlancesWebServer as GlancesMode
    # NOTE(review): if none of the predicates match, GlancesMode is unbound
    # and the next line raises NameError — presumably core guarantees one
    # mode is always active; confirm.
    # Init the mode
    logger.info("Start {} mode".format(GlancesMode.__name__))
    mode = GlancesMode(config=config, args=args)
    # Start the main loop (blocks until shutdown)
    mode.serve_forever()
    # Shutdown
    mode.end()
|
def _set_config_keystone(self, username, password):
    """Configure Keystone authentication.

    Builds a ``KeystoneAuth`` from the module-level ``settings`` constants
    plus the caller-supplied credentials, and stores it on
    ``self._keystone_auth`` for later authenticated calls.
    """
    self._keystone_auth = KeystoneAuth(
        settings.KEYSTONE_AUTH_URL,
        settings.KEYSTONE_PROJECT_NAME,
        username,
        password,
        settings.KEYSTONE_USER_DOMAIN_NAME,
        settings.KEYSTONE_PROJECT_DOMAIN_NAME,
        settings.KEYSTONE_TIMEOUT)
|
def get(self, ldap_dn):
    '''Return an LDAP entry by DN.

    Args:
        ldap_dn (str): distinguished name of the entry to fetch.
    '''
    # Search exactly at the given DN (BASE scope) and return the first
    # (only possible) match. NOTE(review): this mutates the query state on
    # self, so subsequent searches start from this DN unless reset.
    self.base_dn = ldap_dn
    self.sub_tree = BASE
    return self.first()
|
def update_routing_table_from(self, *routers):
    """Try to update the routing table from each given router in turn.

    :return: True on the first successful fetch, False if every router failed
    """
    for candidate in routers:
        table = self.fetch_routing_table(candidate)
        if table is None:
            continue
        self.routing_table.update(table)
        return True
    return False
|
def run(self, b, compute, times=[], **kwargs):
    """Run the backend (master entry point).

    If within mpirun, workers should call _run_worker instead of run.
    NOTE(review): ``times=[]`` is a mutable default; it is only passed
    through here, never mutated, so it is harmless in practice.
    """
    self.run_checks(b, compute, times, **kwargs)
    logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs))
    packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs)
    if mpi.enabled:
        # broadcast the packet to ALL workers
        mpi.comm.bcast(packet, root=0)
        # now even the master can become a worker and take on a chunk
        packet['b'] = b
        rpacketlists = self._run_chunk(**packet)
        # now receive all packetlists
        rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0)
    else:
        # serial fallback: master processes the whole packet itself
        rpacketlists_per_worker = [self._run_chunk(**packet)]
    return self._fill_syns(new_syns, rpacketlists_per_worker)
|
def round_down(x, decimal_places):
    """Round a float down (toward negative infinity) to ``decimal_places``.

    Parameters
    ----------
    x : float
    decimal_places : int
        Number of digits to keep after the decimal point.

    Returns
    -------
    rounded_float : float

    Examples
    --------
    >>> round_down(1.23456, 3)
    1.234
    >>> round_down(1.23456, 2)
    1.23
    """
    from math import floor

    # 10 ** decimal_places is the idiomatic scaling factor; the original
    # built it by string concatenation (int('1' + '0' * n)).
    scale = 10 ** decimal_places
    return floor(x * scale) / scale
|
def _marshaled_dispatch(self, data, dispatch_method=None):
    """Dispatches an XML-RPC method from marshalled (XML) data.

    XML-RPC methods are dispatched from the marshalled (XML) data using
    the _dispatch method and the result is returned as marshalled data.
    For backwards compatibility, a dispatch function can be provided as
    an argument (see comment in SimpleXMLRPCRequestHandler.do_POST) but
    overriding the existing method through subclassing is the preferred
    means of changing method dispatch behavior.

    NOTE(review): Python 2 only — uses ``except Fault, fault`` syntax and
    the deprecated ``sys.exc_type`` / ``sys.exc_value`` globals.
    """
    try:
        params, method = xmlrpclib.loads(data)
        # generate response
        if dispatch_method is not None:
            response = dispatch_method(method, params)
        else:
            response = self._dispatch(method, params)
        # wrap response in a singleton tuple
        response = (response,)
        response = xmlrpclib.dumps(response, methodresponse=1,
                                   allow_none=self.allow_none,
                                   encoding=self.encoding)
    except Fault, fault:
        # A Fault raised by the handler is marshalled back as-is.
        response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                   encoding=self.encoding)
    except:
        # report exception back to server
        response = xmlrpclib.dumps(
            xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
            # @UndefinedVariable exc_value only available when we actually have an exception
            encoding=self.encoding, allow_none=self.allow_none,)
    return response
|
def format_subpages(self, page, subpages):
    """Render the 'subpages.html' template for a page's subpages.

    Returns the parsed XML tree with asset references resolved, or None
    when there are no subpages or the template is unavailable.
    """
    if not subpages:
        return None

    try:
        template = self.get_template('subpages.html')
    except IOError:
        return None

    rendered = template.render({'page': page, 'subpages': subpages})
    tree = etree.XML(rendered)
    # Resolve every element referencing an asset via a src attribute.
    for asset in tree.xpath('.//*[@src]'):
        self.__lookup_asset(asset, self.extension.project, page)
    return tree
|
def _remove_broken_links():
    '''Remove broken links in ``<conda prefix>/etc/microdrop/plugins/enabled/``.

    Returns
    -------
    list
        List of links removed (if any).
    '''
    enabled_dir = MICRODROP_CONDA_PLUGINS.joinpath('enabled')
    if not enabled_dir.isdir():
        return []

    broken_links = []
    # The platform check deliberately stays inside the loop so that, as
    # before, non-Windows systems only raise when there is actually a
    # directory entry to inspect.
    for dir_i in enabled_dir.walkdirs(errors='ignore'):
        if platform.system() == 'Windows':
            # Junction/link target no longer exists.
            if dir_i.isjunction() and not dir_i.readlink().isdir():
                broken_links.append(dir_i)
        else:
            raise NotImplementedError('Unsupported platform')

    removed_links = []
    for link_i in broken_links:
        try:
            link_i.unlink()
        except OSError:
            # Best-effort removal: skip links we cannot delete (was a bare
            # `except:` which also swallowed KeyboardInterrupt/SystemExit).
            pass
        else:
            removed_links.append(link_i)
    return removed_links
|
def density(self, R, z, nsigma=None, mc=False, nmc=10000, gl=True, ngl=_DEFAULTNGL, **kwargs):
    """NAME:

       density

    PURPOSE:

       calculate the density at R, z by marginalizing over velocity

    INPUT:

       R - radius at which to calculate the density (can be Quantity)

       z - height at which to calculate the density (can be Quantity)

    OPTIONAL INPUT:

       nsigma - number of sigma to integrate the velocities over

       scipy.integrate.tplquad kwargs epsabs and epsrel

       mc= if True, calculate using Monte Carlo integration

       nmc= if mc, use nmc samples

       gl= if True, calculate using Gauss-Legendre integration

       ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension

    OUTPUT:

       density at (R, z)

    HISTORY:

       2012-07-26 - Written - Bovy (IAS@MPIA)
    """
    # The density is the zeroth velocity moment (n=m=o=0) of the DF.
    return self._vmomentdensity(R, z, 0., 0., 0., nsigma=nsigma, mc=mc,
                                nmc=nmc, gl=gl, ngl=ngl, **kwargs)
|
def _read_signer(key_filename):
    """Read a hex-encoded private key from a file and build a Signer.

    Args:
        key_filename: The filename where the key is stored. If None,
            defaults to the default key for the current user
            (~/.sawtooth/keys/<user>.priv).

    Returns:
        Signer: the signer

    Raises:
        CliException: If unable to read the file or parse the key.
    """
    if key_filename is not None:
        filename = key_filename
    else:
        filename = os.path.join(
            os.path.expanduser('~'), '.sawtooth', 'keys',
            getpass.getuser() + '.priv')

    try:
        with open(filename, 'r') as key_file:
            signing_key = key_file.read().strip()
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))

    try:
        private_key = Secp256k1PrivateKey.from_hex(signing_key)
    except ParseError as e:
        raise CliException('Unable to read key in file: {}'.format(str(e)))

    context = create_context('secp256k1')
    return CryptoFactory(context).new_signer(private_key)
|
def _run_vardict_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    """Detect SNPs and indels with VarDict.

    var2vcf_valid uses -A flag which reports all alleles and improves sensitivity:
    https://github.com/AstraZeneca-NGS/VarDict/issues/35#issuecomment-276738191
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            vrs = bedutils.population_variant_regions(items)
            target = shared.subset_variant_regions(vrs, region, out_file, items=items, do_merge=False)
            num_bams = len(align_bams)
            sample_vcf_names = []
            # for individual sample names, given batch calling may be required
            for bamfile, item in zip(align_bams, items):
                # prepare commands
                sample = dd.get_sample_name(item)
                vardict = get_vardict_command(items[0])
                opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
                vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
                compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
                fix_ambig_ref = vcfutils.fix_ambiguous_cl()
                fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
                remove_dup = vcfutils.remove_dup_cl()
                py_cl = os.path.join(utils.get_bcbio_bin(), "py")
                jvm_opts = _get_jvm_opts(items[0], tx_out_file)
                setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
                contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
                lowfreq_filter = _lowfreq_linear_filter(0, False)
                # NOTE: cmd is expanded with cmd.format(**locals()) below, so
                # every {name} placeholder must match a local variable name in
                # this scope — do not rename locals without updating cmd.
                cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
                       "-N {sample} -b {bamfile} {opts} "
                       "| teststrandbias.R "
                       "| var2vcf_valid.pl -A -N {sample} -E {var2vcf_opts} "
                       "| {contig_cl} | bcftools filter -i 'QUAL >= 0' | {lowfreq_filter} "
                       "| {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
                if num_bams > 1:
                    # Batch calling: each sample goes to its own temp VCF,
                    # merged at the end of the transaction.
                    temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
                    tmp_out = temp_file_prefix + ".temp.vcf"
                    tmp_out += ".gz" if out_file.endswith("gz") else ""
                    sample_vcf_names.append(tmp_out)
                    with file_transaction(item, tmp_out) as tx_tmp_file:
                        if not _is_bed_file(target):
                            # No callable regions: emit an empty VCF stub.
                            vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
                        else:
                            cmd += " > {tx_tmp_file}"
                            do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
                else:
                    if not _is_bed_file(target):
                        vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
                    else:
                        cmd += " > {tx_out_file}"
                        do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
            if num_bams > 1:
                # N.B. merge_variant_files wants region in 1-based end-inclusive
                # coordinates. Thus use bamprep.region_to_gatk
                vcfutils.merge_variant_files(orig_files=sample_vcf_names,
                                             out_file=tx_out_file, ref_file=ref_file,
                                             config=config,
                                             region=bamprep.region_to_gatk(region))
    return out_file
|
def _FormatSocketUnixToken(self, token_data):
    """Formats a Unix socket token as a dictionary of values.

    Args:
        token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.

    Returns:
        dict[str, str]: token values.
    """
    family = token_data.socket_family
    # Unknown families fall back to the literal string 'UNKNOWN'.
    return {
        'protocols': bsmtoken.BSM_PROTOCOLS.get(family, 'UNKNOWN'),
        'family': family,
        'path': token_data.socket_path}
|
def comp_listing(request, directory_slug=None):
    """Render the list of HTML templates and subdirectories in COMPS_DIR."""
    working_dir = settings.COMPS_DIR
    if directory_slug:
        working_dir = os.path.join(working_dir, directory_slug)

    entries = os.listdir(working_dir)
    # Templates are the .html files; directories are everything non-file.
    templates = sorted(
        entry for entry in entries
        if os.path.splitext(entry)[1] == '.html')
    dirnames = sorted(
        entry for entry in entries
        if not os.path.isfile(os.path.join(working_dir, entry)))

    context = {
        'directories': dirnames,
        'templates': templates,
        'subdirectory': directory_slug,
    }
    return render(request, "comps/comp_listing.html", context)
|
def display_movements_stats(ct, base_assignment):
    """Display the amount of movement between two assignments.

    :param ct: The cluster's ClusterTopology.
    :param base_assignment: The cluster assignment to compare against.
    """
    movement_count, movement_size, leader_changes = \
        stats.get_partition_movement_stats(ct, base_assignment)
    summary = (
        'Total partition movements: {movement_count}\n'
        'Total partition movement size: {movement_size}\n'
        'Total leader changes: {leader_changes}'
    ).format(
        movement_count=movement_count,
        movement_size=movement_size,
        leader_changes=leader_changes,
    )
    print(summary)
|
def check_image_state(self, image_id, wait=True):
    '''method for checking the state of an image on AWS EC2

    :param image_id: string with AWS id of image
    :param wait: [optional] boolean to wait for image while pending
    :return: string reporting state of image (None if deregistered/invalid)
    '''
    title = '%s.check_image_state' % self.__class__.__name__

    # validate inputs
    input_fields = {'image_id': image_id}
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # notify state check
    self.iam.printer('Querying AWS region %s for state of image %s.' % (self.iam.region_name, image_id))

    # check connection to API (any failure is reported as a connection error)
    try:
        self.connection.describe_instances()
    except Exception:
        raise AWSConnectionError(title)

    # check existence of image
    try:
        response = self.connection.describe_images(ImageIds=[image_id])
    except Exception:
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)
    if 'Images' not in response.keys():
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)
    elif not response['Images'][0]:
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)

    # poll until the image record reports a State field
    elif 'State' not in response['Images'][0].keys():
        from time import sleep
        self.iam.printer('Checking into the status of image %s' % image_id, flush=True)
        state_timeout = 0
        while 'State' not in response['Images'][0].keys():
            self.iam.printer('.', flush=True)
            sleep(3)
            state_timeout += 1
            response = self.connection.describe_images(ImageIds=[image_id])
            if state_timeout > 3:
                raise Exception('\nFailure to determine status of image %s.' % image_id)
        self.iam.printer(' done.')

    image_state = response['Images'][0]['State']

    # return None if image has already been deregistered or is invalid
    if image_state == 'deregistered':
        self.iam.printer('Image %s has already been deregistered.' % image_id)
        return None
    elif image_state in ('invalid', 'transient', 'failed'):
        self.iam.printer('Image %s is %s.' % (image_id, image_state))
        return None

    # wait while image is pending
    elif image_state == 'pending':
        self.iam.printer('Image %s is %s.' % (image_id, image_state), flush=True)
        if not wait:
            return image_state
        from time import sleep
        # BUGFIX: the original imported ``timeit.timeit`` as ``timer``;
        # calling ``timer()`` *benchmarks a no-op statement a million times*
        # and returns that benchmark's duration, not the current time, so
        # the adaptive polling delay never worked. ``perf_counter`` returns
        # a real monotonic timestamp.
        from time import perf_counter as timer
        delay = 3
        state_timeout = 0
        while image_state != 'available':
            self.iam.printer('.', flush=True)
            sleep(delay)
            t3 = timer()
            response = self.connection.describe_images(ImageIds=[image_id])
            t4 = timer()
            state_timeout += 1
            response_time = t4 - t3
            # aim for roughly one poll every 3 seconds regardless of API latency
            if 3 - response_time > 0:
                delay = 3 - response_time
            else:
                delay = 0
            if state_timeout > 300:
                raise Exception('\nTimeout. Failure initializing image %s on AWS in less than 15min' % image_id)
            image_state = response['Images'][0]['State']
        self.iam.printer(' done.')

    # report outcome
    self.iam.printer('Image %s is %s.' % (image_id, image_state))
    return image_state
|
def mediatype_delete(mediatypeids, **kwargs):
    '''Delete mediatype

    :param interfaceids: IDs of the mediatypes to delete
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: ID of deleted mediatype, False on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.mediatype_delete 3
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if not conn_args:
            # No connection details available; fall through to the
            # KeyError handler and return the empty result.
            raise KeyError
        method = 'mediatype.delete'
        params = mediatypeids if isinstance(mediatypeids, list) else [mediatypeids]
        ret = _query(method, params, conn_args['url'], conn_args['auth'])
        return ret['result']['mediatypeids']
    except KeyError:
        return ret
|
def plot_mds(self, rank="auto", metric="braycurtis", method="pcoa", title=None,
             xlabel=None, ylabel=None, color=None, size=None, tooltip=None,
             return_chart=False, label=None,):
    """Plot beta diversity distance matrix using multidimensional scaling (MDS).

    Parameters
    ----------
    rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified level.
    metric : {'braycurtis', 'manhattan', 'jaccard', 'unifrac', 'unweighted_unifrac'}, optional
        Function to use when calculating the distance between two samples.
    method : {'pcoa', 'smacof'}
        Algorithm to use for ordination. PCoA uses eigenvalue decomposition and is
        not well suited to non-euclidean distance functions. SMACOF is an iterative
        optimization strategy that can be used as an alternative.
    title : `string`, optional
        Text label at the top of the plot.
    xlabel : `string`, optional
        Text label along the horizontal axis.
    ylabel : `string`, optional
        Text label along the vertical axis.
    size : `string` or `tuple`, optional
        Metadata field(s); the size of points in the resulting plot will change
        based on the metadata associated with each sample.
    color : `string` or `tuple`, optional
        Metadata field(s); the color of points in the resulting plot will change
        based on the metadata associated with each sample.
    tooltip : `string` or `list`, optional
        Metadata field(s) whose values are displayed when a point is hovered over.
    label : `string` or `callable`, optional
        A metadata field (or function) used to label each analysis. If passing a
        function, a dict containing the metadata for each analysis is passed as
        the first and only positional argument. The callable must return a string.

    Examples
    --------
    Scatter plot of weighted UniFrac distance between all our samples, using
    counts at the genus level.

    >>> plot_mds(rank='genus', metric='unifrac')

    Notes
    -----
    **For `smacof`**: The values reported on the axis labels are Pearson's
    correlations between the distances between points on each axis alone, and the
    corresponding distances in the distance matrix calculated using the
    user-specified metric. They reflect how well the MDS placement preserves the
    calculated distances, not how well the metric captures the underlying data.
    """
    if len(self._results) < 2:
        raise OneCodexException("`plot_mds` requires 2 or more valid classification results.")

    dists = self._compute_distance(rank, metric).to_data_frame()

    # here we figure out what to put in the tooltips and get the appropriate data
    if tooltip:
        if not isinstance(tooltip, list):
            tooltip = [tooltip]
    else:
        tooltip = []
    tooltip.insert(0, "Label")
    if color and color not in tooltip:
        tooltip.insert(1, color)
    if size and size not in tooltip:
        tooltip.insert(2, size)
    magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)

    if method == "smacof":
        # adapted from https://scikit-learn.org/stable/auto_examples/manifold/plot_mds.html
        x_field = "MDS1"
        y_field = "MDS2"
        seed = np.random.RandomState(seed=3)
        mds = manifold.MDS(max_iter=3000, eps=1e-12, random_state=seed,
                           dissimilarity="precomputed", n_jobs=1)
        pos = mds.fit(dists).embedding_
        plot_data = pd.DataFrame(pos, columns=[x_field, y_field], index=dists.index)
        # normalize to [0,1]
        plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)
        # determine how much of the original distance is captured by each of the
        # axes after MDS. this implementation of MDS does not use eigen
        # decomposition and so there's no simple way of returning a 'percent of
        # variance explained' value
        r_squared = []
        for axis in [0, 1]:
            mds_dist = pos.copy()
            mds_dist[::, axis] = 0
            mds_dist = squareform(euclidean_distances(mds_dist).round(6))
            r_squared.append(pearsonr(mds_dist, squareform(dists))[0])
        # label the axes
        x_extra_label = "r² = %.02f" % (r_squared[0],)
        y_extra_label = "r² = %.02f" % (r_squared[1],)
    elif method == "pcoa":
        # suppress eigenvalue warning from skbio -- not because it's an invalid
        # warning, but because lots of folks in the field run pcoa on these
        # distances functions, even if statistically inappropriate. perhaps this
        # will change if we ever become more opinionated about the analyses that
        # we allow our users to do (roo)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # round to avoid float precision errors
            ord_result = ordination.pcoa(dists.round(6))
        # get first two components
        plot_data = ord_result.samples.iloc[:, [0, 1]]
        # normalize to [0,1]
        plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)
        plot_data.index = dists.index
        # name of first two components
        x_field, y_field = plot_data.columns.tolist()
        x_extra_label = "%0.02f%%" % (ord_result.proportion_explained[0] * 100,)
        y_extra_label = "%0.02f%%" % (ord_result.proportion_explained[1] * 100,)
    else:
        raise OneCodexException("MDS method must be one of: smacof, pcoa")

    # label the axes
    if xlabel is None:
        xlabel = "{} ({})".format(x_field, x_extra_label)
    if ylabel is None:
        ylabel = "{} ({})".format(y_field, y_extra_label)

    plot_data = pd.concat([plot_data, magic_metadata], axis=1).reset_index()

    alt_kwargs = dict(
        x=alt.X(x_field, axis=alt.Axis(title=xlabel)),
        y=alt.Y(y_field, axis=alt.Axis(title=ylabel)),
        tooltip=[magic_fields[t] for t in tooltip],
        href="url:N",
        url="https://app.onecodex.com/classification/" + alt.datum.classification_id,
    )
    # only add these parameters if they are in use
    if color:
        alt_kwargs["color"] = magic_fields[color]
    if size:
        alt_kwargs["size"] = magic_fields[size]

    chart = (alt.Chart(plot_data)
             .transform_calculate(url=alt_kwargs.pop("url"))
             .mark_circle()
             .encode(**alt_kwargs))

    if title:
        chart = chart.properties(title=title)

    if return_chart:
        return chart
    else:
        chart.interactive().display()
|
def exclude(prop):
    '''Don't replicate a property that is normally replicated: an ordering
    column, or a many-to-one relation that is marked for replication from
    the other side.'''
    if isinstance(prop, QueryableAttribute):
        # Unwrap the instrumented attribute to the underlying property.
        prop = prop.property
    assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
    _excluded.add(prop)
    if isinstance(prop, RelationshipProperty):
        # Also exclude columns that participate in this relationship
        for local_column in prop.local_columns:
            _excluded.add(local_column)
|
def get_next_id(self):
    """Gets the next Id in this list.

    return: (osid.id.Id) - the next Id in this list. The has_next()
        method should be used to test that a next Id is available
        before calling this method.
    raise: IllegalState - no more elements available in this list
    raise: OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    try:
        next_item = next(self)
    except StopIteration:
        # Exhausted: translate into the OSID-mandated exception.
        raise IllegalState('no more elements available in this list')
    except Exception:
        # Need to specify exceptions here!
        raise OperationFailed()
    return next_item
|
def decompose_nfkd(text):
    """Perform unicode compatibility (NFKD) decomposition.

    This will replace some non-standard value representations in unicode
    and normalise them, while also separating characters and their
    diacritics into two separate codepoints.
    """
    if text is None:
        return None
    # Lazily create the ICU transliterator on first use and cache it as a
    # function attribute so subsequent calls reuse the same instance.
    transliterator = getattr(decompose_nfkd, '_tr', None)
    if transliterator is None:
        transliterator = Transliterator.createInstance('Any-NFKD')
        decompose_nfkd._tr = transliterator
    return transliterator.transliterate(text)
|
def _serialize(self, include_run_logs=False, strict_json=False):
    """Serialize a representation of this Job to a Python dict object.

    NOTE(review): Python 2 only — uses ``iteritems``/``itervalues``.
    """
    # return tasks in sorted order if graph is in a valid state
    try:
        topo_sorted = self.topological_sort()
        t = [self.tasks[task]._serialize(include_run_logs=include_run_logs,
                                         strict_json=strict_json)
             for task in topo_sorted]
    except:
        # Fall back to unsorted tasks when the sort fails — presumably the
        # graph contains a cycle or is otherwise invalid; TODO confirm and
        # narrow this bare except to the actual failure type.
        t = [task._serialize(include_run_logs=include_run_logs,
                             strict_json=strict_json)
             for task in self.tasks.itervalues()]
    dependencies = {}
    for k, v in self.graph.iteritems():
        dependencies[k] = list(v)
    result = {'job_id': self.job_id, 'name': self.name,
              'parent_id': self.parent.dagobah_id, 'tasks': t,
              'dependencies': dependencies, 'status': self.state.status,
              'cron_schedule': self.cron_schedule, 'next_run': self.next_run,
              'notes': self.notes}
    if strict_json:
        # Round-trip through the strict encoder to coerce non-JSON types.
        result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
    return result
|
def _wrap_section(source, width):
    # type: (str, int) -> str
    """Wrap the given section string to the current terminal size.

    Intelligently wraps the section string to the given width. When wrapping
    section lines, it auto-adjusts the spacing between terms and definitions.
    It also adjusts commands to fit the correct length for the arguments.

    Args:
        source: The section string to wrap.
        width: The width to wrap to.

    Returns:
        The wrapped section string.
    """
    if _get_section('usage', source):
        return _wrap_usage_section(source, width)
    if _is_definition_section(source):
        return _wrap_definition_section(source, width)
    # Generic prose: wrap each cleaned line independently, then flatten.
    wrapped = (
        textwrap.wrap(line, width, replace_whitespace=False)
        for line in inspect.cleandoc(source).splitlines()
    )
    return '\n'.join(piece for chunk in wrapped for piece in chunk)
|
def digital_channel_air(self, opt1='?', opt2='?'):
    """Change Channel (Digital).

    Pass channels "XX.YY" as TV.digital_channel_air(XX, YY).

    Arguments:
        opt1: integer, 1-99 major channel ('?' queries the current value)
        opt2: integer (optional), 1-99 minor channel
    """
    if opt1 == '?':
        channel_code = '?'
    elif opt2 == '?':
        # Major channel only: zero-pad to four digits.
        channel_code = str(opt1).rjust(4, "0")
    else:
        # Major + minor: two zero-padded digits each.
        channel_code = '{:02d}{:02d}'.format(opt1, opt2)
    return self._send_command('digital_channel_air', channel_code)
|
def course_register_user(self, course, username=None, password=None, force=False):
    """Register a user to the course.

    :param course: a Course object
    :param username: The username of the user that we want to register.
        If None, uses self.session_username()
    :param password: Password for the course. Needed if
        course.is_password_needed_for_registration() and force != True
    :param force: Force registration (skip password and availability checks)
    :return: True if the registration succeeded, False else
    """
    if username is None:
        username = self.session_username()
    user_info = self._database.users.find_one({"username": username})
    # Do not continue registering the user in the course if username is empty.
    if not username:
        return False
    if not force:
        if not course.is_registration_possible(user_info):
            return False
        if course.is_password_needed_for_registration() and course.get_registration_password() != password:
            return False
    if self.course_is_user_registered(course, username):
        return False
    # already registered?
    aggregation = self._database.aggregations.find_one({"courseid": course.get_id(), "default": True})
    if aggregation is None:
        # No default classroom yet: create it with this student as the
        # first member.
        self._database.aggregations.insert({"courseid": course.get_id(),
                                            "description": "Default classroom",
                                            "students": [username],
                                            "tutors": [], "groups": [],
                                            "default": True})
    else:
        # Append the student to the existing default classroom.
        self._database.aggregations.find_one_and_update(
            {"courseid": course.get_id(), "default": True},
            {"$push": {"students": username}})
    self._logger.info("User %s registered to course %s", username, course.get_id())
    return True
|
def __field_callback(self, field, event, *args, **kwargs):
    # type: (str, str, *Any, **Any) -> Any
    """Calls the method registered in the component for the given field event.

    :param field: A field name
    :param event: An event (IPOPO_CALLBACK_VALIDATE, ...)
    :return: The callback result, or True when no callback applies or the
             callback returned None
    :raise Exception: Something went wrong
    """
    # Look up the callback registered for this field/event pair
    info = self.context.get_field_callback(field, event)
    if not info:
        # No registered callback: nothing to do
        return True

    method, only_when_valid = info
    if only_when_valid and self.state != StoredInstance.VALID:
        # Component state does not satisfy the callback's requirement
        return True

    # Invoke the callback on the component instance
    outcome = method(self.instance, field, *args, **kwargs)
    # Special case: a callback returning nothing counts as success
    return True if outcome is None else outcome
|
def call_api_fetch(self, params, get_latest_only=True):
    """Fetch JSON data from the hub, e.g.::

        GET https://myserver/piwebapi/assetdatabases/D0NxzXSxtlKkGzAhZfHOB-KAQLhZ5wrU-UyRDQnzB_zGVAUEhMQUZTMDRcTlVHUkVFTg HTTP/1.1
        Host: myserver
        Accept: application/json

    :param params: request parameters (unused here; the URL comes from self.request_info)
    :param get_latest_only: when True, filter out items not newer than
        self.request_info.last_fetch_time and remember the newest timestamp
        for the next call
    :return: dict with keys 'ok', 'content' (JSON string), 'available_matches',
        'returned_matches' and, on failure, 'reason'
    :raise ConnectionRefusedError: when the hub answers with a non-OK status
        (subclass of ConnectionError, so existing handlers still catch it)
    :raise ConnectionError: when the HTTP request itself fails
    """
    output_format = 'application/json'
    url_string = self.request_info.url_string()
    # Passing the username and required output format
    headers_list = {"Accept": output_format, "Host": self.request_info.host}
    try:
        hub_result = requests.get(url_string, headers=headers_list, timeout=10.000, verify=False)
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except:` which also swallowed the
        # ConnectionRefusedError raised for non-OK responses.
        raise ConnectionError("Error connecting to Triangulum hub - check internet connection.")
    if not hub_result.ok:
        # ConnectionRefusedError subclasses ConnectionError: backward compatible.
        raise ConnectionRefusedError("Connection to Triangulum hub refused: " + hub_result.reason)
    result = {}
    result_content_json = hub_result.json()
    result['ok'] = hub_result.ok
    result['content'] = json.dumps(result_content_json)
    if "Items" in result_content_json:
        available_matches = len(result_content_json['Items'])
    else:
        # Single-object responses have no 'Items' list
        available_matches = 1
    # No Date params allowed in call to hub, so apply "get latest only" to hub results here...
    if get_latest_only and self.request_info.last_fetch_time is not None:
        try:
            # Filter python objects with list comprehensions
            new_content = [x for x in result_content_json['Items']
                           if self.get_date_time(x['Timestamp']) > self.request_info.last_fetch_time]
            result_content_json['Items'] = new_content
            result['content'] = json.dumps(result_content_json)
            result['ok'] = True
        except ValueError as e:
            result['ok'] = False
            result['reason'] = str(e)
        except Exception as e:
            result['ok'] = False
            result['reason'] = 'Problem sorting results by date to get latest only. ' + str(e)
    result['available_matches'] = available_matches
    if 'Items' in result_content_json:
        result['returned_matches'] = len(result_content_json['Items'])
    else:
        result['returned_matches'] = 1
    # Set last_fetch_time for next call.
    # BUGFIX: guard the 'Items' access — previously this indexed ['Items']
    # unconditionally and raised KeyError on responses without an item list.
    if get_latest_only and result_content_json.get('Items'):
        try:
            newlist = sorted(result_content_json['Items'],
                             key=lambda k: self.get_date_time(k["Timestamp"]), reverse=True)
            most_recent = newlist[0]["Timestamp"]
            self.request_info.last_fetch_time = self.get_date_time(most_recent)
        except ValueError as e:
            result['ok'] = False
            result['reason'] = str(e)
        except Exception as e:
            result['ok'] = False
            result['reason'] = 'Problem sorting results by date to get latest only. ' + str(e)
    return result
|
def jupytext(args=None):
    """Internal implementation of the Jupytext command line.

    :param args: command-line arguments (None means sys.argv); parsed with
        parse_jupytext_args
    :return: 0 when --version exits early, 1 after --paired-paths, otherwise
        the number of round-trip conversion errors encountered
    :raise ValueError: on inconsistent combinations of arguments
    """
    args = parse_jupytext_args(args)

    def log(text):
        # Honor --quiet for every informational message
        if not args.quiet:
            sys.stdout.write(text + '\n')

    if args.version:
        log(__version__)
        return 0

    if args.pre_commit:
        # In pre-commit mode the notebook list comes from the git index
        if args.notebooks:
            raise ValueError('--pre-commit takes notebooks from the git index. Do not pass any notebook here.')
        args.notebooks = notebooks_in_git_index(args.input_format)
        log('[jupytext] Notebooks in git index are:')
        for nb_file in args.notebooks:
            log(nb_file)

    def writef_git_add(notebook_, nb_file_, fmt_):
        # Write the notebook; in pre-commit mode also stage the written file
        writef(notebook_, nb_file_, fmt_)
        if args.pre_commit:
            system('git', 'add', nb_file_)

    # Read notebook from stdin
    if not args.notebooks:
        if not args.pre_commit:
            args.notebooks = ['-']

    if args.set_formats is not None:
        # Replace empty string with None
        args.update_metadata = recursive_update(args.update_metadata, {'jupytext': {'formats': args.set_formats or None}})

    if args.paired_paths:
        if len(args.notebooks) != 1:
            raise ValueError('--paired-paths applies to a single notebook')
        print_paired_paths(args.notebooks[0], args.input_format)
        return 1

    if not args.to and not args.output and not args.sync and not args.pipe and not args.check and not args.test and not args.test_strict and not args.update_metadata:
        raise ValueError('Please select an action')
    if args.output and len(args.notebooks) != 1:
        raise ValueError('Please input a single notebook when using --output')

    if args.input_format:
        args.input_format = long_form_one_format(args.input_format)
    if args.to:
        args.to = long_form_one_format(args.to)
        set_format_options(args.to, args.format_options)

    # Main loop
    round_trip_conversion_errors = 0

    # Wildcard extension on Windows #202: expand patterns ourselves
    notebooks = []
    for pattern in args.notebooks:
        if '*' in pattern or '?' in pattern:
            notebooks.extend(glob.glob(pattern))
        else:
            notebooks.append(pattern)

    for nb_file in notebooks:
        if nb_file == '-' and args.sync:
            raise ValueError('Cannot sync a notebook on stdin')
        nb_dest = args.output or (None if not args.to else ('-' if nb_file == '-' else full_path(base_path(nb_file, args.input_format), args.to)))

        # Just acting on metadata / pipe => save in place
        if not nb_dest and not args.sync:
            nb_dest = nb_file

        if nb_dest == '-':
            # Keep stdout clean when the converted notebook itself goes there
            args.quiet = True

        # I. ### Read the notebook ###
        fmt = copy(args.input_format) or {}
        set_format_options(fmt, args.format_options)
        log('[jupytext] Reading {}{}'.format(nb_file if nb_file != '-' else 'stdin', ' in format {}'.format(short_form_one_format(fmt)) if 'extension' in fmt else ''))
        notebook = readf(nb_file, fmt)
        if not fmt:
            # Infer the source format from notebook metadata, then file extension
            text_representation = notebook.metadata.get('jupytext', {}).get('text_representation', {})
            ext = os.path.splitext(nb_file)[1]
            if text_representation.get('extension') == ext:
                fmt = {key: text_representation[key] for key in text_representation if key in ['extension', 'format_name']}
            elif ext:
                fmt = {'extension': ext}

        # Update the metadata
        if args.update_metadata:
            log('[jupytext] Updating notebook metadata with {}'.format(args.update_metadata))
            # Are we updating a text file that has a metadata filter? #212
            if fmt['extension'] != '.ipynb' and notebook.metadata.get('jupytext', {}).get('notebook_metadata_filter') == '-all':
                notebook.metadata.get('jupytext', {}).pop('notebook_metadata_filter')
            recursive_update(notebook.metadata, args.update_metadata)

        # Read paired notebooks
        if args.sync:
            set_prefix_and_suffix(fmt, notebook, nb_file)
            notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(notebook, fmt, nb_file, log)

        # II. ### Apply commands onto the notebook ###
        # Pipe the notebook into the desired commands
        for cmd in args.pipe or []:
            notebook = pipe_notebook(notebook, cmd, args.pipe_fmt)

        # and/or test the desired commands onto the notebook
        for cmd in args.check or []:
            pipe_notebook(notebook, cmd, args.pipe_fmt, update=False)

        # III. ### Possible actions ###
        modified = args.update_metadata or args.pipe

        # a. Test round trip conversion (test mode never writes: note the continue)
        if args.test or args.test_strict:
            try:
                test_round_trip_conversion(notebook, args.to, update=args.update, allow_expected_differences=not args.test_strict, stop_on_first_error=args.stop_on_first_error)
            except NotebookDifference as err:
                round_trip_conversion_errors += 1
                sys.stdout.write('{}: {}'.format(nb_file, str(err)))
            continue

        # b. Output to the desired file or format
        if nb_dest:
            if nb_dest == nb_file and not args.to:
                args.to = fmt

            # Test consistency between dest name and output format
            if args.to and nb_dest != '-':
                base_path(nb_dest, args.to)

            # Describe what jupytext is doing
            if os.path.isfile(nb_dest) and args.update:
                if not nb_dest.endswith('.ipynb'):
                    raise ValueError('--update is only for ipynb files')
                action = ' (destination file updated)'
                check_file_version(notebook, nb_file, nb_dest)
                combine_inputs_with_outputs(notebook, readf(nb_dest), fmt)
            elif os.path.isfile(nb_dest):
                action = ' (destination file replaced)'
            else:
                action = ''

            log('[jupytext] Writing {nb_dest}{format}{action}'.format(nb_dest=nb_dest, format=' in format ' + short_form_one_format(args.to) if args.to and 'format_name' in args.to else '', action=action))
            writef_git_add(notebook, nb_dest, args.to)

        # c. Synchronize paired notebooks
        if args.sync:
            # Also update the original notebook if the notebook was modified
            if modified:
                inputs_nb_file = outputs_nb_file = None

            formats = notebook.metadata['jupytext']['formats']

            for ipynb in [True, False]:
                # Write first format last so that it is the most recent file
                for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)[::-1]:
                    # Write ipynb first for compatibility with our contents manager
                    if alt_path.endswith('.ipynb') != ipynb:
                        continue
                    # Do not write the ipynb file if it was not modified
                    # But, always write text representations to make sure they are the most recent
                    if alt_path == inputs_nb_file and alt_path == outputs_nb_file:
                        continue
                    log("[jupytext] Updating '{}'".format(alt_path))
                    writef_git_add(notebook, alt_path, alt_fmt)

    return round_trip_conversion_errors
|
def plot_reaction_scheme(df, temperature, pressure, potential, pH, e_lim=None):
    """Returns a matplotlib object with the plotted reaction path.

    Parameters
    ----------
    df : pandas DataFrame generated by reaction_network
    temperature : numeric
        Temperature in K.
    pressure : numeric
        Pressure in mbar (None is rendered as '0').
    potential : numeric
        Electric potential vs. SHE in eV.
    pH : numeric
        pH in bulk solution.
    e_lim : tuple, optional
        Limits for the energy axis.

    Returns
    -------
    fig : matplotlib figure object.
    """
    # One legend column per ~20 rows of the network
    ncols = int((df.shape[0] / 20)) + 1
    fig_width = ncols + 1.5 * len(df['intermediate_labels'][0])
    figsize = (fig_width, 6)
    fig, ax = plt.subplots(figsize=figsize)
    # BUGFIX: identity check (`is None`) instead of `== None`
    if pressure is None:
        pressure_label = '0'
    else:
        pressure_label = str(pressure)
    lines = []
    for j, energy_list in enumerate(df['reaction_energy']):
        ts = df['transition_states'][j]
        R = df['reaction_coordinate'][j]
        # Each energy level becomes a horizontal [y, y] segment
        E = [[x, x] for x in energy_list]
        labels = df['system_label']
        for i, n in enumerate(R):
            if i == 0:
                # First segment carries the legend entry
                line = Line2D([0], [0], color=colors[j], lw=4)
                lines.append(line)
                ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25, solid_capstyle='round', path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]), pe.Normal()], label=labels[j])
                ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2), color=colors[j], linewidth=1.)
            else:
                if ts[i]:
                    # Transition state: fit a parabola through neighbouring levels
                    xts = [R[i - 1][1], R[i][0], R[i + 1][0]]
                    yts = [energy_list[i - 1], energy_list[i], energy_list[i + 1]]
                    z1 = np.polyfit(xts, yts, 2)
                    xp1 = np.linspace(xts[0], xts[2], 100)
                    p1 = np.poly1d(z1)
                    ax.plot(xp1, p1(xp1), ls='--', color=colors[j], linewidth=2.)
                    ax.plot(xts[1], yts[1], marker='o', c=colors[j], mec=edge_colors[j], lw=1.5, markersize=7)
                else:
                    ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25, solid_capstyle='round', path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]), pe.Normal()])
                    if i < len(R) - 1:
                        # Dashed connector to the next level
                        ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2), color=colors[j], linewidth=1.)
    ax.legend(handlelength=0.4, ncol=ncols, loc=2, frameon=False, bbox_to_anchor=(1.05, 1), borderaxespad=0., fontsize=12)
    if e_lim:
        ax.set_ylim(e_lim)
    ax.set_xlabel('Reaction coordinate')
    ax.set_ylabel('Reaction free energy (eV)')
    reaction_labels = df['intermediate_labels'][0]
    reaction_labels = [sub(w) for w in reaction_labels]
    plt.xticks(np.arange(len(reaction_labels)) + 0.25, tuple(reaction_labels), rotation=45)
    # plt.tight_layout()
    # Conditions text box placed just outside the axes
    # NOTE(review): `b` mixes ylim[0] and ylim[1] — presumably intentional offset; confirm.
    a = ax.get_xlim()[1] + 0.05 * ax.get_xlim()[1]
    b = ax.get_ylim()[0] + 0.05 * ax.get_ylim()[1]
    if potential is not None and pH is not None:
        ax.text(a, b, 'U = ' + str(potential) + ' eV vs. SHE \n pH = ' + str(pH) + ' \n T = ' + str(temperature) + ' K \n p = ' + pressure_label + ' mbar', fontsize=12)
    else:
        ax.text(a, b, 'T = ' + str(temperature) + ' \n p = ' + pressure_label + ' mbar', fontsize=12)
    plt.tight_layout()
    return (fig)
|
def get(self, object_name):
    """Return the wrapped object, or None if it doesn't exist.

    :param object_name: name of the object to look up in the container
    :return: Object wrapper, or None when the name is absent
    """
    # Guard clause: unknown names yield None
    if object_name not in self:
        return None
    return Object(obj=self.container.get_object(object_name))
|
def gpu_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed, store_on_gpu):
    """This function calls the a trous algorithm code to decompose the input into its wavelet coefficients. This is
    the isotropic undecimated wavelet transform implemented for a GPU.

    INPUTS:
    in1             (no default):   Array on which the decomposition is to be performed.
    scale_count     (no default):   Maximum scale to be considered.
    scale_adjust    (no default):   Adjustment to scale value if first scales are of no interest.
    store_smoothed  (no default):   Boolean specifier for whether the smoothed image is stored or not.
    store_on_gpu    (no default):   Boolean specifier for whether the decomposition is stored on the gpu or not.

    OUTPUTS:
    detail_coeffs                   Array containing the detail coefficients.
    C0              (optional):     Array containing the smoothest version of the input.
    """
    # The following simple kernel just allows for the construction of a 3D decomposition on the GPU.
    ker = SourceModule("""
__global__ void gpu_store_detail_coeffs(float *in1, float *in2, float* out1, int *scale, int *adjust)
{
    const int len = gridDim.x*blockDim.x;
    const int i = (blockDim.x * blockIdx.x + threadIdx.x);
    const int j = (blockDim.y * blockIdx.y + threadIdx.y)*len;
    const int k = (blockDim.z * blockIdx.z + threadIdx.z)*(len*len);
    const int tid2 = i + j;
    const int tid3 = i + j + k;

    if ((blockIdx.z + adjust[0])==scale[0])
        { out1[tid3] = in1[tid2] - in2[tid2]; }
}
""")
    # Filter-bank for use in the a trous algorithm (B3 spline coefficients).
    wavelet_filter = (1. / 16) * np.array([1, 4, 6, 4, 1], dtype=np.float32)
    wavelet_filter = gpuarray.to_gpu_async(wavelet_filter)
    # Initialises an empty array to store the detail coefficients.
    detail_coeffs = gpuarray.empty([scale_count - scale_adjust, in1.shape[0], in1.shape[1]], np.float32)
    # Determines whether the array is already on the GPU or not. If not, moves it to the GPU.
    # NOTE(review): the bare `except:` assumes any failure means in1 is already a gpuarray — confirm.
    try:
        gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32))
    except:
        gpu_in1 = in1
    # Sets up some working arrays on the GPU to prevent memory transfers.
    gpu_tmp = gpuarray.empty_like(gpu_in1)
    gpu_out1 = gpuarray.empty_like(gpu_in1)
    gpu_out2 = gpuarray.empty_like(gpu_in1)
    # Sets up some parameters required by the algorithm on the GPU.
    gpu_scale = gpuarray.zeros([1], np.int32)
    gpu_adjust = gpuarray.zeros([1], np.int32)
    gpu_adjust += scale_adjust
    # Fetches the a trous kernels and sets up the unique storing kernel.
    gpu_a_trous_row_kernel, gpu_a_trous_col_kernel = gpu_a_trous()
    gpu_store_detail_coeffs = ker.get_function("gpu_store_detail_coeffs")
    # 32x32 thread blocks tiling the image (assumes dimensions divisible by 32 — TODO confirm).
    grid_rows = int(in1.shape[0] // 32)
    grid_cols = int(in1.shape[1] // 32)
    # The following loop, which iterates up to scale_adjust, applies the a trous algorithm to the scales which are
    # considered insignificant. This is important as each set of wavelet coefficients depends on the last smoothed
    # version of the input.
    if scale_adjust > 0:
        for i in range(0, scale_adjust):
            gpu_a_trous_row_kernel(gpu_in1, gpu_tmp, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
            gpu_a_trous_col_kernel(gpu_tmp, gpu_out1, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
            # Ping-pong the buffers: the smoothed image becomes the next input.
            gpu_in1, gpu_out1 = gpu_out1, gpu_in1
            gpu_scale += 1
    # The meat of the algorithm - two sequential applications of the a trous followed by determination and storing of
    # the detail coefficients. C0 is reassigned the value of C on each loop - C0 is always the smoothest version of the
    # input image.
    for i in range(scale_adjust, scale_count):
        gpu_a_trous_row_kernel(gpu_in1, gpu_tmp, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
        gpu_a_trous_col_kernel(gpu_tmp, gpu_out1, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
        # Approximation coefficients.
        gpu_a_trous_row_kernel(gpu_out1, gpu_tmp, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
        gpu_a_trous_col_kernel(gpu_tmp, gpu_out2, wavelet_filter, gpu_scale, block=(32, 32, 1), grid=(grid_cols, grid_rows))
        # Approximation coefficients.
        gpu_store_detail_coeffs(gpu_in1, gpu_out2, detail_coeffs, gpu_scale, gpu_adjust, block=(32, 32, 1), grid=(grid_cols, grid_rows, int(scale_count)))
        # Detail coefficients.
        gpu_in1, gpu_out1 = gpu_out1, gpu_in1
        gpu_scale += 1
    # Return values depend on mode. NOTE: store_smoothed does not work if the result stays on the gpu.
    if store_on_gpu:
        return detail_coeffs
    elif store_smoothed:
        return detail_coeffs.get(), gpu_in1.get()
    else:
        return detail_coeffs.get()
|
def ConsultarDepositosAcopio(self, sep="||"):
    """Return the collection (acopio) depots belonging to the taxpayer.

    :param sep: field separator for the formatted output rows; when None,
        the raw list of depot records is returned instead
    """
    auth = {'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }
    ret = self.client.consultarDepositosAcopio(auth=auth)['respuesta']
    self.__analizar_errores(ret)
    depots = ret.get('acopio', [])
    if sep is None:
        return depots
    # Build "sep field sep field ... sep" rows from the four depot fields
    template = "%s %%s %s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep, sep)
    return [template % (d['codigo'], d['direccion'], d['localidad'], d['codigoPostal'])
            for d in depots]
|
def reinforce_lv_branches_overloading(grid, crit_branches):
    """Choose appropriate cable type for branches with line overloading.

    Parameters
    ----------
    grid : LVGridDing0
        Ding0 LV grid object
    crit_branches : :any:`list`
        List of critical branches incl. its line loading; solved branches
        are removed from this list in place.

    Notes
    -----
    If maximum size cable is not capable to resolve issue due to line
    overloading largest available cable type is assigned to branch.

    Returns
    -------
    :any:`list`
        unsolved_branches: List of branches no suitable cable could be found
    """
    unsolved_branches = []
    cable_lf = cfg_ding0.get('assumptions', 'load_factor_lv_cable_lc_normal')
    cables = grid.network.static_data['LV_cables']
    # BUGFIX: iterate over a shallow copy — the original looped over
    # crit_branches while calling crit_branches.remove(branch), which
    # skips the element following every removal.
    for branch in list(crit_branches):
        I_max_branch_load = branch['s_max'][0]
        I_max_branch_gen = branch['s_max'][1]
        I_max_branch = max([I_max_branch_load, I_max_branch_gen])
        # Cables whose derated ampacity covers the branch current
        suitable_cables = cables[(cables['I_max_th'] * cable_lf) > I_max_branch]
        if not suitable_cables.empty:
            # Pick the smallest adequate cable
            cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin(), :]
            branch['branch'].type = cable_type
            crit_branches.remove(branch)
        else:
            # No cable is sufficient: fall back to the largest available one
            cable_type_max = cables.loc[cables['I_max_th'].idxmax(), :]
            unsolved_branches.append(branch)
            branch['branch'].type = cable_type_max
            logger.error("No suitable cable type could be found for {branch} "
                         "with I_th_max = {current}. "
                         "Cable of type {cable} is chosen during "
                         "reinforcement.".format(branch=branch['branch'], cable=cable_type_max.name, current=I_max_branch))
    return unsolved_branches
|
def clear(self):
    """Reset the query, grouping and sorting inputs of this edit."""
    # Empty the query text and its associated tree first
    self.uiQueryTXT.setText('')
    self.uiQueryTREE.clear()
    # Then blank the remaining line edits
    for line_edit in (self.uiGroupingTXT, self.uiSortingTXT):
        line_edit.setText('')
|
def parallel_map(func, iterable, args=None, kwargs=None, workers=None):
    """Map func on a list using gevent greenlets.

    :param func: function applied on iterable elements
    :type func: function
    :param iterable: elements to map the function over
    :type iterable: iterable
    :param args: positional arguments of func
    :type args: tuple
    :param kwargs: keyword arguments of func
    :type kwargs: dict
    :param workers: limit the number of greenlets running in parallel
    :type workers: int
    :return: list of results in the order of ``iterable``
    :raise BaseException: re-raises any exception instance *returned* by func
    """
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    # Bound concurrency with a Pool when requested; otherwise run all at once.
    pool = Pool(workers) if workers is not None else Group()
    greenlets = [pool.spawn(func, item, *args, **kwargs) for item in iterable]
    # raise_error=True propagates exceptions raised inside the greenlets
    pool.join(raise_error=True)
    results = []
    for greenlet in greenlets:
        # Fetch the value once (the original called .get() twice per greenlet)
        value = greenlet.get()
        # func may have *returned* an exception instance: surface it to the caller
        if isinstance(value, BaseException):
            raise value
        results.append(value)
    return results
|
def set(self, value, pos=None):
    """Set one or many bits to 1 or 0.

    value -- If True bits are set to 1, otherwise they are set to 0.
    pos -- Either a single bit position or an iterable of bit positions.
           Negative numbers are treated in the same way as slice indices.
           Defaults to the entire bitstring.

    Raises IndexError if pos < -self.len or pos >= self.len.
    """
    op = self._set if value else self._unset
    if pos is None:
        pos = xrange(self.len)
    try:
        num_bits = self.len
        for index in pos:
            # Negative positions count from the end, as with slices
            if index < 0:
                index += num_bits
            if not 0 <= index < num_bits:
                raise IndexError("Bit position {0} out of range.".format(index))
            op(index)
    except TypeError:
        # pos was a single position rather than an iterable
        if pos < 0:
            pos += self.len
        if not 0 <= pos < num_bits:
            raise IndexError("Bit position {0} out of range.".format(pos))
        op(pos)
|
def pre_run_hook(self, func, prefix=None):
    """Decorator to add a pre-run hook to this ingredient.

    Pre-run hooks are captured functions that are run just before the
    main function is executed.

    :param func: function to register as a hook
    :param prefix: optional configuration prefix forwarded to ``capture``
    :return: the captured function
    """
    captured = self.capture(func, prefix=prefix)
    self.pre_run_hooks.append(captured)
    return captured
|
def weight_field(self, f):
    """Select one field as the weight field.

    Note that this field will be excluded from feature fields.

    :param f: Selected weight field
    :type f: str
    :rtype: DataFrame
    """
    # Guard clause: a weight field must be named
    if f is None:
        raise ValueError("Field name cannot be None.")
    self._assert_ml_fields_valid(f)
    role_mapping = {f: FieldRole.WEIGHT}
    return _change_singleton_roles(self, role_mapping, clear_feature=True)
|
def total_supply(self, block_identifier='latest'):
    """Return the total supply of the token at the given block identifier.

    :param block_identifier: block at which the call is evaluated
        (default: 'latest')
    """
    supply_fn = self.proxy.contract.functions.totalSupply
    return supply_fn().call(block_identifier=block_identifier)
|
def record(session_file, shell, prompt, alias, envvar):
    """Record a session file. If no argument is passed, commands are written to
    ./session.sh.

    When you are finished recording, run the "stop" command.

    :param session_file: path of the session file to write
    :param shell: shell used for the recorded commands
    :param prompt: prompt template shown while recording
    :param alias: alias directives to define during the session
    :param envvar: environment-variable directives to define during the session
    """
    if os.path.exists(session_file):
        # Ask before clobbering an existing file; abort=True raises on "no"
        click.confirm('File "{0}" already exists. Overwrite?'.format(session_file), abort=True, default=False, )
    secho("We'll do it live!", fg="red", bold=True)
    filename = click.format_filename(session_file)
    secho("RECORDING SESSION: {}".format(filename), fg="yellow", bold=True)
    print_recorder_instructions()
    click.pause()
    click.clear()
    # Save cwd: the recorded session may chdir
    cwd = os.getcwd()
    # Run the recorder
    commands = run_recorder(shell, prompt, aliases=alias, envvars=envvar)
    # Reset cwd
    os.chdir(cwd)
    secho("FINISHED RECORDING SESSION", fg="yellow", bold=True)
    secho("Writing to {0}...".format(filename), fg="cyan")
    # Write header, alias/env directives, then the recorded commands
    with open(session_file, "w", encoding="utf-8") as fp:
        fp.write(HEADER_TEMPLATE.format(shell=shell, prompt=prompt))
        write_directives(fp, "alias", alias)
        write_directives(fp, "env", envvar)
        fp.write("\n")
        fp.write("".join(commands))
        fp.write("\n")
    play_cmd = style("doitlive play {}".format(filename), bold=True)
    echo("Done. Run {} to play back your session.".format(play_cmd))
|
def write_newick_ott(out, ott, ott_id2children, root_ott_id, label_style, prune_flags, create_log_dict=False):
    """Write the OTT subtree rooted at `root_ott_id` to `out` in newick form.

    `out` is an output stream
    `ott` is an OTT instance used for translating labels
    `ott_id2children` is a dict mapping an OTT ID to the IDs of its children
    `root_ott_id` is the root of the subtree to write.
    `label_style` is a facet of OTULabelStyleEnum
    `prune_flags` is a set strings (flags) or OTTFlagUnion instance or None
    if `create_log_dict` is True, a dict will be returned that contains statistics
    about the pruning.
    """
    # create to_prune_fsi_set a set of flag set indices to prune...
    if prune_flags:
        flags_to_prune_list = list(prune_flags)
        to_prune_fsi_set = ott.convert_flag_string_set_to_union(flags_to_prune_list)
    else:
        flags_to_prune_list = []
        to_prune_fsi_set = None
    flags_to_prune_set = frozenset(flags_to_prune_list)
    pfd = {}
    log_dict = None
    if create_log_dict:
        log_dict = {'version': ott.version, 'flags_to_prune': flags_to_prune_list}
        # Map each flag-set index to the frozenset of its flag strings
        fsi_to_str_flag_set = {}
        for k, v in dict(ott.flag_set_id_to_flag_set).items():
            fsi_to_str_flag_set[k] = frozenset(list(v))
        if to_prune_fsi_set:
            # For each pruned flag-set index, record which requested flags matched it
            pfd = {}
            for f in to_prune_fsi_set.keys():
                s = fsi_to_str_flag_set[f]
                str_flag_intersection = flags_to_prune_set.intersection(s)
                pfd[f] = list(str_flag_intersection)
                pfd[f].sort()
            # log_dict['prune_flags_d'] = d
            # log_dict['pfd'] = pfd
    pruned_dict = {}
    num_tips = 0
    num_pruned_anc_nodes = 0
    num_nodes = 0
    num_monotypic_nodes = 0
    if to_prune_fsi_set and ott.has_flag_set_key_intersection(root_ott_id, to_prune_fsi_set):
        # entire taxonomy is pruned off
        if log_dict is not None:
            fsi = ott.get_flag_set_key(root_ott_id)
            pruned_dict[fsi] = {'': [root_ott_id]}
        num_pruned_anc_nodes += 1
    else:
        # Iterative traversal: a plain ID on the stack means "enter node",
        # a 1-tuple means "exit node" (write its closing parenthesis).
        stack = [root_ott_id]
        first_children = set(stack)
        last_children = set()
        while stack:
            ott_id = stack.pop()
            if isinstance(ott_id, tuple):
                # Exiting an internal node: fall through to write label/paren
                ott_id = ott_id[0]
            else:
                num_nodes += 1
                children = ott_id2children[ott_id]
                if to_prune_fsi_set is not None:
                    # Drop children that carry any of the pruned flag sets
                    c = []
                    for child_id in children:
                        if ott.has_flag_set_key_intersection(child_id, to_prune_fsi_set):
                            if log_dict is not None:
                                fsi = ott.get_flag_set_key(child_id)
                                fd = pruned_dict.get(fsi)
                                if fd is None:
                                    pruned_dict[fsi] = {'anc_ott_id_pruned': [child_id]}
                                else:
                                    fd['anc_ott_id_pruned'].append(child_id)
                            num_pruned_anc_nodes += 1
                        else:
                            c.append(child_id)
                    children = c
                nc = len(children)
                if nc < 2:
                    if nc == 1:
                        num_monotypic_nodes += 1
                    else:
                        num_tips += 1
                # A comma precedes every sibling except the first child
                if ott_id not in first_children:
                    out.write(',')
                else:
                    first_children.remove(ott_id)
                if bool(children):
                    out.write('(')
                    first_children.add(children[0])
                    last_children.add(children[-1])
                    stack.append((ott_id,))
                    # a tuple will signal exiting a node...
                    stack.extend([i for i in reversed(children)])
                    continue
            n = ott.get_label(ott_id, label_style)
            n = quote_newick_name(n)
            out.write(n)
            if ott_id in last_children:
                out.write(')')
                last_children.remove(ott_id)
        out.write(';')
    if create_log_dict:
        log_dict['pruned'] = {}
        for fsi, obj in pruned_dict.items():
            f = pfd[fsi]
            f.sort()
            obj['flags_causing_prune'] = f
            nk = ','.join(f)
            log_dict['pruned'][nk] = obj
        log_dict['num_tips'] = num_tips
        log_dict['num_pruned_anc_nodes'] = num_pruned_anc_nodes
        log_dict['num_nodes'] = num_nodes
        log_dict['num_non_leaf_nodes'] = num_nodes - num_tips
        log_dict['num_non_leaf_nodes_with_multiple_children'] = num_nodes - num_tips - num_monotypic_nodes
        log_dict['num_monotypic_nodes'] = num_monotypic_nodes
    return log_dict
|
def _init_ui(self) -> VBox:
    """Initialize the widget UI and return the UI.

    Builds the search controls (query text, image count, image size,
    download button), a preview header and an image pane, then stacks
    them vertically.
    """
    self._search_input = Text(placeholder="What images to search for?")
    # Image count bounded to 1..5000
    self._count_input = BoundedIntText(placeholder="How many pics?", value=10, min=1, max=5000, step=1, layout=Layout(width='60px'))
    self._size_input = Dropdown(options=_img_sizes.keys(), value='>400*300', layout=Layout(width='120px'))
    self._download_button = Button(description="Search & Download", icon="download", layout=Layout(width='200px'))
    self._download_button.on_click(self.on_download_button_click)
    self._output = Output()
    # Single row holding all the controls
    self._controls_pane = HBox([self._search_input, self._count_input, self._size_input, self._download_button], layout=Layout(width='auto', height='40px'))
    self._heading = ""
    self._download_complete_heading = "<h3>Download complete. Here are a few images</h3>"
    self._preview_header = widgets.HTML(self._heading, layout=Layout(height='60px'))
    self._img_pane = Box(layout=Layout(display='inline'))
    return VBox([self._controls_pane, self._preview_header, self._img_pane])
|
def setup_asset_page(self, ):
    """Create and set the model on the asset page

    :returns: None
    :rtype: None
    :raises: None
    """
    # Make both header views size their sections to fit the contents
    tree_header = self.asset_asset_treev.header()
    tree_header.setResizeMode(QtGui.QHeaderView.ResizeToContents)
    table_header = self.asset_task_tablev.horizontalHeader()
    table_header.setResizeMode(QtGui.QHeaderView.ResizeToContents)
|
def delete(self, client=None):
    """Delete this notification.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/delete

    If :attr:`user_project` is set on the bucket, bills the API request
    to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :raises: :class:`google.api_core.exceptions.NotFound`:
        if the notification does not exist.
    :raises ValueError: if the notification has no ID.
    """
    if self.notification_id is None:
        # BUGFIX: corrected typo "intialized" in the error message
        raise ValueError("Notification not initialized by server")
    client = self._require_client(client)
    query_params = {}
    if self.bucket.user_project is not None:
        # Bill the request to the bucket's user project
        query_params["userProject"] = self.bucket.user_project
    client._connection.api_request(method="DELETE", path=self.path, query_params=query_params)
|
def vms(message, level=1):
    """Writes the specified message *only* if verbose output is enabled."""
    # Guard clause: verbosity disabled entirely.
    # Note: `== False` (not `is False`) is kept on purpose so that 0 also
    # disables output, exactly as in the original condition.
    if verbose is None or verbose == False:
        return
    # A bare bool enables everything; an int acts as a verbosity threshold.
    if isinstance(verbose, bool) or (isinstance(verbose, int) and level <= verbose):
        std(message)
|
def _plot(self, xticks=[], yticks=[], minor_xticks=[], minor_yticks=[], xlabel='Longitude', ylabel='Latitude', ax=None, ax2=None, colorbar=None, cb_orientation=None, cb_label=None, grid=False, axes_labelsize=None, tick_labelsize=None, **kwargs):
    """Plot the raw data using a simply cylindrical projection.

    :param xticks: major x-axis tick positions (degrees).
    :param yticks: major y-axis tick positions (degrees).
    :param minor_xticks: minor x-axis tick positions.
    :param minor_yticks: minor y-axis tick positions.
    :param xlabel: x-axis label text.
    :param ylabel: y-axis label text.
    :param ax: existing matplotlib axes to draw on; when None a new
        figure/axes pair is created and returned.
    :param ax2: unused here -- presumably kept for interface symmetry
        with related plotting methods (TODO confirm).
    :param colorbar: if True, attach a colorbar to the image.
    :param cb_orientation: 'horizontal' or 'vertical' colorbar placement.
    :param cb_label: label text for the colorbar.
    :param grid: whether to draw the major grid lines.
    :param axes_labelsize: font size for the axis labels.
    :param tick_labelsize: font size for the tick labels.
    :param kwargs: forwarded to ``imshow``.
    :returns: ``(fig, axes)`` when a new figure was created, else None.

    NOTE(review): the ``[]`` defaults are shared mutable defaults; they
    are only read here so this is safe, but worth confirming.
    """
    if ax is None:
        # Choose a figure aspect ratio that leaves room for the colorbar.
        if colorbar is True:
            if cb_orientation == 'horizontal':
                scale = 0.67
            else:
                scale = 0.5
        else:
            scale = 0.55
        figsize = (_mpl.rcParams['figure.figsize'][0], _mpl.rcParams['figure.figsize'][0] * scale)
        fig, axes = _plt.subplots(1, 1, figsize=figsize)
    else:
        axes = ax
    deg = '$^{\circ}$'
    # Render tick labels as integer degrees with a degree symbol.
    xticklabels = [str(int(y)) + deg for y in xticks]
    yticklabels = [str(int(y)) + deg for y in yticks]
    # Full-sphere extent; origin='upper' puts row 0 at the north edge.
    cim = axes.imshow(self.data, origin='upper', extent=(0., 360., -90., 90.), **kwargs)
    axes.set(xticks=xticks, yticks=yticks)
    axes.set_xlabel(xlabel, fontsize=axes_labelsize)
    axes.set_ylabel(ylabel, fontsize=axes_labelsize)
    axes.set_xticklabels(xticklabels, fontsize=tick_labelsize)
    axes.set_yticklabels(yticklabels, fontsize=tick_labelsize)
    axes.set_xticks(minor_xticks, minor=True)
    axes.set_yticks(minor_yticks, minor=True)
    axes.grid(grid, which='major')
    if colorbar is True:
        # Append a dedicated axes for the colorbar so the map itself is
        # not shrunk by the colorbar.
        if cb_orientation == 'vertical':
            divider = _make_axes_locatable(axes)
            cax = divider.append_axes("right", size="2.5%", pad=0.15)
            cbar = _plt.colorbar(cim, cax=cax, orientation=cb_orientation)
        else:
            divider = _make_axes_locatable(axes)
            cax = divider.append_axes("bottom", size="5%", pad=0.5)
            cbar = _plt.colorbar(cim, cax=cax, orientation=cb_orientation)
        if cb_label is not None:
            cbar.set_label(cb_label, fontsize=axes_labelsize)
        cbar.ax.tick_params(labelsize=tick_labelsize)
    if ax is None:
        return fig, axes
|
def sub(self, num):
    """Subtract ``num`` from the current value, clamping the result at 0.

    If the current value cannot be read (e.g. it is unset or non-numeric),
    the subtraction treats it as 0, i.e. the result starts from ``-num``.
    """
    try:
        val = self.value() - num
    except Exception:
        # Narrowed from a bare ``except:`` so that system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not swallowed.
        val = -num
    self.set(max(0, val))
|
def _compute_soil_linear_factor(cls, pga_rock, imt):
    """Compute soil linear factor as explained in paragraph 'Functional
    Form', page 1706.

    :param pga_rock: array of PGA values on rock; presumably in cm/s**2,
        given the 100/500 thresholds below -- TODO confirm against caller.
    :param imt: intensity measure type; only ``imt.period`` is read
        (period 0 is treated as PGA).
    :returns: array (same shape as ``pga_rock``) of the linear factor sl.
    """
    if imt.period >= 1:
        # Long periods: the factor is 1 everywhere.
        return np.ones_like(pga_rock)
    else:
        sl = np.zeros_like(pga_rock)
        # Masks for the transition (100-500) and saturated (>=500) ranges.
        pga_between_100_500 = (pga_rock > 100) & (pga_rock < 500)
        pga_greater_equal_500 = pga_rock >= 500
        is_SA_between_05_1 = 0.5 < imt.period < 1
        is_SA_less_equal_05 = imt.period <= 0.5
        if is_SA_between_05_1:
            # Period-dependent taper between 100 and 500.
            sl[pga_between_100_500] = (1 - (1. / imt.period - 1) * (pga_rock[pga_between_100_500] - 100) / 400)
            sl[pga_greater_equal_500] = 1 - (1. / imt.period - 1)
        if is_SA_less_equal_05 or imt.period == 0:
            # Short periods (and PGA): period-independent taper.
            sl[pga_between_100_500] = (1 - (pga_rock[pga_between_100_500] - 100) / 400)
        # Below 100 the soil response is fully linear regardless of period.
        sl[pga_rock <= 100] = 1
        return sl
|
def add_tags(self, archive_name, tags):
    '''Add tags to an archive

    Existing tags are preserved (including their order); only tags not
    already present are appended, each at most once.

    Parameters
    ----------
    archive_name : str
        Name of archive
    tags : list or tuple of strings
        tags to add to the archive
    '''
    merged = list(self._get_tags(archive_name))
    seen = set(merged)
    for candidate in tags:
        if candidate not in seen:
            merged.append(candidate)
            seen.add(candidate)
    self._set_tags(archive_name, merged)
|
def _get_mechanism ( self , rup , C ) :
"""Compute the fourth term of the equation described on p . 199:
` ` f1 * Fn + f2 * Fr ` `"""
|
Fn , Fr = self . _get_fault_type_dummy_variables ( rup )
return ( C [ 'f1' ] * Fn ) + ( C [ 'f2' ] * Fr )
|
def _hz_to_semitones ( self , hz ) :
"""Convert hertz into a number of semitones above or below some reference
value , in this case , A440"""
|
return np . log ( hz / self . _a440 ) / np . log ( self . _a )
|
def publishing_set_update_time(sender, instance, **kwargs):
    """Update the time modified before saving a publishable object."""
    # Only publishable items (those with a `publishing_linked` relation)
    # carry the modification timestamp.
    if not hasattr(instance, 'publishing_linked'):
        return
    # Hack to avoid updating `publishing_modified_at` field when a draft
    # publishable item is saved as part of a `publish` operation. This
    # ensures that the `publishing_published_at` timestamp is later than
    # the `publishing_modified_at` timestamp when we publish, which is
    # vital for us to correctly detect whether a draft is "dirty".
    if getattr(instance, '_skip_update_publishing_modified_at', False):
        # Reset flag, in case instance is re-used (e.g. in tests)
        instance._skip_update_publishing_modified_at = False
        return
    instance.publishing_modified_at = timezone.now()
|
def load_configuration():
    """Load the configuration"""
    (belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files()
    log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ")
    config = {}
    if belbio_conf_fp:
        with open(belbio_conf_fp, "r") as conf_handle:
            config = yaml.load(conf_handle, Loader=yaml.SafeLoader)
        # Remember where the configuration came from.
        config["source_files"] = {"conf": belbio_conf_fp}
    if belbio_secrets_fp:
        with open(belbio_secrets_fp, "r") as secrets_handle:
            secrets = yaml.load(secrets_handle, Loader=yaml.SafeLoader)
        config["secrets"] = copy.deepcopy(secrets)
        if "source_files" in config:
            config["source_files"]["secrets"] = belbio_secrets_fp
    get_versions(config)
    # TODO - needs to be completed
    # add_environment_vars(config)
    return config
|
def get_all_rrsets(self, hosted_zone_id, type=None, name=None, identifier=None, maxitems=None):
    """Retrieve the Resource Record Sets defined for this Hosted Zone.
    Returns the raw XML data returned by the Route53 call.

    :type hosted_zone_id: str
    :param hosted_zone_id: The unique identifier for the Hosted Zone

    :type type: str
    :param type: The type of resource record set to begin the record
        listing from. Valid choices are:

            * AAAA
            * CNAME
            * MX
            * NS
            * PTR
            * SOA
            * SPF
            * SRV
            * TXT

        Valid values for weighted resource record sets:

            * AAAA
            * CNAME
            * TXT

        Valid values for Zone Apex Aliases:

            * AAAA

    :type name: str
    :param name: The first name in the lexicographic ordering of domain
        names to be retrieved

    :type identifier: str
    :param identifier: In a hosted zone that includes weighted resource
        record sets (multiple resource record sets with the same DNS
        name and type that are differentiated only by SetIdentifier),
        if results were truncated for a given DNS name and type,
        the value of SetIdentifier for the next resource record
        set that has the current DNS name and type

    :type maxitems: int
    :param maxitems: The maximum number of records
    """
    # Imported lazily to avoid a circular import at module load time.
    from boto.route53.record import ResourceRecordSets
    # None-valued params are dropped by make_request's query building.
    params = {'type': type, 'name': name, 'Identifier': identifier, 'maxitems': maxitems}
    uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
    response = self.make_request('GET', uri, params=params)
    body = response.read()
    boto.log.debug(body)
    if response.status >= 300:
        raise exception.DNSServerError(response.status, response.reason, body)
    # Parse the XML body into a ResourceRecordSets container via SAX.
    rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
    h = handler.XmlHandler(rs, self)
    xml.sax.parseString(body, h)
    return rs
|
def addQuery(self):
    """Sets the query for this widget from the quick query text builder.

    The new query is inserted relative to the currently selected tree
    item: appended at the top level when nothing is selected, appended
    inside a group when a group is selected, or inserted next to a plain
    item otherwise. Each inserted query (except the very first) is
    preceded by a join item ('and'/'or') copied from its neighbours.
    """
    insert_item = self.uiQueryTREE.currentItem()
    if (insert_item and not insert_item.isSelected()):
        insert_item = None
    # create the query
    if (self.uiQueryTXT.text()):
        query = Q.fromString(nativestring(self.uiQueryTXT.text()))
        self.uiQueryTXT.setText('')
    else:
        query = Q()
    # determine where to create the item at
    tree = self.uiQueryTREE
    if (not insert_item):
        # determine if we are already joining queries together
        count = tree.topLevelItemCount()
        # create the first item
        if (not count):
            XQueryItem(tree, query)
        else:
            if (1 < count):
                join = tree.topLevelItem(count - 2).text(0)
            else:
                join = 'and'
            # create the join item & query item
            XJoinItem(tree, join)
            XQueryItem(tree, query)
    # add a query into a group
    elif (insert_item.childCount()):
        count = insert_item.childCount()
        join = insert_item.child(count - 2).text(0)
        # add the query to the group
        XJoinItem(insert_item, join)
        # BUGFIX: the new query item must be parented to the group
        # (insert_item), not the tree, to match the join item above.
        XQueryItem(insert_item, query)
    # add a query underneath another item
    else:
        parent_item = insert_item.parent()
        # add into the tree
        if (not parent_item):
            count = tree.topLevelItemCount()
            index = tree.indexOfTopLevelItem(insert_item)
            # add to the end
            if (index == count - 1):
                if (1 < count):
                    join = tree.topLevelItem(count - 2).text(0)
                else:
                    join = 'and'
                XJoinItem(tree, join)
                XQueryItem(tree, query)
            # insert in the middle
            else:
                join_item = tree.topLevelItem(index + 1)
                join = join_item.text(0)
                XJoinItem(tree, join, preceeding=join_item)
                XQueryItem(tree, query, preceeding=join_item)
        else:
            count = parent_item.childCount()
            index = parent_item.indexOfChild(insert_item)
            # add to the end
            if (index == count - 1):
                if (1 < count):
                    join = parent_item.child(count - 2).text(0)
                else:
                    join = 'and'
                XJoinItem(parent_item, join)
                XQueryItem(parent_item, query)
            # insert in the middle
            else:
                join_item = parent_item.child(index + 1)
                join = join_item.text(0)
                XJoinItem(parent_item, join, preceeding=join_item)
                # BUGFIX: pass the query (not the join string) to the new
                # query item, mirroring the top-level insertion branch.
                XQueryItem(parent_item, query, preceeding=join_item)
|
def key_type(self, type_):
    """returns reference to the class key type declaration

    Raises TypeError when ``type_`` is not a mapping container.
    """
    if not self.is_mapping(type_):
        message = 'Type "%s" is not "mapping" container' % str(type_)
        raise TypeError(message)
    return self.__find_xxx_type(type_, self.key_type_index, self.key_type_typedef, 'container_key_type')
|
def columns_formatter(cls, colname):
    """Decorator to mark a function as columns formatter."""
    def register(func):
        # Record the formatter under the column name, then return the
        # function unchanged so it can still be called directly.
        cls.columns_formatters[colname] = func
        return func
    return register
|
def cli(ctx, user, organism, administrate=False, write=False, export=False, read=False):
    """Update the permissions of a user on a specified organism

    Output:

        a dictionary containing user's organism permissions
    """
    users_client = ctx.gi.users
    return users_client.update_organism_permissions(user, organism, administrate=administrate, write=write, export=export, read=read)
|
def makeImages(self):
    """Make spiral images in sectors and steps.

    Plain, reversed,
    sectorialized, negative sectorialized
    outline, outline reversed, lonely
    only nodes, only edges, both
    """
    # make layout
    self.makeLayout()
    self.setAgraph()
    # make function that accepts a mode, a sector
    # and nodes and edges True and False
    self.plotGraph()
    self.plotGraph("reversed", filename="tgraphR.png")
    # All agent ids across every sector, used to build complements below.
    agents = n.concatenate(self.np.sectorialized_agents__)
    for i, sector in enumerate(self.np.sectorialized_agents__):
        # Per-sector renders: plain, reversed, and the complements ("N").
        self.plotGraph("plain", sector, "sector{:02}.png".format(i))
        self.plotGraph("reversed", sector, "sector{:02}R.png".format(i))
        self.plotGraph("plain", n.setdiff1d(agents, sector), "sector{:02}N.png".format(i))
        self.plotGraph("reversed", n.setdiff1d(agents, sector), "sector{:02}RN.png".format(i))
    # Empty-sector render used as a blank backdrop.
    self.plotGraph("plain", [], "BLANK.png")
|
def subset(self, used_indices, params=None):
    """Get subset of current Dataset.

    Parameters
    ----------
    used_indices : list of int
        Indices used to create the subset.
    params : dict or None, optional (default=None)
        These parameters will be passed to Dataset constructor.

    Returns
    -------
    subset : Dataset
        Subset of the current Dataset.
    """
    # Fall back to this dataset's own params when none were given.
    effective_params = self.params if params is None else params
    ret = Dataset(None, reference=self, feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=effective_params, free_raw_data=self.free_raw_data)
    ret._predictor = self._predictor
    ret.pandas_categorical = self.pandas_categorical
    ret.used_indices = used_indices
    return ret
|
def pieces(self, piece_type: PieceType, color: Color) -> "SquareSet":
    """Gets pieces of the given type and color.

    Returns a :class:`set of squares <chess.SquareSet>`.
    """
    mask = self.pieces_mask(piece_type, color)
    return SquareSet(mask)
|
def create_SMS(self, files, recipients, body, params=None):
    """Create a new certified sms

    @files
        Files to send
        ex: ['/documents/internet_contract.pdf', ...]
    @recipients
        A dictionary with the phone and name of the person you want to
        sign. Phone must be always with prefix.
        If you wanna send only to one person:
        - [{"phone": "34123456", "name": "John"}]
        For multiple recipients, you need to submit a list of dicts:
        - [{"email": "34123456", "name": "John"}, {"email": "34654321", "name": "Bob"}]
    @body
        Email body
    @params
        Optional extra request parameters (dict); defaults to empty.
    """
    # Avoid a shared mutable default argument; None means "no extras".
    if params is None:
        params = {}
    parameters = {}
    parser = Parser()
    documents = {}
    parser.fill_array(documents, files, 'files')
    # Accept a single recipient dict as well as a list of them.
    recipients = recipients if isinstance(recipients, list) else [recipients]
    for index, recipient in enumerate(recipients):
        parser.fill_array(parameters, recipient, 'recipients[%i]' % index)
    parser.fill_array(parameters, params, '')
    parameters['body'] = body
    connection = Connection(self.token)
    connection.set_url(self.production, self.SMS_URL)
    connection.add_params(parameters)
    connection.add_files(documents)
    return connection.post_request()
|
def set_windows_env_var(key, value):
    """Set an env var.

    Both key and value must be text (unicode) strings.

    Raises:
        WindowsError
    """
    # Validate both arguments the same way; the error message embeds the
    # offending value and the expected type.
    for argument in (key, value):
        if not isinstance(argument, text_type):
            raise TypeError("%r not of type %r" % (argument, text_type))
    status = winapi.SetEnvironmentVariableW(key, value)
    if status == 0:
        raise ctypes.WinError()
|
def color_print(*args, **kwargs):
    """Prints colors and styles to the terminal uses ANSI escape
    sequences.

    ::

       color_print('This is the color ', 'default', 'GREEN', 'green')

    Parameters
    ----------
    positional args : str
        The positional arguments come in pairs (*msg*, *color*), where
        *msg* is the string to display and *color* is the color to
        display it in.

        *color* is an ANSI terminal color name.  Must be one of:
        black, red, green, brown, blue, magenta, cyan, lightgrey,
        default, darkgrey, lightred, lightgreen, yellow, lightblue,
        lightmagenta, lightcyan, white, or '' (the empty string).

    file : writeable file-like object, optional
        Where to write to.  Defaults to `sys.stdout`.  If file is not
        a tty (as determined by calling its `isatty` member, if one
        exists), no coloring will be included.

    end : str, optional
        The ending of the message.  Defaults to ``\\n``.  The end will
        be printed after resetting any color or font state.
    """
    file = kwargs.get('file', _get_stdout())
    end = kwargs.get('end', '\n')
    write = file.write
    # Only colorize when writing to a tty and coloring is enabled.
    if isatty(file) and options.console.use_color:
        for i in range(0, len(args), 2):
            msg = args[i]
            # An odd trailing message has no paired color.
            if i + 1 == len(args):
                color = ''
            else:
                color = args[i + 1]
            if color:
                msg = _color_text(msg, color)
            # Some file objects support writing unicode sensibly on some Python
            # versions; if this fails try creating a writer using the locale's
            # preferred encoding. If that fails too give up.
            if not six.PY3 and isinstance(msg, bytes):
                msg = _decode_preferred_encoding(msg)
            # _write_with_fallback may swap in a codec-wrapped writer.
            write = _write_with_fallback(msg, write, file)
        write(end)
    else:
        # Non-tty path: emit the messages uncolored.
        for i in range(0, len(args), 2):
            msg = args[i]
            if not six.PY3 and isinstance(msg, bytes):
                # Support decoding bytes to unicode on Python 2; use the
                # preferred encoding for the locale (which is *sometimes*
                # sensible)
                msg = _decode_preferred_encoding(msg)
            write(msg)
        write(end)
|
def dump_yaml(testcase, yaml_file):
    """dump HAR entries to yaml testcase"""
    logging.info("dump testcase to YAML format.")
    # Write UTF-8 YAML in block style for readability.
    with io.open(yaml_file, 'w', encoding="utf-8") as stream:
        yaml.dump(testcase, stream, allow_unicode=True, default_flow_style=False, indent=4)
    logging.info("Generate YAML testcase successfully: {}".format(yaml_file))
|
def packet_in_handler(self, evt):
    """PacketIn event handler. when the received packet was IGMP,
    proceed it. otherwise, send a event.
    """
    msg = evt.msg
    pkt = packet.Packet(msg.data)
    req_igmp = pkt.get_protocol(igmp.igmp)
    # Non-IGMP traffic is simply forwarded to the observers.
    if not req_igmp:
        self.send_event_to_observers(EventPacketIn(msg))
        return
    # IGMP: the querier handles its own datapath, the snooper the rest.
    if self._querier.dpid == msg.datapath.id:
        self._querier.packet_in_handler(req_igmp, msg)
    else:
        self._snooper.packet_in_handler(pkt, req_igmp, msg)
|
def as_sql(self, qn, connection):
    """Create the proper SQL fragment. This inserts something like
    "(T0.flags & value) != 0".

    This will be called by Where.as_sql()
    """
    # Engine name is the last dotted component of the ENGINE setting,
    # e.g. 'django.db.backends.postgresql' -> 'postgresql'.
    engine = connection.settings_dict['ENGINE'].rsplit('.', -1)[-1]
    if engine.startswith('postgres'):
        # PostgreSQL spells bitwise XOR as '#'.
        XOR_OPERATOR = '#'
    elif engine.startswith('sqlite'):
        # SQLite has no bitwise XOR operator.
        raise NotImplementedError
    else:
        XOR_OPERATOR = '^'
    if self.bit:
        # NOTE(review): this branch emits a bitwise OR ('|') rather than
        # the AND/!=0 form shown in the docstring -- confirm intended
        # semantics before relying on the docstring.
        return ("%s.%s | %d" % (qn(self.table_alias), qn(self.column), self.bit.mask), [])
    return ("%s.%s %s %d" % (qn(self.table_alias), qn(self.column), XOR_OPERATOR, self.bit.mask), [])
|
def fetch_package_version(dist_name):
    """Return the installed version string of ``dist_name``.

    >>> fetch_package_version('sentry')
    """
    try:
        # Importing pkg_resources can be slow, so only import it
        # if we need it.
        import pkg_resources
    except ImportError:
        # pkg_resource is not available on Google App Engine
        raise NotImplementedError('pkg_resources is not available ' 'on this Python install')
    return pkg_resources.get_distribution(dist_name).version
|
def tablecopy(tablename, newtablename, deep=False, valuecopy=False, dminfo={}, endian='aipsrc', memorytable=False, copynorows=False):
    """Copy a table.

    It is the same as :func:`table.copy`, but without the need to open
    the table first.
    """
    # Open quietly (ack=False) and delegate all options to table.copy.
    source = table(tablename, ack=False)
    return source.copy(newtablename, deep=deep, valuecopy=valuecopy, dminfo=dminfo, endian=endian, memorytable=memorytable, copynorows=copynorows)
|
def setup_seasonal(self):
    """Check if there's some seasonal holiday going on, setup appropriate
    Shibe picture and load holiday words.

    Note: if there are two or more holidays defined for a certain date,
    the first one takes precedence.
    """
    # If we've specified a season, just run that one
    if self.ns.season:
        return self.load_season(self.ns.season)
    # If we've specified another doge or no doge at all, it does not make
    # sense to use seasons.
    # NOTE(review): the comment above suggests "custom doge OR no doge",
    # but the condition skips seasons only when a custom doge is set AND
    # the shibe is shown -- confirm whether `and not` should be `or`.
    if self.ns.doge_path is not None and not self.ns.no_shibe:
        return
    now = datetime.datetime.now()
    for season, data in wow.SEASONS.items():
        start, end = data['dates']
        start_dt = datetime.datetime(now.year, start[0], start[1])
        # Be sane if the holiday season spans over New Year's day.
        end_dt = datetime.datetime(now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
        if start_dt <= now <= end_dt:
            # Wow, much holiday!
            return self.load_season(season)
|
def one_instance(function=None, key='', timeout=DEFAULT_ONE_INSTANCE_TIMEOUT):
    """Decorator to enforce only one Celery task execution at a time when multiple
    workers are available.

    May be used bare (``@one_instance``) or with arguments
    (``@one_instance(key='mytask', timeout=60)``).

    See http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
    """
    def _dec(run_func):
        def _caller(*args, **kwargs):
            ret_value = None
            have_lock = False
            # Use Redis AOF for persistent lock.
            # BUGFIX: config_get() returns a dict and dict.values() is not
            # subscriptable on Python 3 -- look the key up directly.
            if REDIS_CLIENT.config_get('appendonly').get('appendonly') == 'no':
                REDIS_CLIENT.config_set('appendonly', 'yes')
            lock = REDIS_CLIENT.lock(key, timeout=timeout)
            try:
                logger.debug('%s: trying to acquire lock (PID %d).' % (key, os.getpid()))
                have_lock = lock.acquire(blocking=False)
                if have_lock:
                    logger.debug('%s: acquired lock (PID %d).' % (key, os.getpid()))
                    ret_value = run_func(*args, **kwargs)
                else:
                    logger.error('%s: did not acquire lock (PID %d).' % (key, os.getpid()))
            finally:
                # Only release a lock we actually own.
                if have_lock:
                    lock.release()
                    logger.debug('%s: released lock (PID %d).' % (key, os.getpid()))
            return ret_value
        return _caller
    return _dec(function) if function is not None else _dec
|
def _onsuccess(self, result):
    """To execute on execution success

    :param cdumay_result.Result result: Execution result
    :return: Execution result
    :rtype: cdumay_result.Result
    """
    self._set_status("SUCCESS", result)
    entrypoint = self.__class__.path
    # Structured logging payload: the kafka message and its result schema.
    kmsg = Message(self.uuid, entrypoint=entrypoint, params=self.params).dump()
    kresult = ResultSchema().dump(result) if result else dict()
    logger.info(
        "{}.Success: {}[{}]: {}".format(self.__class__.__name__, entrypoint, self.uuid, result),
        extra=dict(kmsg=kmsg, kresult=kresult))
    return self.onsuccess(result)
|
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
    """Specify quality of service by requesting that messages be pre-fetched
    from the server. Pre-fetching means that the server will deliver messages
    to the client while the client is still processing unacknowledged messages.

    This method is a :ref:`coroutine <coroutine>`.

    :param int prefetch_size: Specifies a prefetch window in bytes.
        Messages smaller than this will be sent from the server in advance.
        This value may be set to 0, which means "no specific limit".
    :param int prefetch_count: Specifies a prefetch window in terms of whole messages.
    :param bool apply_globally: If true, apply these QoS settings on a global level.
        The meaning of this is implementation-dependent. From the
        `RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:

            RabbitMQ has reinterpreted this field. The original specification said:
            "By default the QoS settings apply to the current channel only.
            If this field is set, they are applied to the entire connection."
            Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
            per-consumer (for new consumers on the channel; existing ones being unaffected) and
            global=true to mean that the QoS settings should apply per-channel.
    """
    # Send basic.qos, then block until the broker confirms with qos-ok
    # before handing the reader back to the event loop.
    self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
    yield from self.synchroniser.wait(spec.BasicQosOK)
    self.reader.ready()
|
def _bb_intensity ( self , Teff , photon_weighted = False ) :
"""Computes mean passband intensity using blackbody atmosphere :
I _ pb ^ E = \ int _ \ lambda I ( \ lambda ) P ( \ lambda ) d \ lambda / \ int _ \ lambda P ( \ lambda ) d \ lambda
I _ pb ^ P = \ int _ \ lambda \ lambda I ( \ lambda ) P ( \ lambda ) d \ lambda / \ int _ \ lambda \ lambda P ( \ lambda ) d \ lambda
Superscripts E and P stand for energy and photon , respectively .
@ Teff : effective temperature in K
@ photon _ weighted : photon / energy switch
Returns : mean passband intensity using blackbody atmosphere ."""
|
if photon_weighted :
pb = lambda w : w * self . _planck ( w , Teff ) * self . ptf ( w )
return integrate . quad ( pb , self . wl [ 0 ] , self . wl [ - 1 ] ) [ 0 ] / self . ptf_photon_area
else :
pb = lambda w : self . _planck ( w , Teff ) * self . ptf ( w )
return integrate . quad ( pb , self . wl [ 0 ] , self . wl [ - 1 ] ) [ 0 ] / self . ptf_area
|
async def send_photo(self, path, entity):
    """Sends the file located at path to the desired entity as a photo"""
    # Delegate to send_file, reporting progress via the instance callback.
    await self.send_file(entity, path, progress_callback=self.upload_progress_callback)
    print('Photo sent!')
|
def set_agent_settings(contact=None, location=None, services=None):
    '''Manage the SNMP sysContact, sysLocation, and sysServices settings.

    Args:

        contact (str, optional): The SNMP contact.

        location (str, optional): The SNMP location.

        services (list, optional): A list of selected services. The possible
            service names can be found via ``win_snmp.get_agent_service_types``.
            To disable all services pass a list of None, ie: ['None']

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.set_agent_settings contact='Contact Name' location='Place' services="['Physical']"
    '''
    if services is not None:
        # Filter services for unique items, and sort them for comparison
        # purposes.
        services = sorted(set(services))
        # Validate the services.
        for service in services:
            if service not in _SERVICE_TYPES:
                message = ("Invalid service '{0}' specified. Valid services:" ' {1}').format(service, get_agent_service_types())
                raise SaltInvocationError(message)
    if six.PY2:
        # Registry writes expect unicode strings on Python 2.
        contact = _to_unicode(contact)
        location = _to_unicode(location)
    settings = {'contact': contact, 'location': location, 'services': services}
    current_settings = get_agent_settings()
    # Short-circuit when nothing would change.
    if settings == current_settings:
        _LOG.debug('Agent settings already contain the provided values.')
        return True
    if contact is not None:
        if contact != current_settings['contact']:
            __utils__['reg.set_value'](_HKEY, _AGENT_KEY, 'sysContact', contact, 'REG_SZ')
    if location is not None:
        if location != current_settings['location']:
            __utils__['reg.set_value'](_HKEY, _AGENT_KEY, 'sysLocation', location, 'REG_SZ')
    if services is not None:
        if set(services) != set(current_settings['services']):
            # Calculate the total value. Produces 0 if an empty list was provided,
            # corresponding to the None _SERVICE_TYPES value.
            vdata = sum(_SERVICE_TYPES[service] for service in services)
            _LOG.debug('Setting sysServices vdata to: %s', vdata)
            __utils__['reg.set_value'](_HKEY, _AGENT_KEY, 'sysServices', vdata, 'REG_DWORD')
    # Get the fields post-change so that we can verify that all values
    # were modified successfully. Track the ones that weren't.
    new_settings = get_agent_settings()
    failed_settings = dict()
    for setting in settings:
        if settings[setting] is not None and settings[setting] != new_settings[setting]:
            failed_settings[setting] = settings[setting]
    if failed_settings:
        _LOG.error('Unable to configure agent settings: %s', failed_settings)
        return False
    _LOG.debug('Agent settings configured successfully: %s', settings.keys())
    return True
|
def save_dir ( key , dir_path , * refs ) :
"""Convert the given parameters to a special JSON object .
JSON object is of the form :
{ key : { " dir " : dir _ path } } , or
{ key : { " dir " : dir _ path , " refs " : [ refs [ 0 ] , refs [ 1 ] , . . . ] } }"""
|
if not os . path . isdir ( dir_path ) :
return error ( "Output '{}' set to a missing directory: '{}'." . format ( key , dir_path ) )
result = { key : { "dir" : dir_path } }
if refs :
missing_refs = [ ref for ref in refs if not ( os . path . isfile ( ref ) or os . path . isdir ( ref ) ) ]
if len ( missing_refs ) > 0 :
return error ( "Output '{}' set to missing references: '{}'." . format ( key , ', ' . join ( missing_refs ) ) )
result [ key ] [ "refs" ] = refs
return json . dumps ( result )
|
def get_network(self):
    """Get the network that this resource attribute is in.

    Resolution depends on ``ref_key``: the attribute may sit directly on
    a network, or on a node/link/group that belongs to one. Project
    attributes (and unknown keys) have no network, so None is returned.
    """
    key = self.ref_key
    if key == 'NETWORK':
        return self.network
    if key == 'NODE':
        return self.node.network
    if key == 'LINK':
        return self.link.network
    if key == 'GROUP':
        return self.group.network
    if key == 'PROJECT':
        return None
|
def draw_arc(self, color, world_loc, world_radius, start_angle, stop_angle, thickness=1):
    """Draw an arc using world coordinates, radius, start and stop angles."""
    center = self.world_to_surf.fwd_pt(world_loc).round()
    # Never let the radius collapse below one pixel.
    radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))
    bounding_box = pygame.Rect(center - radius, (radius * 2, radius * 2))
    # A thickness of 0 means "filled"; use it when the requested line
    # width would exceed the radius.
    width = thickness if thickness < radius else 0
    pygame.draw.arc(self.surf, color, bounding_box, start_angle, stop_angle, width)
|
def find_path(dirs, path_to_find):
    """Go through a bunch of dirs and see if dir + path_to_find exists there.

    Returns the first dir that matches. Otherwise, return None.
    """
    # Renamed the loop variable so it no longer shadows the builtin `dir`.
    for candidate in dirs:
        if os.path.exists(os.path.join(candidate, path_to_find)):
            return candidate
    return None
|
def _lemmatise_desassims ( self , f , * args , ** kwargs ) :
"""Lemmatise un mot f avec sa désassimilation
: param f : Mot à lemmatiser
: yield : Match formated like in _ lemmatise ( )"""
|
forme_assimilee = self . desassims ( f )
if forme_assimilee != f :
for proposal in self . _lemmatise ( forme_assimilee , * args , ** kwargs ) :
yield proposal
|
def filepaths(self):
    """Yield absolute paths to existing files in :attr:`path`

    Any files that match patterns in :attr:`exclude` as well as hidden and
    empty files are not included.
    """
    # No path configured means nothing to yield.
    if self.path is None:
        return
    yield from utils.filepaths(self.path, exclude=self.exclude, hidden=False, empty=False)
|
def upload_slice_file(self, real_file_path, slice_size, file_name, offset=0, dir_name=None):
    """Upload a local file to COS in fixed-size slices.

    (This slice-upload code was kindly contributed by GitHub user
    a270443177, https://github.com/a270443177.)

    :param real_file_path: path of the local file to upload.
    :param slice_size: size in bytes of each uploaded slice.
    :param file_name: target object name in the bucket.
    :param offset: byte offset at which to start uploading.
    :param dir_name: optional destination directory; a leading '/' is
        stripped.
    :return: response of the final "finish" request.
    """
    # Strip a leading slash from the destination directory.
    if dir_name is not None and dir_name[0] == '/':
        dir_name = dir_name[1:len(dir_name)]
    if dir_name is None:
        dir_name = ""
    self.url = 'http://' + self.config.region + '.file.myqcloud.com/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket
    # NOTE(review): after the fallback above dir_name is never None, so
    # this branch always runs; with dir_name == "" the URL gains a double
    # slash -- confirm the service tolerates/expects this.
    if dir_name is not None:
        self.url = self.url + '/' + dir_name
    self.url = self.url + '/' + file_name
    file_size = os.path.getsize(real_file_path)
    # Negotiate an upload session sized for this file and slice size.
    session = self._upload_slice_control(file_size=file_size, slice_size=slice_size)
    with open(real_file_path, 'rb') as local_file:
        # Send consecutive slices starting from the requested offset.
        while offset < file_size:
            file_content = local_file.read(slice_size)
            self._upload_slice_data(filecontent=file_content, session=session, offset=offset)
            offset += slice_size
    # Finalize the session so the server assembles the slices.
    r = self._upload_slice_finish(session=session, file_size=file_size)
    return r
|
def from_jd(jd):
    '''Calculate Islamic date from Julian day'''
    # Snap to the half-day boundary used by the Julian day convention.
    jday = trunc(jd) + 0.5
    # 30-year arithmetic cycle of the tabular Islamic calendar.
    year = trunc(((30 * (jday - EPOCH)) + 10646) / 10631)
    month = min(12, ceil((jday - (29 + to_jd(year, 1, 1))) / 29.5) + 1)
    day = int(jday - to_jd(year, month, 1)) + 1
    return (year, month, day)
|
def _DecodeUnknownMessages(message, encoded_message, pair_type):
    """Process unknown fields in encoded_message of a message type.

    Builds one ``pair_type`` instance per field present in
    ``encoded_message`` but absent from ``message``'s declared fields.
    """
    field_type = pair_type.value.type
    new_values = []
    # Use a set for O(1) membership checks while skipping known fields.
    all_field_names = {x.name for x in message.all_fields()}
    for name, value_dict in six.iteritems(encoded_message):
        if name in all_field_names:
            continue
        value = PyValueToMessage(field_type, value_dict)
        if pair_type.value.repeated:
            value = _AsMessageList(value)
        new_pair = pair_type(key=name, value=value)
        new_values.append(new_pair)
    return new_values
|
def reload(name=DEFAULT, all_names=False):
    """Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
    :mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
    pickup the latest values in the namespace.

    Defaults to reloading just the DEFAULT namespace.

    :param name: the name of the :class:`ConfigNamespace` to reload
    :param all_names: If True, reload all namespaces, and ignore `name`
    """
    # Resetting each proxy forces the next read to fetch fresh values.
    for namespace in get_namespaces_from_names(name, all_names):
        for proxy in namespace.get_value_proxies():
            proxy.reset()
|
def create_review(self, commit=github.GithubObject.NotSet, body=None, event=github.GithubObject.NotSet, comments=github.GithubObject.NotSet):
    """Create a review on this pull request.

    :calls: `POST /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
    :param commit: github.Commit.Commit
    :param body: string or None
    :param event: string
    :param comments: list
    :rtype: :class:`github.PullRequestReview.PullRequestReview`
    """
    assert commit is github.GithubObject.NotSet or isinstance(commit, github.Commit.Commit), commit
    # Bug fix: ``body`` defaults to None, so the old unconditional
    # ``isinstance(body, str)`` assert raised AssertionError on every
    # call that omitted ``body``. Accept None and omit the key instead.
    assert body is None or isinstance(body, str), body
    assert event is github.GithubObject.NotSet or isinstance(event, str), event
    assert comments is github.GithubObject.NotSet or isinstance(comments, list), comments
    post_parameters = dict()
    if commit is not github.GithubObject.NotSet:
        post_parameters['commit_id'] = commit.sha
    if body is not None:
        post_parameters['body'] = body
    # GitHub treats a missing event as PENDING; default to COMMENT as before.
    post_parameters['event'] = 'COMMENT' if event == github.GithubObject.NotSet else event
    if comments is github.GithubObject.NotSet:
        post_parameters['comments'] = []
    else:
        post_parameters['comments'] = comments
    headers, data = self._requester.requestJsonAndCheck("POST", self.url + "/reviews", input=post_parameters)
    self._useAttributes(data)
    return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True)
|
def delete_contacts(self, ids):
    """Delete selected contacts for the current user.

    :param ids: list of ids
    """
    # The API expects a single comma-separated id string.
    action = {'op': 'delete', 'id': self._return_comma_list(ids)}
    self.request('ContactAction', {'action': action})
|
def decode(self, encoded_payload):
    """Decode a transmitted payload."""
    self.packets = []
    remaining = encoded_payload
    while remaining:
        if six.byte2int(remaining[0:1]) <= 1:
            # Binary framing: decimal length digits follow the leading
            # byte until a 0xFF separator, then the packet bytes.
            length = 0
            pos = 1
            while six.byte2int(remaining[pos:pos + 1]) != 255:
                length = length * 10 + six.byte2int(remaining[pos:pos + 1])
                pos += 1
            encoded_packet = remaining[pos + 1:pos + 1 + length]
            self.packets.append(packet.Packet(encoded_packet=encoded_packet))
        else:
            pos = remaining.find(b':')
            if pos == -1:
                raise ValueError('invalid payload')
            # extracting the packet out of the payload is extremely
            # inefficient, because the payload needs to be treated as
            # binary, but the non-binary packets have to be parsed as
            # unicode. Luckily this complication only applies to long
            # polling, as the websocket transport sends packets
            # individually wrapped.
            length = int(remaining[0:pos])
            text = remaining.decode('utf-8', errors='ignore')
            encoded_packet = text[pos + 1:pos + 1 + length].encode('utf-8')
            self.packets.append(packet.Packet(encoded_packet=encoded_packet))
            # the engine.io protocol sends the packet length in
            # utf-8 characters, but we need it in bytes to be able to
            # jump to the next packet in the payload
            length = len(encoded_packet)
        remaining = remaining[pos + 1 + length:]
|
def filter_words(tokenized_obj, valid_pos, stopwords, check_field_name='stem'):
    # type: (TokenizedSenetence, List[Tuple[text_type, ...]], List[text_type], text_type) -> FilteredObject
    """Filter out tokens the user does not want to keep.

    Filtering conditions are stopwords and POS tags.

    * Input
        - valid_pos
            - List of Tuple which has POS element to keep.
            - Keep in mind, each tokenizer has different POS structure.
            >>> [('名詞', '固有名詞'), ('動詞',)]
        - stopwords
            - List of str, which you'd like to remove
            >>> ['残念', '今日']
    """
    assert isinstance(tokenized_obj, TokenizedSenetence)
    assert isinstance(valid_pos, list)
    assert isinstance(stopwords, list)
    kept_tokens = []
    for token in tokenized_obj.tokenized_objects:
        assert isinstance(token, TokenizedResult)
        # Compare either the stem or the surface form against stopwords.
        if check_field_name == 'stem':
            is_stopword = __is_sotpwords(token.word_stem, stopwords)
        else:
            is_stopword = __is_sotpwords(token.word_surface, stopwords)
        pos_ok = __is_valid_pos(token.tuple_pos, valid_pos)
        # The three filtering modes are mutually exclusive.
        if valid_pos != [] and stopwords == []:
            # case 1: only POS filtering is ON
            if pos_ok:
                kept_tokens.append(token)
        elif valid_pos == [] and stopwords != []:
            # case 2: only stopword filtering is ON
            if is_stopword is False:
                kept_tokens.append(token)
        elif valid_pos != [] and stopwords != []:
            # case 3: both conditions are ON
            if is_stopword is False and pos_ok:
                kept_tokens.append(token)
    return FilteredObject(sentence=tokenized_obj.sentence, tokenized_objects=kept_tokens, pos_condition=valid_pos, stopwords=stopwords)
|
def build_model(self, n_features, n_classes):
    """Create the computational graph of the model.

    :param n_features: Number of features.
    :param n_classes: number of classes.
    :return: self
    """
    self._create_placeholders(n_features, n_classes)
    self._create_layers(n_classes)
    self.cost = self.loss.compile(self.mod_y, self.input_labels)
    self.train_step = self.trainer.compile(self.cost)
    self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
    # Bug fix: the docstring promises ``:return: self`` but the function
    # fell off the end and returned None; return self so fluent chaining
    # works as documented (backward compatible for callers ignoring it).
    return self
|
def metrics(self) -> list:
    """List of metrics to track for this learning process."""
    base_metrics = [
        FramesMetric("frames"),
        FPSMetric("fps"),
        EpisodeRewardMetric('PMM:episode_rewards'),
        EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),
        EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),
        EpisodeLengthMetric("episode_length"),
    ]
    # The algorithm and the environment roller contribute their own metrics.
    return base_metrics + self.algo.metrics() + self.env_roller.metrics()
|
def model_select(self, score_function, alleles=None, min_models=1, max_models=10000):
    """Perform model selection using a user-specified scoring function.

    Model selection is done using a "step up" variable selection procedure,
    in which models are repeatedly added to an ensemble until the score
    stops improving.

    Parameters
    ----------
    score_function : Class1AffinityPredictor -> float function
        Scoring function
    alleles : list of string, optional
        If not specified, model selection is performed for all alleles.
    min_models : int, optional
        Min models to select per allele
    max_models : int, optional
        Max models to select per allele

    Returns
    -------
    Class1AffinityPredictor : predictor containing the selected models
    """
    if alleles is None:
        alleles = self.supported_alleles
    dfs = []
    allele_to_allele_specific_models = {}
    for allele in alleles:
        # One row per candidate model for this allele; selection state and
        # per-round scores are tracked as columns of this frame.
        df = pandas.DataFrame({'model': self.allele_to_allele_specific_models[allele]})
        df["model_num"] = df.index
        df["allele"] = allele
        df["selected"] = False
        round_num = 1
        while not df.selected.all() and sum(df.selected) < max_models:
            # NOTE(review): "%2d" space-pads to width 2 (e.g. "score_ 1");
            # both columns use the same format so lookups stay consistent,
            # but "%02d" may have been intended -- confirm before changing,
            # since these column names surface in the metadata dataframe.
            score_col = "score_%2d" % round_num
            prev_score_col = "score_%2d" % (round_num - 1)
            existing_selected = list(df[df.selected].model)
            # Score each unselected candidate as if it were added to the
            # currently-selected ensemble; already-selected rows get NaN so
            # they cannot win again.
            df[score_col] = [
                numpy.nan if row.selected else score_function(
                    Class1AffinityPredictor(
                        allele_to_allele_specific_models={allele: [row.model] + existing_selected}))
                for (_, row) in df.iterrows()]
            # Stop once adding a model no longer improves the best score,
            # but only after the minimum ensemble size has been reached.
            if round_num > min_models and (df[score_col].max() < df[prev_score_col].max()):
                break
            # In case of a tie, pick a model at random.
            (best_model_index,) = df.loc[(df[score_col] == df[score_col].max())].sample(1).index
            df.loc[best_model_index, "selected"] = True
            round_num += 1
        dfs.append(df)
        allele_to_allele_specific_models[allele] = list(df.loc[df.selected].model)
    # Keep the full per-round scoring history as predictor metadata.
    df = pandas.concat(dfs, ignore_index=True)
    new_predictor = Class1AffinityPredictor(allele_to_allele_specific_models, metadata_dataframes={"model_selection": df, })
    return new_predictor
|
def notify(self, method_name_or_object, params=None):
    """Sends a notification to the service by calling the ``method_name``
    method with the ``params`` parameters. Does not wait for a response, even
    if the response triggers an error.

    :param method_name_or_object: the name of the method to be called or a
        ``Notification`` instance
    :param params: a list or dict representing the parameters for the call
    :return: None
    """
    # Accept either a ready-made Notification or build one from the name.
    notification = (method_name_or_object
                    if isinstance(method_name_or_object, Notification)
                    else Notification(method_name_or_object, params))
    self.handle_single_request(notification)
|
def _at_for(self, calculator, rule, scope, block):
    """Implements @for.

    Parses ``$var from <start> through|to <end>`` and evaluates the block
    body once per integer value of ``$var`` in that range. ``through`` is
    inclusive of the end bound; ``to`` is exclusive.
    """
    var, _, name = block.argument.partition(' from ')
    frm, _, through = name.partition(' through ')
    if through:
        inclusive = True
    else:
        inclusive = False
        # BUG FIX: only look for the exclusive ' to ' form when
        # ' through ' was absent. Previously this partition ran
        # unconditionally, which clobbered the already-parsed
        # ``through`` bound with an empty string whenever the
        # 'from .. through ..' form was used.
        frm, _, through = frm.partition(' to ')
    frm = calculator.calculate(frm)
    through = calculator.calculate(through)
    try:
        frm = int(float(frm))
        through = int(float(through))
    except ValueError:
        # Non-numeric bounds: silently skip the loop, as before.
        return
    if frm > through:
        # DEVIATION: allow reversed '@for .. from .. through' (same as
        # enumerate() and range())
        frm, through = through, frm
        rev = reversed
    else:
        rev = lambda x: x
    var = var.strip()
    var = calculator.do_glob_math(var)
    var = normalize_var(var)
    inner_rule = rule.copy()
    inner_rule.unparsed_contents = block.unparsed_contents
    if not self.should_scope_loop_in_rule(inner_rule):
        # DEVIATION: Allow not creating a new namespace
        inner_rule.namespace = rule.namespace
    if inclusive:
        through += 1
    for i in rev(range(frm, through)):
        inner_rule.namespace.set_variable(var, Number(i))
        self.manage_children(inner_rule, scope)
|
def set_quality_index(self):
    """Set the current signal quality in combobox."""
    win_start = self.parent.value('window_start')
    win_length = self.parent.value('window_length')
    quality = self.annot.get_stage_for_epoch(win_start, win_length, attr='quality')
    # -1 clears the combobox selection when no quality is annotated.
    index = -1 if quality is None else QUALIFIERS.index(quality)
    self.idx_quality.setCurrentIndex(index)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.