signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def honeypot_equals(val):
    """Default verifier used if HONEYPOT_VERIFIER is not specified.

    Ensures ``val == HONEYPOT_VALUE``, or ``HONEYPOT_VALUE()`` if the
    setting is a callable.
    """
    expected = getattr(settings, 'HONEYPOT_VALUE', '')
    # A callable setting is evaluated at verification time.
    expected = expected() if callable(expected) else expected
    return val == expected
|
def get_lib_module_dict(self):
    """Load the 'lib' directory as a python module, so it can be used to
    provide functions for rowpipe transforms.

    This only works for filesystem packages.

    :returns: dict of public names -> values exported by the first lib
        module that imports successfully; ``{}`` when there is no ref,
        the ref is not a file URL, the sys.path setup fails, or no lib
        module exists.  (Previously one of these paths fell through and
        returned ``None``; callers now always get a dict.)
    """
    from importlib import import_module

    if not self.ref:
        return {}

    url = parse_app_url(self.ref)
    if url.scheme != 'file':
        # Only filesystem packages can provide a lib module.
        return {}

    if not self.set_sys_path():
        return {}

    for module_name in self.lib_dir_names:
        try:
            module = import_module(module_name)
        except ModuleNotFoundError as exc:
            # We need to know if it is the datapackage's own module that is
            # missing (expected: try the next candidate name) or a module
            # that it imported (a real error that must propagate).
            if module_name not in str(exc):
                raise
            continue
        # Export everything public from the module.
        return {key: value for key, value in module.__dict__.items()
                if not key.startswith('__')}

    return {}
|
def has_main_target(self, name):
    """Tells if a main target with the specified name exists.

    :param name: target name to look up (a string; the ``basestring``
        check implies this code targets Python 2).
    :returns: True if a main target with that name is registered.
    """
    assert isinstance(name, basestring)
    # Main targets are built lazily on first query.
    if not self.built_main_targets_:
        self.build_main_targets()
    return name in self.main_target_
|
def _find_aux_coord_vars ( self , ds , refresh = False ) :
'''Returns a list of auxiliary coordinate variables
An auxiliary coordinate variable is any netCDF variable that contains
coordinate data , but is not a coordinate variable ( in the sense of the term
defined by CF ) .
: param netCDF4 . Dataset ds : An open netCDF dataset
: param bool refresh : if refresh is set to True , the cache is
invalidated .
: rtype : list
: return : List of variable names ( str ) that are defined to be auxiliary
coordinate variables .'''
|
if self . _aux_coords . get ( ds , None ) and refresh is False :
return self . _aux_coords [ ds ]
self . _aux_coords [ ds ] = cfutil . get_auxiliary_coordinate_variables ( ds )
return self . _aux_coords [ ds ]
|
def speech(self) -> str:
    """Report summary designed to be read by a text-to-speech program."""
    # Lazily fetch a report if none has been loaded yet.
    if not self.data:
        self.update()
    return speech.metar(self.data, self.units)
|
def get_sky_coords(self):
    """Get the sky coordinates of all the pixels in this pixelization.

    Returns an (npix, 2) array of [lon, lat] pairs in degrees.
    """
    # Either every pixel of the full map, or only the explicit pixel subset.
    pixels = list(range(self._npix)) if self._ipix is None else self._ipix
    theta, phi = hp.pix2ang(self._nside, pixels, self._nest)
    # healpy returns colatitude theta; convert to latitude.
    lat = np.degrees((np.pi / 2) - theta)
    lon = np.degrees(phi)
    return np.vstack([lon, lat]).T
|
def preprocess(self, source, name=None, filename=None):
    """Preprocess the source with all extensions.

    This is automatically called for all parsing and compiling methods but
    *not* for :meth:`lex`, because there you usually only want the actual
    source tokenized.
    """
    processed = unicode(source)
    for extension in self.iter_extensions():
        processed = extension.preprocess(processed, name, filename)
    return processed
|
def iostat(interval=1, count=5, disks=None):
    '''Gather and return (averaged) IO stats.

    .. versionadded:: 2016.3.0
    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' disk.iostat 1 5 disks=sda
    '''
    # Dispatch to the platform-specific implementation; unsupported
    # platforms fall through and return None, as before.
    if salt.utils.platform.is_linux():
        return _iostat_linux(interval, count, disks)
    if salt.utils.platform.is_freebsd():
        return _iostat_fbsd(interval, count, disks)
    if salt.utils.platform.is_aix():
        return _iostat_aix(interval, count, disks)
|
def KnowsFile(self, filename):
    """Look at the extension and decide if this file can be managed."""
    # Managed files are either media files or config files.
    return bool(self._isMediaFile(filename) or self._isConfigFile(filename))
|
def find_existing_split_discordants(data):
    """Check for pre-calculated split reads and discordants done as part of
    alignment streaming."""
    base = os.path.splitext(dd.get_align_bam(data))[0]
    sr_file = "%s-sr.bam" % base
    disc_file = "%s-disc.bam" % base
    # Prefer files named after the alignment BAM.
    if utils.file_exists(sr_file) and utils.file_exists(disc_file):
        return sr_file, disc_file
    # Fall back to explicitly configured split/discordant BAMs.
    sr_file = dd.get_sr_bam(data)
    disc_file = dd.get_disc_bam(data)
    if sr_file and utils.file_exists(sr_file) and disc_file and utils.file_exists(disc_file):
        return sr_file, disc_file
    return None, None
|
def rpc_get_DID_record(self, did, **con_info):
    """Given a DID, return the name or subdomain record it corresponds to.

    :param did: the DID string to resolve.
    :returns: ``{'record': ...}`` wrapped in a success response, or an
        ``{'error': ..., 'http_status': ...}`` dict on failure.
    """
    if not isinstance(did, (str, unicode)):
        return {'error': 'Invalid DID: not a string', 'http_status': 400}

    try:
        did_info = parse_DID(did)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return {'error': 'Invalid DID', 'http_status': 400}

    name_type = did_info['name_type']
    if name_type == 'name':
        res = self.get_name_DID_record(did)
    elif name_type == 'subdomain':
        res = self.get_subdomain_DID_record(did)
    else:
        # BUG FIX: previously `res` stayed None here and the subsequent
        # `'error' in res` raised TypeError instead of returning an error.
        return {'error': 'Invalid DID', 'http_status': 400}

    if 'error' in res:
        return {'error': res['error'], 'http_status': res.get('http_status', 404)}

    return self.success_response({'record': res['record']})
|
def setblocking(self, blocking):
    '''Set whether or not this message is blocking.'''
    # Propagate the flag to every underlying socket, then record it.
    for s in self.socket():
        s.setblocking(blocking)
    self._blocking = blocking
|
def _job_completed(self, job_name, success, message):
    """Internal method.

    Called when a job of a long running task completes.

    :param job_name: object path of the finished job.
    :param success: whether the job reported success.
    :param message: error message supplied on failure.
    """
    job = self._objects[job_name][Interface['Job']]
    # Map the UDisks operation name to our internal action; unknown
    # operations are ignored.
    action = self._action_by_operation.get(job['Operation'])
    if not action:
        return
    # We only handle events, which are associated to exactly one object:
    object_path, = job['Objects']
    device = self[object_path]
    if success:
        # It rarely happens, but sometimes UDisks posts the
        # Job.Completed event before PropertiesChanged, so we have to
        # check if the operation has been carried out yet:
        if self._check_action_success[action](device):
            event_name = self._event_by_action[action]
            self.trigger(event_name, device)
    else:
        self.trigger('job_failed', device, action, message)
|
def prepare_denovo_input_narrowpeak(inputfile, params, outdir):
    """Prepare a narrowPeak file for de novo motif prediction.

    All regions are resized to the same width, split into a test and a
    validation set, and converted to FASTA.

    Parameters
    ----------
    inputfile : str
        narrowPeak file with input regions.
    params : dict
        Dictionary with parameters; must contain "width".
    outdir : str
        Output directory to save files.
    """
    bedfile = os.path.join(outdir, "input.from.narrowpeak.bed")
    # Skip comment/track/browser header lines.
    p = re.compile(r'^(#|track|browser)')
    width = int(params["width"])
    logger.info("preparing input (narrowPeak to BED, width %s)", width)
    warn_no_summit = True
    with open(bedfile, "w") as f_out:
        with open(inputfile) as f_in:
            for line in f_in:
                if p.search(line):
                    continue
                vals = line.strip().split("\t")
                start, end = int(vals[1]), int(vals[2])
                # Column 10 of narrowPeak is the summit offset; -1 means absent.
                summit = int(vals[9])
                if summit == -1:
                    if warn_no_summit:
                        # BUG FIX: logger.warn is a deprecated alias; use warning.
                        logger.warning("No summit present in narrowPeak file, using the peak center.")
                        warn_no_summit = False
                    summit = (end - start) // 2
                # Center a fixed-width window on the summit.
                start = start + summit - (width // 2)
                end = start + width
                f_out.write("{}\t{}\t{}\t{}\n".format(vals[0], start, end, vals[6]))
    prepare_denovo_input_bed(bedfile, params, outdir)
|
def get_station_board(self, crs, rows=17, include_departures=True, include_arrivals=False, destination_crs=None, origin_crs=None):
    """Query the darwin webservice to obtain a board for a particular station
    and return a StationBoard instance.

    Positional arguments:
    crs -- the three letter CRS code of a UK station

    Keyword arguments:
    rows -- the number of rows to retrieve (default 17)
    include_departures -- include departing services in the board (default True)
    include_arrivals -- include arriving services in the board (default False)
    destination_crs -- filter results so they only include services
        calling at a particular destination (default None)
    origin_crs -- filter results so they only include services
        originating from a particular station (default None)
    """
    # Determine the darwin query we want to make
    if include_departures and include_arrivals:
        query_type = 'GetArrivalDepartureBoard'
    elif include_departures:
        query_type = 'GetDepartureBoard'
    elif include_arrivals:
        query_type = 'GetArrivalBoard'
    else:
        raise ValueError("get_station_board must have either include_departures or \
                include_arrivals set to True")
    # build a query function
    q = partial(self._base_query()[query_type], crs=crs, numRows=rows)
    if destination_crs:
        if origin_crs:
            # Darwin only supports a single filter; destination wins.
            log.warn("Station board query can only filter on one of \
                destination_crs and origin_crs, using only destination_crs")
        q = partial(q, filterCrs=destination_crs, filterType='to')
    elif origin_crs:
        q = partial(q, filterCrs=origin_crs, filterType='from')
    try:
        soap_response = q()
    except WebFault:
        # Wrap SOAP transport failures in our own exception type.
        raise WebServiceError
    return StationBoard(soap_response)
|
def mdot_t(self, ifig=None, lims=[7.4, 2.6, -8.5, -4.5], label=None, colour=None, s2ms=False, dashes=None):
    """Plot mass loss history as a function of log-time-left.

    Parameters
    ----------
    ifig : integer or string
        Figure label; if None the current figure is used.
    lims : list
        [x_lower, x_upper, y_lower, y_upper] axis limits.
        NOTE(review): mutable default argument — shared between calls;
        safe only while callers never mutate it.
    label : string
        Label for the model. Default None.
    colour : string
        The colour of the line. Default None.
    s2ms : boolean, optional
        "skip to main sequence".
    dashes : list, optional
        Custom dashing style. If None, ignore.
    """
    fsize = 18
    params = {'axes.labelsize': fsize,  # 'font.family': 'serif',
              'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize * 0.8, 'ytick.labelsize': fsize * 0.8, 'text.usetex': False}
    # 'text.fontsize' is rejected by newer matplotlib; failures are ignored.
    try:
        pl.rcParams.update(params)
    except:
        pass
    if ifig is not None:
        pl.figure(ifig)
    if s2ms:
        # Skip everything before central H has dropped by 3e-3
        # (i.e. start plotting at the main sequence).
        h1 = self.get('center_h1')
        idx = np.where(h1[0] - h1 >= 3.e-3)[0][0]
        skip = idx
    else:
        skip = 0
    gage = self.get('star_age')
    lage = np.zeros(len(gage))
    # Smallest resolvable age difference; avoids log10(0) at the last model.
    agemin = max(old_div(abs(gage[-1] - gage[-2]), 5.), 1.e-10)
    for i in np.arange(len(gage)):
        if gage[-1] - gage[i] > agemin:
            lage[i] = np.log10(gage[-1] - gage[i] + agemin)
        else:
            lage[i] = np.log10(agemin)
    x = lage[skip:]
    y = self.get('log_abs_mdot')[skip:]
    if ifig is not None:
        pl.figure(ifig)
    if label is not None:
        if colour is not None:
            line, = pl.plot(x, y, label=label, color=colour)
        else:
            line, = pl.plot(x, y, label=label)
    else:
        if colour is not None:
            line, = pl.plot(x, y, color=colour)
        else:
            line, = pl.plot(x, y)
    if dashes is not None:
        line.set_dashes(dashes)
    if label is not None:
        pl.legend(loc='best').draw_frame(False)
    pl.xlim(lims[:2])
    pl.ylim(lims[2:])
    pl.ylabel('$\mathrm{log}_{10}(\|\dot{M}\|/M_\odot\,\mathrm{yr}^{-1})$')
    pl.xlabel('$\mathrm{log}_{10}(t^*/\mathrm{yr})$')
|
def spdhg_generic(x, f, g, A, tau, sigma, niter, **kwargs):
    r"""Computes a saddle point with a stochastic PDHG.

    This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that

        (x*, y*) in arg min_x max_y sum_{i=1}^n <y_i, A_i x> - f*[i](y_i) + g(x)

    where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c.
    and proper functionals. For this algorithm, they all may be non-smooth
    and no strong convexity is assumed.

    Parameters
    ----------
    x : primal variable
        This variable is both input and output of the method.
    f : functions
        Functionals Y[i] -> IR_infty that all have a convex conjugate with
        a proximal operator, i.e. f[i].convex_conj.proximal(sigma[i]).
    g : function
        Functional X -> IR_infty that has a proximal operator g.proximal(tau).
    A : functions
        Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint.
    tau : scalar / vector / matrix
        Step size for the primal variable.
    sigma : scalar / vector / matrix
        Step size(s) for the dual variable.
    niter : int
        Number of iterations.

    Other Parameters
    ----------------
    y : dual variable, optional
        Element of a product space. By default equals 0.
    z : variable, optional
        Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
    mu_g : scalar
        Strong convexity constant of g; enables step-size acceleration.
    theta : scalar
        Global extrapolation factor.
    extra : list
        Local extrapolation parameters for every index i. Default 1 each.
    fun_select : function
        k -> subset of {0, ..., n-1}. Defaults to serial uniform sampling
        of a single index.
    callback : callable, optional
        Function called with [x, y] after each iteration.

    References
    ----------
    [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and
    C.-B. Schoenlieb, *Stochastic Primal-Dual Hybrid Gradient Algorithm
    with Arbitrary Sampling and Imaging Applications*.
    http://arxiv.org/abs/1706.04957 (2017).

    [E+2017] M. J. Ehrhardt et al., *Faster PET reconstruction with a
    stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity
    XVII, 58 (2017) http://doi.org/10.1117/12.2272946.
    """
    # Callback object
    callback = kwargs.pop('callback', None)
    if callback is not None and not callable(callback):
        raise TypeError('`callback` {} is not callable'.format(callback))

    # Dual variable (zero if not supplied)
    y = kwargs.pop('y', None)
    if y is None:
        y = A.range.zero()

    # Adjoint of the dual variable, z = A^* y
    z = kwargs.pop('z', None)
    if z is None:
        if y.norm() == 0:
            z = A.domain.zero()
        else:
            z = A.adjoint(y)

    # Strong convexity of g enables acceleration of the step sizes.
    mu_g = kwargs.pop('mu_g', None)
    update_proximal_primal = mu_g is not None

    # Global extrapolation factor theta
    theta = kwargs.pop('theta', 1)

    # Local (per-index) extrapolation factors
    extra = kwargs.pop('extra', None)
    if extra is None:
        extra = [1] * len(sigma)

    # Selection function: serial uniform sampling by default.
    fun_select = kwargs.pop('fun_select', None)
    if fun_select is None:
        def fun_select(k):
            # BUG FIX: the original passed a scalar `p=1 / len(A)` to
            # np.random.choice, which raises ValueError ("'p' must be
            # 1-dimensional").  Omitting `p` gives the intended uniform draw.
            return [int(np.random.choice(len(A)))]

    # Initialize temporaries
    z_relax = z.copy()
    dz = A.domain.element()
    y_old = A.range.element()

    # Pre-build the proximal operators
    proximal_dual_sigma = [fi.convex_conj.proximal(si) for fi, si in zip(f, sigma)]
    proximal_primal_tau = g.proximal(tau)

    # run the iterations
    for k in range(niter):
        # select the block(s) to update this iteration
        selected = fun_select(k)

        # update primal variable
        # tmp = x - tau * z_relax; z_relax used as tmp variable
        z_relax.lincomb(1, x, -tau, z_relax)
        # x = prox(tmp)
        proximal_primal_tau(z_relax, out=x)

        # update extrapolation parameter theta
        if update_proximal_primal:
            theta = float(1 / np.sqrt(1 + 2 * mu_g * tau))

        # update dual variable and z, z_relax
        z_relax.assign(z)
        for i in selected:
            # save old yi
            y_old[i].assign(y[i])
            # y[i] = prox(y_old + sigma_i * Ai(x))
            A[i](x, out=y[i])
            y[i].lincomb(1, y_old[i], sigma[i], y[i])
            proximal_dual_sigma[i](y[i], out=y[i])
            # update adjoint of dual variable: dz = Ai^*(y[i] - y_old[i])
            y_old[i].lincomb(-1, y_old[i], 1, y[i])
            A[i].adjoint(y_old[i], out=dz)
            z += dz
            # compute extrapolation
            z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz)

        # update the step sizes tau and sigma for acceleration
        if update_proximal_primal:
            for i in range(len(sigma)):
                sigma[i] /= theta
            tau *= theta
            proximal_dual_sigma = [fi.convex_conj.proximal(si) for fi, si in zip(f, sigma)]
            proximal_primal_tau = g.proximal(tau)

        if callback is not None:
            callback([x, y])
|
def split_code_at_show(text):
    """Split code into parts at each ``plt.show()`` call."""
    parts = []
    current = []
    # In doctest blocks the call appears behind the interpreter prompt.
    marker = '>>> plt.show()' if contains_doctest(text) else 'plt.show()'
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            parts.append("\n".join(current))
            current = []
    # Keep any non-empty trailing code after the last show().
    if "\n".join(current).strip():
        parts.append("\n".join(current))
    return parts
|
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
    """Generate pages from a list of .7z encoded history dumps.

    Args:
        corpus_files: a list of strings
        tmp_dir: a string
        max_page_size_exp: an integer

    Yields:
        strings
    """
    max_page_size = 2 ** max_page_size_exp
    for remote_filepath in corpus_files:
        local_path = maybe_copy_file_to_directory(remote_filepath, tmp_dir)
        tf.logging.info("Reading from " + local_path)
        # "-so" streams the archive contents to stdout instead of extracting.
        command = ["7z", "x", "-so", local_path]
        tf.logging.info("Running command: %s", command)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
        for page in file_page_generator(proc.stdout, max_page_size):
            yield page
|
def extract(self, searches, tree=None, as_dict=True):
    """Run a list of ``[key, selector(, formatter)]`` searches over the PDF.

    >>> foo = pdf.extract([['pages', 'LTPage']])
    >>> foo
    {'pages': [<LTPage>, <LTPage>]}
    >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
    {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>, ...
    """
    # Lazily parse the document on first use.
    if self.tree is None or self.pq is None:
        self.load()
    if tree is None:
        pq = self.pq
    else:
        pq = PyQuery(tree, css_translator=PDFQueryTranslator())
    results = []
    formatter = None
    parent = pq
    for search in searches:
        # 2-element searches inherit the currently active formatter.
        if len(search) < 3:
            search = list(search) + [formatter]
        key, search, tmp_formatter = search
        if key == 'with_formatter':
            if isinstance(search, six.string_types):
                # is a pyquery method name, e.g. 'text'
                # (search=search binds the loop value; avoids late binding)
                formatter = lambda o, search=search: getattr(o, search)()
            elif hasattr(search, '__call__') or not search:
                # is a method, or None to end formatting
                formatter = search
            else:
                raise TypeError("Formatter should be either a pyquery " "method name or a callable function.")
        elif key == 'with_parent':
            parent = pq(search) if search else pq
        else:
            try:
                result = parent("*").filter(search) if hasattr(search, '__call__') else parent(search)
            except cssselect.SelectorSyntaxError as e:
                raise cssselect.SelectorSyntaxError("Error applying selector '%s': %s" % (search, e))
            if tmp_formatter:
                result = tmp_formatter(result)
            # A tuple result extends the list directly; anything else is
            # stored as a [key, value] pair.
            results += result if type(result) == tuple else [[key, result]]
    if as_dict:
        results = dict(results)
    return results
|
def _partial(self):
    """Callback for partial output."""
    raw_stdout = self._process.readAllStandardOutput()
    text = handle_qbytearray(raw_stdout, self._get_encoding())
    # Accumulate output across callbacks.
    if self._partial_stdout is None:
        self._partial_stdout = text
    else:
        self._partial_stdout += text
    self.sig_partial.emit(self, text, None)
|
def _hilink_decrypt(self, encrypted_firmware):
    '''This does the actual decryption.'''
    cipher = DES.new(self.DES_KEY, DES.MODE_ECB)
    # The first 3 bytes are kept as-is; only the remainder is decrypted.
    header, body = encrypted_firmware[0:3], encrypted_firmware[3:]
    # Zero-pad to the 8-byte DES block size
    # (note: adds a full extra block when already aligned).
    body += b"\x00" * (8 - (len(body) % 8))
    stage1 = header + cipher.decrypt(body)
    stage1 += b"\x00" * (8 - (len(stage1) % 8))
    # Second decryption pass over the whole intermediate buffer.
    return cipher.decrypt(stage1)
|
def on_channel_open(self, channel):
    """This method is invoked by pika when the channel has been opened.

    The channel object is passed in so we can make use of it.
    Since the channel is now open, we'll declare the exchange to use.

    :param pika.channel.Channel channel: The channel object
    """
    logger.info('Channel opened', channel=channel)
    self._channel = channel
    # Register for close notifications before declaring the exchange.
    self.add_on_channel_close_callback()
    self.setup_exchange(self._exchange)
|
def send(self, stack: Layers):
    """Add a message stack to the send list.

    :param stack: layers to send; coerced to a ``Stack`` if not one already.
    :raises UnacceptableStack: if the platform rejects the stack.
    """
    if not isinstance(stack, Stack):
        stack = Stack(stack)
    if not self.platform.accept(stack):
        raise UnacceptableStack('The platform does not allow "{}"'.format(stack.describe()))
    self._stacks.append(stack)
|
def unify(self, matching=None, sources=None, fast_matching=False, no_strict_matching=False, interactive=False, recovery=False):
    """Merge unique identities using a matching algorithm.

    This method looks for sets of similar identities, merging those
    identities into one unique identity. To determine when two unique
    identities are likely the same, a matching algorithm will be given
    using the parameter <matching>. When this parameter is not given,
    the default algorithm will be used. Rigorous validation of matching
    values (i.e., well-formed email addresses) will be disabled when
    <no_strict_matching> is set to `True`.

    When <fast_matching> is set, it runs a fast algorithm to find matches
    between identities. This mode will consume more resources (i.e.,
    memory) but it is two orders of magnitude faster than the original.
    Not every matcher can support this mode. When this happens, an
    exception will be raised.

    When <interactive> is set to True, the user will have to confirm
    whether two identities should be merged into one. Default False.

    When a list of <sources> is given, only the unique identities from
    those sources will be unified.

    :param matching: type of matching used to merge existing identities
    :param sources: unify the unique identities from these sources only
    :param fast_matching: use the fast mode
    :param no_strict_matching: disable strict matching (i.e., well-formed email addresses)
    :param interactive: interactive mode for merging identities
    :param recovery: if enabled, the unify will read the matching identities
        stored in the recovery file (RECOVERY_FILE_PATH) and process them
    """
    matcher = None
    if not matching:
        matching = 'default'
    # Strict validation unless explicitly disabled by the caller.
    strict = not no_strict_matching
    self.recovery = recovery
    try:
        blacklist = api.blacklist(self.db)
        matcher = create_identity_matcher(matching, blacklist, sources, strict)
    except MatcherNotSupportedError as e:
        self.error(str(e))
        return e.code
    uidentities = api.unique_identities(self.db)
    try:
        self.__unify_unique_identities(uidentities, matcher, fast_matching, interactive)
        self.__display_stats()
    except MatcherNotSupportedError as e:
        self.error(str(e))
        return e.code
    except Exception as e:
        # Report whatever progress was made before surfacing the failure.
        self.__display_stats()
        raise RuntimeError(str(e))
    return CMD_SUCCESS
|
def get_duration(self):
    """Get duration formatted as "HHh MMm SSs".

    Example: "10h 20m 40s"

    :return: Formatted duration
    :rtype: str
    """
    # Decompose total seconds into hours, minutes, seconds.
    minutes, seconds = divmod(self.duration_sec, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02dh %02dm %02ds" % (hours, minutes, seconds)
|
def _apply_overrides ( settings , overrides , schema ) :
"""Get overrides config overlayed onto modules defaults .
: param modules : require stack modules config .
: returns : dictionary of modules config with user overrides applied ."""
|
if overrides :
for k , v in six . iteritems ( overrides ) :
if k in schema :
if schema [ k ] is None :
settings [ k ] = v
elif type ( schema [ k ] ) is dict :
settings [ k ] = _apply_overrides ( settings [ k ] , overrides [ k ] , schema [ k ] )
else :
raise Exception ( "Unexpected type found in schema '%s'" % type ( schema [ k ] ) , level = ERROR )
else :
log ( "Unknown override key '%s' - ignoring" % ( k ) , level = INFO )
return settings
|
def currentRepoTreeItemChanged(self):
    """Called to update the GUI when a repo tree item has changed or a new
    one was selected."""
    # When the model is empty the current index may be invalid and the
    # currentItem may be None.
    currentItem, currentIndex = self.getCurrentItem()
    hasCurrent = currentIndex.isValid()
    assert hasCurrent == (currentItem is not None), "If current idex is valid, currentIndex may not be None"
    # sanity check
    # Set the item in the collector, which will subsequently update the inspector.
    if hasCurrent:
        logger.info("Adding rti to collector: {}".format(currentItem.nodePath))
        self.collector.setRti(currentItem)
        # if rti.asArray is not None: # TODO: maybe later, first test how robust it is now
        #     self.collector.setRti(rti)
    # Update context menus in the repo tree
    self.currentItemActionGroup.setEnabled(hasCurrent)
    isTopLevel = hasCurrent and self.model().isTopLevelIndex(currentIndex)
    self.topLevelItemActionGroup.setEnabled(isTopLevel)
    # Open/close are only meaningful for items with children.
    self.openItemAction.setEnabled(currentItem is not None and currentItem.hasChildren() and not currentItem.isOpen)
    self.closeItemAction.setEnabled(currentItem is not None and currentItem.hasChildren() and currentItem.isOpen)
    # Emit sigRepoItemChanged signal so that, for example, details panes can update.
    logger.debug("Emitting sigRepoItemChanged: {}".format(currentItem))
    self.sigRepoItemChanged.emit(currentItem)
|
def single_read(self, register):
    '''Read data from the desired register only once.

    :param register: one of the AD7730_*_REG register addresses.
    :returns: the register payload bytes (communication echo stripped).
    :raises ValueError: for an unknown register address.
    '''
    # Communication register: read opcode (0b00010) in the high bits,
    # target register address in the low bits.
    comm_reg = (0b00010 << 3) + register
    # Payload size in bytes for each AD7730 register.
    sizes = {
        self.AD7730_STATUS_REG: 1,
        self.AD7730_DATA_REG: 3,
        self.AD7730_MODE_REG: 2,
        self.AD7730_FILTER_REG: 3,
        self.AD7730_DAC_REG: 1,
        self.AD7730_OFFSET_REG: 3,
        self.AD7730_GAIN_REG: 3,
        self.AD7730_TEST_REG: 3,
    }
    try:
        bytes_num = sizes[register]
    except KeyError:
        # BUG FIX: an unknown register previously fell through the
        # if/elif chain and crashed with NameError on `bytes_num`.
        raise ValueError("Unknown AD7730 register: %r" % (register,))
    command = [comm_reg] + ([0x00] * bytes_num)
    spi.SPI_write(self.CS, command)
    data = spi.SPI_read(bytes_num + 1)
    # The first byte echoes the communication register; drop it.
    return data[1:]
|
def add_transition(self, source: str, dest: str):
    """Adds a transition from one state to another.

    Args:
        source (str): the name of the state from where the transition starts
        dest (str): the name of the state where the transition ends
    """
    # Transitions are stored as an adjacency list keyed by source state.
    self._transitions[source].append(dest)
|
def create_alias(self):
    """Create lambda alias with env name and points it to $LATEST.

    :raises botocore ClientError: re-raised after logging so the caller's
        retry machinery can run.
    """
    LOG.info('Creating alias %s', self.env)
    try:
        self.lambda_client.create_alias(FunctionName=self.app_name, Name=self.env, FunctionVersion='$LATEST', Description='Alias for {}'.format(self.env))
    except boto3.exceptions.botocore.exceptions.ClientError as error:
        # Log the failure detail, then propagate for retry.
        LOG.debug('Create alias error: %s', error)
        LOG.info("Alias creation failed. Retrying...")
        raise
|
def quote(self, s):
    """Return a shell-escaped version of the string s."""
    # pipes.quote (Py2) and shlex.quote (Py3) are the same implementation;
    # pipes was removed in newer Python versions, hence the branch.
    if six.PY2:
        from pipes import quote
    else:
        from shlex import quote
    return quote(s)
|
def list_dms(archive, compression, cmd, verbosity, interactive):
    """List a DMS archive.

    :param archive: path to the archive; validated for a DMS extension.
    :param cmd: the lister executable to invoke.
    :returns: the command line (list of arguments) to run.

    NOTE(review): compression, verbosity and interactive are unused here;
    presumably required by the archive-handler calling convention — confirm.
    """
    check_archive_ext(archive)
    return [cmd, 'v', archive]
|
def zoompan(stream, **kwargs):
    """Apply Zoom & Pan effect.

    Args:
        zoom: Set the zoom expression. Default is 1.
        x: Set the x expression. Default is 0.
        y: Set the y expression. Default is 0.
        d: Set the duration expression in number of frames. This sets for
            how many frames the effect will last for a single input image.
        s: Set the output image size, default is ``hd720``.
        fps: Set the output frame rate, default is 25.
        z: Alias for ``zoom``.

    Official documentation: `zoompan <https://ffmpeg.org/ffmpeg-filters.html#zoompan>`__
    """
    # All keyword arguments are forwarded verbatim to the ffmpeg filter.
    return FilterNode(stream, zoompan.__name__, kwargs=kwargs).stream()
|
def get_yaps_by_name(root, name, afun=lambda x: x, default=None):
    """From XML root, return the value of the node matching attribute 'name'.

    Arguments:
        root (Element)  Root XML node (xml.etree.ElementTree Element).
            This is the root of the entire XML document, not the YAPS subtree.
        name (String)   name='name' attribute of the ParamLong tag to match.
        afun            Function applied to the string value; defaults to
            the identity function.
        default         Value returned when the node is not found.
    """
    xpath = "ParamMap[@name='YAPS']/ParamLong[@name='%s']/value" % name
    node = root.find(xpath)
    if node is None:
        return default
    return afun(node.text)
|
def _get_diff_text ( old , new ) :
'''Returns the diff of two text blobs .'''
|
diff = difflib . unified_diff ( old . splitlines ( 1 ) , new . splitlines ( 1 ) )
return '' . join ( [ x . replace ( '\r' , '' ) for x in diff ] )
|
def _add_method_to_class(target_cls, method_name, func, dataset_name, dataset, dataprovider, repeat_suffix, ):
    """Add the described test method to the given class.

    :param target_cls: test class to which to add a method (`class`).
    :param method_name: base name of the method to add (`unicode`).
    :param func: the underlying test function to call (`callable`).
    :param dataset_name: base name of the data set (`unicode` or None).
    :param dataset: tuple containing the args of the dataset (or None).
    :param dataprovider: unbound function responsible for generating the
        actual params passed to the test function (`callable` or None).
    :param repeat_suffix: suffix appended to the generated method's name
        (`unicode` or None).
    """
    # pylint: disable=too-many-arguments
    provider_name = dataprovider.__name__ if dataprovider else None
    final_name = _build_final_method_name(method_name, dataset_name, provider_name, repeat_suffix, )
    method = _build_test_method(func, dataset, dataprovider)
    # Carry over func's metadata (docstring, module, etc.).
    method = functools.update_wrapper(method, func, )
    final_name = encode_non_ascii_string(final_name, )
    method.__name__ = final_name
    # Mark the method so genty can recognize generated tests.
    method.genty_generated_test = True
    # Add the method to the class under the proper name
    setattr(target_cls, final_name, method)
|
def read_config(self, correlation_id, parameters):
    """Reads configuration and parameterizes it with given values.

    :param correlation_id: (optional) transaction id to trace execution
        through the call chain.
    :param parameters: values to parameterize the configuration, or null
        to skip parameterization.
    :return: ConfigParams configuration.
    """
    value = self._read_object(correlation_id, parameters)
    return ConfigParams.from_value(value)
|
def _evaluate(self):
    """Scan for orphaned records and retrieve any records that have not
    already been grabbed.

    :returns: values view of the refreshed element mapping.
    """
    retrieved_records = SortedDict()
    for record_id, record in six.iteritems(self._elements):
        if record is self._field._unset:
            # Record has not yet been retrieved, get it
            try:
                record = self.target_app.records.get(id=record_id)
            except SwimlaneHTTP400Error:
                # Record appears to be orphaned, don't include in set of elements.
                # BUG FIX: the record id was never interpolated into the
                # message (the '{}' placeholder was logged literally).
                logger.debug("Received 400 response retrieving record '{}', ignoring assumed orphaned record".format(record_id))
                continue
        retrieved_records[record_id] = record
    self._elements = retrieved_records
    return self._elements.values()
|
def create_ini(self, board, project_dir='', sayyes=False):
    """Creates a new apio project file"""
    project_dir = util.check_dir(project_dir)
    ini_path = util.safe_join(project_dir, PROJECT_FILENAME)
    # Check board
    if board not in Resources().boards.keys():
        click.secho('Error: no such board \'{}\''.format(board), fg='red')
        sys.exit(1)
    if not isfile(ini_path):
        # No existing file: create it outright.
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
        return
    # -- If sayyes, skip the question
    if sayyes:
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
        return
    click.secho('Warning: {} file already exists'.format(PROJECT_FILENAME), fg='yellow')
    if click.confirm('Do you want to replace it?'):
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
    else:
        click.secho('Abort!', fg='red')
|
def _is_leap_year ( year ) :
"""Determine if a year is leap year .
Parameters
year : numeric
Returns
isleap : array of bools"""
|
isleap = ( ( np . mod ( year , 4 ) == 0 ) & ( ( np . mod ( year , 100 ) != 0 ) | ( np . mod ( year , 400 ) == 0 ) ) )
return isleap
|
def time_boxed(func, iterable, time_budget, *args):
    """Apply a function to the items of an iterable within a given time budget.

    Loop the given iterable, calling the given function on each item. The
    expended time is compared to the given time budget (in milliseconds) after
    each iteration.
    """
    budget_seconds = time_budget / 1000  # budget is given in milliseconds
    started_at = time.time()
    for item in iterable:
        yield func(item, *args)
        # Checking after the yield guarantees at least one item is processed,
        # which is useful for testing.
        if time.time() - started_at > budget_seconds:
            return
|
def split_input(cls, job_config):
    """Returns a list of input readers.

    An equal number of input files are assigned to each shard (+/- 1). If there
    are fewer files than shards, fewer than the requested number of shards will
    be used. Input files are currently never split (although for some formats
    could be and may be split in a future implementation).

    Args:
        job_config: map_job.JobConfig

    Returns:
        A list of InputReaders. None when no input data can be found.
    """
    params = job_config.input_reader_params
    bucket = params[cls.BUCKET_NAME_PARAM]
    filenames = params[cls.OBJECT_NAMES_PARAM]
    delimiter = params.get(cls.DELIMITER_PARAM)
    account_id = params.get(cls._ACCOUNT_ID_PARAM)
    buffer_size = params.get(cls.BUFFER_SIZE_PARAM)
    path_filter = params.get(cls.PATH_FILTER_PARAM)

    # Gather the complete list of files (expanding wildcards)
    all_filenames = []
    for filename in filenames:
        if filename.endswith("*"):
            listing = cloudstorage.listbucket(
                "/" + bucket + "/" + filename[:-1],
                delimiter=delimiter,
                _account_id=account_id)
            all_filenames.extend(file_stat.filename for file_stat in listing)
        else:
            all_filenames.append("/%s/%s" % (bucket, filename))

    # Split into shards: shard k takes every shard_count-th file starting at k.
    readers = []
    for shard in range(job_config.shard_count):
        shard_filenames = all_filenames[shard::job_config.shard_count]
        if shard_filenames:
            readers.append(cls(shard_filenames,
                               buffer_size=buffer_size,
                               _account_id=account_id,
                               delimiter=delimiter,
                               path_filter=path_filter))
    return readers
|
def update_unnamed_class(decls):
    """Adds name to class_t declarations.

    If CastXML is being used, the type definitions with an unnamed
    class/struct are split across two nodes in the XML tree. For example,

        typedef struct {} cls;

    produces

        <Struct id="_7" name="" context="_1" .../>
        <Typedef id="_8" name="cls" type="_7" context="_1" .../>

    For each typedef, we look at which class it refers to, and update the name
    accordingly. This helps the matcher classes finding these declarations.
    This was the behaviour with gccxml too, so this is important for
    backward compatibility.

    If the castxml epic version 1 is used, there is even an elaborated type
    declaration between the typedef and the struct/class, that also needs to be
    taken care of.

    Args:
        decls (list[declaration_t]): a list of declarations to be patched.
    Returns:
        None
    """
    for decl in decls:
        if not isinstance(decl, declarations.typedef_t):
            continue
        referent = decl.decl_type
        # castxml epic version 1 wraps the class in an elaborated type;
        # unwrap it before looking at the declaration.
        if isinstance(referent, declarations.elaborated_t):
            referent = referent.base
        if not isinstance(referent, declarations.declarated_t):
            continue
        referent = referent.declaration
        # Only patch anonymous classes; anything already named is left alone.
        if not referent.name and isinstance(referent, declarations.class_t):
            referent.name = decl.name
|
def rank(self, member):
    """Return the zero-based rank (index) of *member*, or None if absent."""
    score = self._members.get(member)
    if score is None:
        return None
    # _scores is kept sorted as (score, member) pairs, so bisect finds the index.
    return bisect_left(self._scores, (score, member))
|
def check_sizes(size, width, height):
    """Check that these arguments, if supplied, are consistent.

    Return a (width, height) pair.
    """
    if not size:
        # No explicit size given: pass the individual dimensions through.
        return width, height
    if len(size) != 2:
        raise ProtocolError("size argument should be a pair (width, height)")
    size_w, size_h = size[0], size[1]
    if width is not None and width != size_w:
        raise ProtocolError("size[0] (%r) and width (%r) should match when both are used." % (size_w, width))
    if height is not None and height != size_h:
        raise ProtocolError("size[1] (%r) and height (%r) should match when both are used." % (size_h, height))
    return size
|
def get_countries_geo_zone_by_id(cls, countries_geo_zone_id, **kwargs):
    """Find CountriesGeoZone

    Return single instance of CountriesGeoZone by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_countries_geo_zone_by_id(countries_geo_zone_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str countries_geo_zone_id: ID of countriesGeoZone to return (required)
    :return: CountriesGeoZone
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The underlying call returns either the data (sync) or the request
    # thread (async=True) itself, so the result can be returned directly.
    result = cls._get_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
    return result
|
def emit(self, span_datas):
    """:type span_datas: list of :class:`~opencensus.trace.span_data.SpanData`
    :param list of opencensus.trace.span_data.SpanData span_datas:
        SpanData tuples to emit
    """
    project = 'projects/{}'.format(self.project_id)
    # Group the span data by trace id
    trace_span_map = defaultdict(list)
    for sd in span_datas:
        trace_span_map[sd.context.trace_id].append(sd)
    stackdriver_spans = []
    # Write spans to Stackdriver
    for grouped_sds in trace_span_map.values():
        # convert to the legacy trace json for easier refactoring
        # TODO: refactor this to use the span data directly
        trace = span_data.format_legacy_trace_json(grouped_sds)
        stackdriver_spans.extend(self.translate_to_stackdriver(trace))
    self.client.batch_write_spans(project, {'spans': stackdriver_spans})
|
def http_download(url, target_path):
    """Download file to local

    Args:
        - url (string): url request path
        - target_path (string): download destination

    Returns:
        the target_path that was written
    """
    response = requests.get(url, stream=True)
    with open(target_path, 'wb') as out_file:
        for chunk in response.iter_content(chunk_size=1024):
            # skip keep-alive chunks
            if not chunk:
                continue
            out_file.write(chunk)
    return target_path
|
def setStation(self, number):
    """Select the given station number, wrapping around both ends of the list."""
    # If we press up at the first station, we go to the last one
    # and if we press down on the last one we go back to the first one.
    last_index = len(self.stations) - 1
    if number < 0:
        number = last_index
    elif number > last_index:
        number = 0
    self.selection = number
    # Keep the selection visible by scrolling the display window.
    visible_rows = self.bodyMaxY - 2
    if self.selection - self.startPos >= visible_rows:
        self.startPos = self.selection - visible_rows + 1
    elif self.selection < self.startPos:
        self.startPos = self.selection
|
def clean_up(self, table, verbose=False):
    """Removes exact duplicates, blank records or data without a *source_id* from the specified **table**.
    Then finds possible duplicates and prompts for conflict resolution.

    Parameters
    ----------
    table: str
        The name of the table to remove duplicates, blanks, and data without source attributions.
    verbose: bool
        Print out some diagnostic messages

    Returns
    -------
    str or None
        'abort' if the user aborted the interactive clean up, otherwise None.
    """
    # Get the table info and all the records
    metadata = self.query("PRAGMA table_info({})".format(table), fmt='table')
    columns, types, required = [np.array(metadata[n]) for n in ['name', 'type', 'notnull']]
    # records = self.query("SELECT * FROM {}".format(table), fmt='table', use_converters=False)
    # Previously resolved "acceptable duplicate" pairs for this table.
    ignore = self.query("SELECT * FROM ignore WHERE tablename LIKE ?", (table,))
    duplicate, command = [1], ''
    # Remove records with missing required values
    req_keys = columns[np.where(required)]
    try:
        self.modify("DELETE FROM {} WHERE {}".format(table, ' OR '.join([i + ' IS NULL' for i in req_keys])), verbose=False)
        self.modify("DELETE FROM {} WHERE {}".format(table, ' OR '.join([i + " IN ('null','None','')" for i in req_keys])), verbose=False)
    except:
        # NOTE(review): bare except deliberately swallows failures (e.g. no
        # required columns produces malformed SQL) — best-effort clean up.
        pass
    # Remove exact duplicates
    self.modify("DELETE FROM {0} WHERE id NOT IN (SELECT min(id) FROM {0} GROUP BY {1})".format(table, ', '.join(columns[1:])), verbose=False)
    # Check for records with identical required values but different ids.
    if table.lower() != 'sources':
        req_keys = columns[np.where(np.logical_and(required, columns != 'id'))]
        # List of old and new pairs to ignore
        if not type(ignore) == np.ndarray:
            ignore = np.array([])
        new_ignore = []
        while any(duplicate):
            # Pull out duplicates one by one
            if 'source_id' not in columns:
                # Check if there is a source_id in the columns
                SQL = "SELECT t1.id, t2.id FROM {0} t1 JOIN {0} t2 ON t1.id=t2.id WHERE ".format(table)
            else:
                SQL = "SELECT t1.id, t2.id FROM {0} t1 JOIN {0} t2 ON t1.source_id=t2.source_id " "WHERE t1.id!=t2.id AND ".format(table)
            if any(req_keys):
                SQL += ' AND '.join(['t1.{0}=t2.{0}'.format(i) for i in req_keys]) + ' AND '
            # Exclude pairs already accepted (persisted and in-session).
            if any(ignore):
                SQL += ' AND '.join(["(t1.id NOT IN ({0}) AND t2.id NOT IN ({0}))".format(','.join(map(str, [id1, id2]))) for id1, id2 in zip(ignore['id1'], ignore['id2'])] if any(ignore) else '') + ' AND '
            if any(new_ignore):
                SQL += ' AND '.join(["(t1.id NOT IN ({0}) AND t2.id NOT IN ({0}))".format(','.join(map(str, ni))) for ni in new_ignore] if new_ignore else '') + ' AND '
            # Clean up empty WHERE at end if it's present (eg, for empty req_keys, ignore, and new_ignore)
            if SQL[-6:] == 'WHERE ':
                SQL = SQL[:-6]
            # Clean up hanging AND if present
            if SQL[-5:] == ' AND ':
                SQL = SQL[:-5]
            if verbose:
                print('\nSearching for duplicates with: {}\n'.format(SQL))
            duplicate = self.query(SQL, fetch='one')
            # Compare potential duplicates and prompt user for action on each
            try:
                # Run record matches through comparison and return the command
                command = self._compare_records(table, duplicate)
                # Add acceptable duplicates to ignore list or abort
                if command == 'keep':
                    new_ignore.append([duplicate[0], duplicate[1]])
                    self.list("INSERT INTO ignore VALUES(?,?,?,?)", (None, duplicate[0], duplicate[1], table.lower()))
                elif command == 'undo':
                    pass
                    # TODO: Add this functionality!
                elif command == 'abort':
                    break
                else:
                    pass
            except:
                # NOTE(review): also exits the loop when `duplicate` is empty
                # (indexing None/empty raises) — this is the normal loop exit.
                break
    # Finish or abort table clean up
    if command == 'abort':
        print('\nAborted clean up of {} table.'.format(table.upper()))
        return 'abort'
    else:
        print('\nFinished clean up on {} table.'.format(table.upper()))
|
def as_unicode(s, encoding='utf-8'):
    """Force conversion of given string to unicode type.

    Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x.
    If the string is already in unicode, then no conversion is done and the
    same string is returned.

    Parameters
    ----------
    s : str or bytes (Python3), str or unicode (Python2)
        The string to convert to unicode.
    encoding : str
        The encoding of the input string (default: utf-8)

    Raises
    ------
    ValueError
        In case an input of invalid type was passed to the function.

    Returns
    -------
    ``str`` for Python3 or ``unicode`` for Python 2.
    """
    # The two branches are disjoint type checks, so order does not matter.
    if isinstance(s, six.binary_type):
        return s.decode(encoding)
    if isinstance(s, six.text_type):
        return s
    raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
|
def attr_item_call_auto_cache(func):
    """Decorator for a single positional argument function to cache
    its results and to make ``f("a") == f["a"] == f.a``."""
    def __missing__(self, key):
        # dict.__missing__ hook: compute via the wrapped function on first
        # access, store the result, and return it (subsequent lookups hit
        # the dict directly).
        result = self[key] = func(key)
        return result
    # Build a dict subclass named after the function whose call, item and
    # attribute access all route through dict.__getitem__, then instantiate
    # it immediately — the instance is the cached callable.
    wrapper = type(snake2ucamel(func.__name__), (dict,), {
        "__missing__": __missing__,
        "__call__": dict.__getitem__,
        "__getattr__": dict.__getitem__,
        "__doc__": func.__doc__,  # Class docstring can't be updated afterwards
        "__module__": func.__module__,
    })()
    # Copy any attributes set on the original function (e.g. by other
    # decorators) onto the wrapper instance.
    for k, v in vars(func).items():
        setattr(wrapper, k, v)
    return wrapper
|
def get_exttype(self, num=False):
    """Get the extension type

    By default the result is a string that mirrors
    the enumerated type names in cfitsio
        'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL'
    which have numeric values 0, 1, 2.
    Send num=True to get the numbers. The values
    fitsio.IMAGE_HDU, .ASCII_TBL, and .BINARY_TBL
    are available for comparison.

    parameters
    ----------
    num: bool, optional
        Return the numeric values.
    """
    hdu_type = self._info['hdutype']
    return hdu_type if num else _hdu_type_map[hdu_type]
|
def set(self, key, value):
    """Sets the value for a specific requirement.

    :param key: Name of requirement to be set
    :param value: Value to set for requirement key
    :return: Nothing, modifies requirement
    """
    if key == "tags":
        # Tags have dedicated handling.
        self._set_tag(tags=value)
        return
    existing = self._requirements.get(key)
    if isinstance(value, dict) and isinstance(existing, dict):
        # Both old and new values are dicts: merge instead of replacing.
        self._requirements[key] = merge(existing, value)
    else:
        self._requirements[key] = value
|
def seq_str(self):
    """str: Get the sequence formatted as a string, or None if no sequence is set."""
    sequence = self.seq
    return ssbio.protein.sequence.utils.cast_to_str(sequence) if sequence else None
|
def col_to_cat(df, col_name, dest=False):
    """Coerces a column in a DataFrame to categorical

    Parameters:
    -----------
    df - DataFrame
        DataFrame to operate on
    col_name - string
        Name of column to coerce
    dest - bool, default False
        Whether to apply the result to the DataFrame or return it.
        True is apply, False is return.
    """
    categorical = df[col_name].astype('category')
    if not dest:
        return categorical
    # Apply in place on the DataFrame instead of returning the column.
    set_col(df, col_name, categorical)
|
def get_highest_version(versions):
    """Returns highest available version for a package in a list of versions

    Uses pkg_resources to parse the versions

    @param versions: List of PyPI package versions
    @type versions: List of strings
    @returns: string of a PyPI package version
    """
    # Rank by parsed version (ties broken by the raw string), highest first.
    ranked = sorted(((pkg_resources.parse_version(ver), ver) for ver in versions),
                    reverse=True)
    return ranked[0][1]
|
def getAddPerson(self):
    """Return an L{AddPersonFragment} which is a child of this fragment and
    which will add a person to C{self.organizer}."""
    add_person = AddPersonFragment(self.organizer)
    add_person.setFragmentParent(self)
    return add_person
|
def dump_table_as_insert_sql(engine: Engine, table_name: str, fileobj: TextIO, wheredict: Dict[str, Any] = None, include_ddl: bool = False, multirow: bool = False) -> None:
    """Reads a table from the database, and writes SQL to replicate the table's
    data to the output ``fileobj``.

    Args:
        engine: SQLAlchemy :class:`Engine`
        table_name: name of the table
        fileobj: file-like object to write to
        wheredict: optional dictionary of ``{column_name: value}`` to use as
            ``WHERE`` filters
        include_ddl: if ``True``, include the DDL to create the table as well
        multirow: write multi-row ``INSERT`` statements (currently forced off;
            see below)
    """
    # http://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query  # noqa
    # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
    # http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/  # noqa
    # https://github.com/plq/scripts/blob/master/pg_dump.py
    log.info("dump_data_as_insert_sql: table_name={}", table_name)
    # Header comments for this table's section of the dump.
    writelines_nl(fileobj, [SEP1, sql_comment("Data for table: {}".format(table_name)), SEP2, sql_comment("Filters: {}".format(wheredict)), ])
    dialect = engine.dialect
    if not dialect.supports_multivalues_insert:
        multirow = False
    if multirow:
        # Multirow literal binding is known-broken; force single-row mode.
        log.warning("dump_data_as_insert_sql: multirow parameter substitution " "not working yet")
        multirow = False
    # literal_query = make_literal_query_fn(dialect)
    meta = MetaData(bind=engine)
    log.debug("... retrieving schema")
    # autoload=True reflects the table definition from the live database.
    table = Table(table_name, meta, autoload=True)
    if include_ddl:
        log.debug("... producing DDL")
        dump_ddl(table.metadata, dialect_name=engine.dialect.name, fileobj=fileobj)
    # NewRecord = quick_mapper(table)
    # columns = table.columns.keys()
    log.debug("... fetching records")
    # log.debug("meta: {}", meta)  # obscures password
    # log.debug("table: {}", table)
    # log.debug("table.columns: {!r}", table.columns)
    # log.debug("multirow: {}", multirow)
    query = select(table.columns)
    if wheredict:
        # Apply equality filters; each entry ANDs another WHERE clause.
        for k, v in wheredict.items():
            col = table.columns.get(k)
            query = query.where(col == v)
    # log.debug("query: {}", query)
    cursor = engine.execute(query)
    if multirow:
        # Dead branch while multirow is forced False above; kept for when
        # multirow literal binding is fixed.
        row_dict_list = []
        for r in cursor:
            row_dict_list.append(dict(r))
        # log.debug("row_dict_list: {}", row_dict_list)
        if row_dict_list:
            statement = table.insert().values(row_dict_list)
            # log.debug("statement: {!r}", statement)
            # insert_str = literal_query(statement)
            insert_str = get_literal_query(statement, bind=engine)
            # NOT WORKING FOR MULTIROW INSERTS. ONLY SUBSTITUTES FIRST ROW.
            writeline_nl(fileobj, insert_str)
        else:
            writeline_nl(fileobj, sql_comment("No data!"))
    else:
        # One INSERT statement per row, with literal values substituted.
        found_one = False
        for r in cursor:
            found_one = True
            row_dict = dict(r)
            statement = table.insert(values=row_dict)
            # insert_str = literal_query(statement)
            insert_str = get_literal_query(statement, bind=engine)
            # log.debug("row_dict: {}", row_dict)
            # log.debug("insert_str: {}", insert_str)
            writeline_nl(fileobj, insert_str)
        if not found_one:
            writeline_nl(fileobj, sql_comment("No data!"))
    writeline_nl(fileobj, SEP2)
    log.debug("... done")
|
def _is_proper_sequence ( seq ) :
"""Returns is seq is sequence and not string ."""
|
return ( isinstance ( seq , collections . abc . Sequence ) and not isinstance ( seq , str ) )
|
def monkey_patch_override_instance_method(instance):
    """Override an instance method with a new version of the same name. The
    original method implementation is made available within the override
    method as `_original_<METHOD_NAME>`."""
    def perform_override(override_fn):
        method_name = override_fn.__name__
        backup_name = '_original_' + method_name
        # Stash the original implementation only once, so repeated overrides
        # keep the very first version under the backup name.
        if not hasattr(instance, backup_name):
            setattr(instance, backup_name, getattr(instance, method_name))
        # Bind the override to the instance and install it.
        setattr(instance, method_name, override_fn.__get__(instance))
    return perform_override
|
def quote_selection(self):
    """Quotes selected cells, marks content as changed"""
    selection = self.get_selection()
    active_table = self.grid.current_table
    for cell_key in self.grid.code_array.dict_grid.keys():
        row, col, tab = cell_key
        if tab == active_table and (row, col) in selection:
            self.grid.actions.quote_code(cell_key)
    # Cached results are stale after quoting, so drop them.
    self.grid.code_array.result_cache.clear()
|
def boxcox(X):
    """Gaussianize X using the Box-Cox transformation: [samples x phenotypes]

    - each phenotype is brought to a positive scale by first subtracting the
      minimum value and adding 1.
    - then each phenotype is transformed by the Box-Cox transformation.

    :param X: (samples x phenotypes) array; NaN entries are passed through
        untransformed
    :return: (X_transformed, maxlog) where maxlog[i] is the lambda fitted for
        column i
    """
    # Use numpy directly: scipy removed its numpy-alias namespace
    # (sp.zeros_like / sp.zeros / sp.isnan are gone in modern SciPy).
    import numpy as np
    X_transformed = np.zeros_like(X)
    maxlog = np.zeros(X.shape[1])
    for i in range(X.shape[1]):
        nan_mask = np.isnan(X[:, i])
        values = X[~nan_mask, i]
        # NaNs are copied through unchanged.
        X_transformed[nan_mask, i] = X[nan_mask, i]
        X_transformed[~nan_mask, i], maxlog[i] = st.boxcox(values - values.min() + 1.0)
    return X_transformed, maxlog
|
def base26(x, _alphabet=string.ascii_uppercase):
    """Return positive ``int`` ``x`` as string in bijective base26 notation.

    >>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
    ['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']
    >>> base26(344799)  # 19*26**3 + 16*26**2 + 1*26**1 + 13*26**0
    'SPAM'
    >>> base26(256)
    'IV'
    """
    digits = []
    while x:
        x, remainder = divmod(x, 26)
        if remainder == 0:
            # Bijective numeration has no zero digit: borrow from the next place.
            x -= 1
            remainder = 26
        digits.append(_alphabet[remainder - 1])
    digits.reverse()
    return ''.join(digits)
|
def fire_event(self, evt_name, *args, **kwargs):
    """Fire an event.

    :params evt_name: the event name
    :params args: positional arguments forwarded to each listener
    :params kwargs: keyword arguments forwarded to each listener
    """
    # Resolve listeners first, then build the event object (original order).
    listeners = self.__get_listeners(evt_name)
    event = self.generate_event(evt_name)
    for listener in listeners:
        listener(event, *args, **kwargs)
|
def get_role_mapping(self, name=None, params=None):
    """`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html>`_

    :arg name: Role-Mapping name
    """
    url = _make_path("_security", "role_mapping", name)
    return self.transport.perform_request("GET", url, params=params)
|
def COOKIES(self):
    """Cookies parsed into a dictionary. Signed cookies are NOT decoded
    automatically. See :meth:`get_cookie` for details."""
    parsed = SimpleCookie(self.headers.get('Cookie', ''))
    # Map each morsel to its plain key/value pair.
    return {morsel.key: morsel.value for morsel in parsed.values()}
|
def plot_tree(ax, tree, plane='xy', diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):
    '''Plots a 2d figure of the tree's segments

    Args:
        ax (matplotlib axes): on what to plot
        tree (neurom.core.Tree or neurom.core.Neurite): plotted tree
        plane (str): Any pair of 'xyz'
        diameter_scale (float): Scale factor multiplied with segment diameters before plotting
        linewidth (float): all segments are plotted with this width, but only if diameter_scale=None
        color (str or None): Color of plotted values, None corresponds to default choice
        alpha (float): Transparency of plotted values

    Note:
        If the tree contains one single point the plot will be empty
        since no segments can be constructed.
    '''
    first_col, second_col = _plane2col(plane)
    segments = []
    for seg in iter_segments(tree):
        begin, end = seg[0], seg[1]
        segments.append(((begin[first_col], begin[second_col]),
                         (end[first_col], end[second_col])))
    width = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)
    segment_color = _get_color(color, tree.type)
    ax.add_collection(LineCollection(segments, color=segment_color, linewidth=width, alpha=alpha))
|
def ReadSerializableArray(self, class_name, max=sys.maxsize):
    """Deserialize a stream into the object specified by `class_name`.

    Args:
        class_name (str): a full path to the class to be deserialized into, e.g. 'neo.Core.Block.Block'
        max (int): (Optional) maximum number of bytes to read.

    Returns:
        list: list of `class_name` objects deserialized from the stream.
    """
    module_path, _, klass_name = class_name.rpartition('.')
    klass = getattr(importlib.import_module(module_path), klass_name)
    length = self.ReadVarInt(max=max)
    items = []
    try:
        for _ in range(length):
            obj = klass()
            obj.Deserialize(self)
            items.append(obj)
    except Exception as e:
        # Deserialization is best-effort: log and return what was read so far.
        logger.error("Couldn't deserialize %s " % e)
    return items
|
def min(self):
    """Return the minimum of ``self``.

    See Also
    --------
    numpy.amin
    max
    """
    # Take each component's own minimum, then the overall minimum of those.
    per_component = [component.ufuncs.min() for component in self.elem]
    return np.min(per_component)
|
def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'):
    """Sort by the values along either axis.

    Parameters
    ----------
    %(optional_by)s
    axis : %(axes_single_arg)s, default 0
        Axis to be sorted.
    ascending : bool or list of bool, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders. If this is a list of bools, must match the length of
        the by.
    inplace : bool, default False
        If True, perform operation in-place.
    kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
        Choice of sorting algorithm. See also ndarray.np.sort for more
        information. `mergesort` is the only stable algorithm. For
        DataFrames, this option is only applied when sorting on a single
        column or label.
    na_position : {'first', 'last'}, default 'last'
        Puts NaNs at the beginning if `first`; `last` puts NaNs at the
        end.

    Returns
    -------
    sorted_obj : DataFrame or None
        DataFrame with sorted values if inplace=False, None otherwise.

    Examples
    --------
    >>> df = pd.DataFrame({
    ...     'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'],
    ...     'col2' : [2, 1, 9, 8, 7, 4],
    ...     'col3': [0, 1, 9, 4, 2, 3],
    ... })
    >>> df
        col1 col2 col3
    0   A    2    0
    1   A    1    1
    2   B    9    9
    3   NaN  8    4
    4   D    7    2
    5   C    4    3

    Sort by col1

    >>> df.sort_values(by=['col1'])
        col1 col2 col3
    0   A    2    0
    1   A    1    1
    2   B    9    9
    5   C    4    3
    4   D    7    2
    3   NaN  8    4

    Sort by multiple columns

    >>> df.sort_values(by=['col1', 'col2'])
        col1 col2 col3
    1   A    1    1
    0   A    2    0
    2   B    9    9
    5   C    4    3
    4   D    7    2
    3   NaN  8    4

    Sort Descending

    >>> df.sort_values(by='col1', ascending=False)
        col1 col2 col3
    4   D    7    2
    5   C    4    3
    2   B    9    9
    0   A    2    0
    1   A    1    1
    3   NaN  8    4

    Putting NAs first

    >>> df.sort_values(by='col1', ascending=False, na_position='first')
        col1 col2 col3
    3   NaN  8    4
    4   D    7    2
    5   C    4    3
    2   B    9    9
    0   A    2    0
    1   A    1    1
    """
    # Abstract on this class; concrete subclasses (e.g. Series/DataFrame)
    # provide the implementation. Panel/Panel4D never implemented it.
    raise NotImplementedError("sort_values has not been implemented " "on Panel or Panel4D objects.")
|
def cdf(data, mode='continuous', **kwargs):
    '''Return cumulative density.

    :arguments:
        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:
        **P** (``<numpy.ndarray>``)
            Cumulative probability.
        **x** (``<numpy.ndarray>``)
            Data points.
    '''
    # Probabilities run evenly from 0 to 1 over the sorted data points.
    probabilities = np.linspace(0.0, 1.0, len(data))
    sorted_points = np.sort(data)
    return (probabilities, sorted_points)
|
def parse_osm_node(response):
    """Parse points from OSM nodes.

    Parameters
    ----------
    response : JSON
        Nodes from OSM response.

    Returns
    -------
    Dict of vertex IDs and their lat, lon coordinates, or None when the node
    has invalid geometry.
    """
    # BUG FIX: initialize poi so that a failure before the dict is built does
    # not raise UnboundLocalError at the return statement.
    poi = None
    try:
        point = Point(response['lon'], response['lat'])
        poi = {'osmid': response['id'], 'geometry': point}
        if 'tags' in response:
            for tag in response['tags']:
                poi[tag] = response['tags'][tag]
    except Exception:
        log('Point has invalid geometry: {}'.format(response['id']))
    return poi
|
def _attach_dummy_intf_rtr(self, tenant_id, tenant_name, rtr_id):
    """Function to create a dummy router and interface.

    Creates a dummy service network/subnet for the tenant's firewall and
    attaches the subnet to the given router. On any failure the created
    network is rolled back and (None, None) is returned; otherwise returns
    (net_id, subnet_id).
    """
    serv_obj = self.get_service_obj(tenant_id)
    fw_dict = serv_obj.get_fw_dict()
    fw_id = fw_dict.get('fw_id')
    # Dummy network name embeds the firewall id prefix and suffix so the
    # network can be associated back to its firewall.
    rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (fw_id[len(fw_id) - 4:])
    net_id, subnet_id = self.os_helper.create_network(rtr_nwk, tenant_id, self.servicedummy_ip_subnet)
    if net_id is None or subnet_id is None:
        return None, None
    net_dict = {}
    net_dict['name'] = rtr_nwk
    self.store_net_db(tenant_id, net_id, net_dict, 'SUCCESS')
    subnet_lst = set()
    subnet_lst.add(subnet_id)
    if rtr_id is None:
        # No router to attach to: roll back the network we just created.
        self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id, net_id)
        return None, None
    ret = self.os_helper.add_intf_router(rtr_id, tenant_id, subnet_lst)
    if not ret:
        # Attach failed: roll back the network as well.
        self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id, net_id)
        return None, None
    return net_id, subnet_id
|
def show(cls, report_name, data):
    """Shows a report by issuing a GET request to the /reports/report_name
    endpoint.

    Args:
        `report_name`: the name of the report to show
        `data`: the parameters for the report
    """
    connection = Qubole.agent()
    return connection.get(cls.element_path(report_name), data)
|
def preprocess(self, nb_man, resources, km=None):
    """Wraps the parent class process call slightly"""
    with self.setup_preprocessor(nb_man.nb, resources, km=km):
        if self.log_output:
            self.log.info("Executing notebook with kernel: {}".format(self.kernel_name))
        nb, resources = self.papermill_process(nb_man, resources)
        # Record the kernel's language info on the notebook metadata.
        kernel_info = self._wait_for_reply(self.kc.kernel_info())
        nb.metadata['language_info'] = kernel_info['content']['language_info']
        self.set_widgets_metadata()
        return nb, resources
|
def get_placeholder_data(self, request, obj):
    """Get placeholder data from the object's layout, falling back to a
    single 'main' placeholder when no layout is available."""
    layout = getattr(obj, 'layout', None) if obj else None
    if layout:
        return layout.get_placeholder_data()
    return [PlaceholderData(slot='main', role='m', title='Main')]
|
def _config2von ( self , config : dict , access : str = None ) -> dict :
"""Given a configuration dict with indy and possibly more configuration values , return the
corresponding VON wallet configuration dict from current default and input values .
: param config : input configuration
: param access : access credentials value
: return : configuration dict for VON wallet with VON - specific entries"""
|
rv = { k : config . get ( k , self . _defaults [ k ] ) for k in ( 'auto_create' , 'auto_remove' ) }
rv [ 'access' ] = access or self . default_access
for key in ( 'seed' , 'did' , 'link_secret_label' ) :
if key in config :
rv [ key ] = config [ key ]
return rv
|
def _logmessage_transform ( cls , s , by = 2 ) :
"""Preprocess / cleanup a bzr log message before parsing
Args :
s ( str ) : log message string
by ( int ) : cutoff threshold for log message length
Returns :
str : preprocessed log message string"""
|
if len ( s ) >= by :
return s [ by : ] . strip ( '\n' )
return s . strip ( '\n' )
|
async def _wait ( self ) :
'''Wait on the other editatoms who are constructing nodes my new nodes refer to'''
|
for buid in self . otherbldgbuids :
nodeevnt = self . allbldgbuids . get ( buid )
if nodeevnt is None :
continue
await nodeevnt [ 1 ] . wait ( )
|
def word_frame_pos(self, _id):
    """Return the (left, right) frame positions of word *_id*.

    Positions are derived by scaling the stored word times down by 1000;
    the right edge is forced to be at least one frame past the left.
    """
    start, end = self.words[_id][0], self.words[_id][1]
    left = int(start / 1000)
    right = max(left + 1, int(end / 1000))
    return (left, right)
|
def _index ( self , row ) :
"""Add a row to the internal list of rows without writing it to disk .
This function should keep the data structure consistent so it ' s usable
for both adding new rows , and loading pre - existing histories ."""
|
self . rows . append ( row )
self . _keys . update ( row . keys ( ) )
self . _steps += 1
|
def get_config_file():
    """Return the loaded config file if one exists.

    Searches /etc and the home directory for a dagobahd YAML config.
    If none is found, writes the standard config to ~/.dagobahd.yml and
    returns its parsed contents.

    Fixes vs. previous version: Python 2 ``print`` statement (a syntax
    error under Python 3, which this file otherwise targets), bare
    ``except: pass`` that hid every error, unsafe ``yaml.load`` without
    a safe loader, and file handles left open on error.
    """
    # config will be created here if we can't find one
    new_config_path = os.path.expanduser('~/.dagobahd.yml')
    config_dirs = ['/etc', os.path.expanduser('~')]
    config_filenames = ['dagobahd.yml', 'dagobahd.yaml', '.dagobahd.yml', '.dagobahd.yaml']
    for directory in config_dirs:
        for filename in config_filenames:
            candidate = os.path.join(directory, filename)
            try:
                if not os.path.isfile(candidate):
                    continue
                with open(candidate) as to_load:
                    # safe_load: config files are plain data, never objects
                    config = yaml.safe_load(to_load.read())
            except (OSError, IOError, yaml.YAMLError):
                # best effort: an unreadable or malformed candidate should
                # not abort the search for a usable config file
                continue
            replace_nones(config)
            return config
    # if we made it to here, need to create a config file
    # double up on notifications here to make sure first-time user sees it
    print('Creating new config file in home directory')
    logging.info('Creating new config file in home directory')
    standard_conf = return_standard_conf()
    with open(new_config_path, 'w') as new_config:
        new_config.write(standard_conf)
    # parse the content we just wrote instead of reopening the file
    config = yaml.safe_load(standard_conf)
    replace_nones(config)
    return config
|
def add_type(self, new_name, orig_names):
    """Record the typedef'd name for *orig_names*.

    Resolves the last of *orig_names* to its core names and stores the
    expanded list under *new_name* in the current scope's type table.
    """
    self._dlog("adding a type '{}'".format(new_name))
    # TODO do we allow clobbering of types???
    expanded = copy.copy(orig_names)
    core_names = self._resolve_name(expanded[-1])
    if core_names is not None:
        # swap the unresolved tail for its resolved core names
        expanded.pop()
        expanded.extend(core_names)
    self._curr_scope["types"][new_name] = expanded
|
def delete(self, request, **resources):
    """Default DELETE method. Allows bulk delete.

    :return django.http.response: empty response
    """
    target = resources.get(self._meta.name)
    if not target:
        raise HttpError("Bad request", status=status.HTTP_404_NOT_FOUND)
    # as_tuple lets a single object and a collection share one code path
    for obj in as_tuple(target):
        obj.delete()
    return HttpResponse("")
|
def _capitalize_word ( text , pos ) :
"""Capitalize the current ( or following ) word ."""
|
while pos < len ( text ) and not text [ pos ] . isalnum ( ) :
pos += 1
if pos < len ( text ) :
text = text [ : pos ] + text [ pos ] . upper ( ) + text [ pos + 1 : ]
while pos < len ( text ) and text [ pos ] . isalnum ( ) :
pos += 1
return text , pos
|
def generate(self, number=None):
    """Generate vectors from the Gaussian.

    @param number: optional, if given, generates more than one vector.
    @returns: generated vector(s): a one dimensional array of shape (d,)
        when number is None, a two dimensional array of shape (n, d)
        when number is given.
    """
    if number is not None:
        return numpy.random.multivariate_normal(self.mu, self.sigma, number)
    return numpy.random.multivariate_normal(self.mu, self.sigma)
|
def all_input(self):
    """Return all input files as a dict of {filename: feffio object}."""
    inputs = {"HEADER": self.header(), "PARAMETERS": self.tags}
    # reciprocal-space calculations take no POTENTIALS/ATOMS input
    if "RECIPROCAL" not in self.tags:
        inputs["POTENTIALS"] = self.potential
        inputs["ATOMS"] = self.atoms
    return inputs
|
def dump(self, dest_pattern="{id}.jpg", override=True, max_size=None, bits=8,
         contrast=None, gamma=None, colormap=None, inverse=None):
    """Download the image with optional image modifications.

    Parameters
    ----------
    dest_pattern : str, optional
        Destination path for the downloaded image. "{X}" patterns are
        replaced by the value of X attribute if it exists.
    override : bool, optional
        True if a file with same name can be overrided by the new file.
    max_size : int, tuple, optional
        Maximum size (width or height) of returned image. None to get
        original size.
    bits : int (8,16,32) or str ("max"), optional
        Bit depth (bit per channel) of returned image. "max" returns the
        original image bit depth.
    contrast : float, optional
        Optional contrast applied on returned image.
    gamma : float, optional
        Optional gamma applied on returned image.
    colormap : int, optional
        Cytomine identifier of a colormap to apply on returned image.
    inverse : bool, optional
        True to inverse color mapping, False otherwise.

    Returns
    -------
    downloaded : bool
        True if everything happens correctly, False otherwise. As a side
        effect, object attribute "filename" is filled with downloaded
        file path.
    """
    if self.id is None:
        raise ValueError("Cannot dump an annotation with no ID.")
    # substitute "{attr}" placeholders with matching attribute values;
    # missing attributes become "_"
    pattern = re.compile("{(.*?)}")
    dest_pattern = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), dest_pattern)
    destination = os.path.dirname(dest_pattern)
    filename, extension = os.path.splitext(os.path.basename(dest_pattern))
    extension = extension[1:]
    if extension not in ("jpg", "png", "tif", "tiff"):
        extension = "jpg"
    # BUG FIX: when dest_pattern has no directory component (e.g. the
    # default "{id}.jpg"), destination is "" and os.makedirs("") raises.
    # Only create real, missing directories.
    if destination and not os.path.exists(destination):
        os.makedirs(destination)
    if isinstance(max_size, tuple) or max_size is None:
        # NOTE(review): a tuple max_size falls back to the original size,
        # same as None — presumably intentional; confirm against the API.
        max_size = max(self.width, self.height)
    parameters = {"maxSize": max_size, "contrast": contrast, "gamma": gamma,
                  "colormap": colormap, "inverse": inverse, "bits": bits}
    file_path = os.path.join(destination, "{}.{}".format(filename, extension))
    # derive the download URL from the preview URL: drop the query string
    # and swap the extension
    url = self.preview[:self.preview.index("?")]
    url = url.replace(".png", ".{}".format(extension))
    result = Cytomine.get_instance().download_file(url, file_path, override, parameters)
    if result:
        self.filename = file_path
    return result
|
def validate_statusline(self, valid_statusline):
    """Check that the statusline is valid, i.e. starts with a positive
    numeric code. If not, replace it with *valid_statusline*.

    :param valid_statusline: fallback statusline installed on failure
    :returns: True when the existing statusline was valid, False when it
        was replaced.
    """
    try:
        code = int(self.get_statuscode())
    except ValueError:
        # non-numeric status code: treat as invalid
        code = 0
    # BUG FIX: the previous version used `assert code > 0`, which is
    # stripped under `python -O`, silently accepting non-positive codes.
    if code > 0:
        return True
    self.statusline = valid_statusline
    return False
|
def hotp(key, counter, format='dec6', hash=hashlib.sha1):
    '''Compute a HOTP value as prescribed by RFC4226.

    :param key:
        the HOTP secret key given as an hexadecimal string
    :param counter:
        the OTP generation counter
    :param format:
        the output format; one of:
        - hex, a variable length hexadecimal format,
        - hex-notrunc, a 40 characters hexadecimal non-truncated format,
        - bin, the raw HMAC digest,
        - dec, the full truncated decimal value,
        - dec4, dec6 (default), dec7, dec8: fixed-width decimal formats.
    :param hash:
        the hash module (usually from the hashlib package) to use,
        it defaults to hashlib.sha1.
    :returns:
        a string representation of the OTP value (as instructed by the
        format parameter).

    Examples:
        >>> hotp('343434', 2, format='dec6')
        '791903'
    '''
    bin_hotp = __hotp(key, counter, hash)
    # fixed-width decimal formats share one code path
    decimal_widths = {'dec4': 4, 'dec6': 6, 'dec7': 7, 'dec8': 8}
    if format in decimal_widths:
        return dec(bin_hotp, decimal_widths[format])
    if format == 'hex':
        return '%x' % truncated_value(bin_hotp)
    if format == 'hex-notrunc':
        return _utils.tohex(bin_hotp)
    if format == 'bin':
        return bin_hotp
    if format == 'dec':
        return str(truncated_value(bin_hotp))
    raise ValueError('unknown format')
|
def is_broker_action_done(action, rid=None, unit=None):
    """Check whether a broker action has completed yet.

    @param action: name of action to be performed
    @returns True if action complete otherwise False
    """
    rdata = relation_get(rid, unit) or {}
    broker_rsp = rdata.get(get_broker_rsp_key())
    if not broker_rsp:
        return False
    rsp = CephBrokerRsp(broker_rsp)
    # unit name after the "unit/" prefix, e.g. "0" from "ceph/0"
    unit_name = local_unit().partition('/')[2]
    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
    stored_request_id = kv().get(key=key)
    # done only when the stored request id matches the broker response
    return bool(stored_request_id and stored_request_id == rsp.request_id)
|
def get_random_giphy(phrase):
    """Return the URL of a random GIF related to *phrase*, if possible.

    Raises ValueError when the search yields no results.
    """
    # giphypop emits deprecation-style warnings; silence them locally
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        api = giphypop.Giphy()
        results = api.search_list(phrase=phrase, limit=100)
        if not results:
            raise ValueError('There were no results for that phrase')
        return random.choice(results).media_url
|
def remove_custom_field_setting(self, project, params=None, **options):
    """Remove a custom field setting on the project.

    Parameters
    ----------
    project : {Id} The project to associate the custom field with
    [data] : {Object} Data for the request
      - [custom_field] : {Id} The id of the custom field to remove from
        this project.
    """
    # BUG FIX: the default was a shared mutable `params={}`; use a None
    # sentinel so each call gets a fresh dict.
    if params is None:
        params = {}
    path = "/projects/%s/removeCustomFieldSetting" % (project)
    return self.client.post(path, params, **options)
|
def blend(self, cycles=1):
    """Expand the existing Palette in place by inserting the blending
    colour between all Colours already in the Palette.

    args:
        cycles (int): number of *blend* cycles to apply. (Default is 1)

    Each cycle inserts one intermediate colour between every adjacent
    pair, so repeated cycles quickly produce a long series of
    intermediate colours.

    .. seealso:: :py:func:`colourettu.blend`
    """
    for _ in range(int(cycles)):
        expanded = []
        previous = None
        for colour in self._colours:
            if previous is not None:
                # module-level blend() produces the intermediate colour
                expanded.append(blend(colour, previous))
            expanded.append(colour)
            previous = colour
        self._colours = expanded
|
def add_permission_by_name(self, code, save=False):
    """Add permission(s) whose code name contains *code*.

    Args:
        code (str): Code name (substring) of the permission.
        save (bool): If False, only list the matching permissions
            without modifying anything.

    Returns:
        When save is False, a list of "name | code" strings describing
        the matching permissions; otherwise None.
    """
    matches = list(Permission.objects.filter(code__contains=code))
    if not save:
        return ["%s | %s" % (p.name, p.code) for p in matches]
    for p in matches:
        if p not in self.Permissions:
            self.Permissions(permission=p)
    # BUG FIX: the previous code read the loop variable after the loop,
    # raising NameError when no permission matched; save only when at
    # least one match was found.
    if matches:
        self.save()
|
def rebuildDay(self, opt):
    """Rebuilds the scale for the day mode.

    Recomputes all cached geometry (grid lines, per-hour header labels,
    per-day top labels, alternating day-background rects) for the given
    render options, resizes the scene to fit, and clears the dirty flag.

    :param      opt | <XGanttRenderOptions>
    """
    # reset all cached geometry from any previous rebuild
    self._labels = []
    self._hlines = []
    self._vlines = []
    self._weekendRects = []
    self._alternateRects = []
    self._topLabels = []
    top_format = 'dddd MMMM dd'   # per-day header, e.g. "Monday January 01"
    label_format = 'ha'           # per-cell hour label
    increment = 60
    # hour
    # generate vertical lines
    x = 0
    i = 0
    half = opt.header_height / 2.0  # header split: top = day, bottom = hour
    curr = QDateTime(opt.start, QTime(0, 0, 0))
    end = QDateTime(opt.end, QTime(23, 0, 0))
    top_label = opt.start.toString(top_format)
    top_rect = QRect(0, 0, 0, half)
    alt_rect = None
    while curr <= end:  # update the top rect
        new_top_label = curr.toString(top_format)
        if new_top_label != top_label:
            # day changed: close out the previous day's header rect
            top_rect.setRight(x)
            self._topLabels.append((top_rect, top_label))
            top_rect = QRect(x, 0, 0, half)
            top_label = new_top_label
            # toggle the alternating day-background rect on each day change
            if alt_rect is not None:
                alt_rect.setRight(x)
                self._alternateRects.append(alt_rect)
                alt_rect = None
            else:
                alt_rect = QRect(x, 0, 0, opt.height)
        # create the line
        self._hlines.append(QLine(x, 0, x, opt.height))
        # create the header label/rect
        # [:-1] drops the trailing character of the hour string
        # (presumably the "m" of "am"/"pm" -- confirm against Qt format 'ha')
        label = nativestring(curr.toString(label_format))[:-1]
        rect = QRect(x, half, opt.cell_width, half)
        self._labels.append((rect, label))
        # increment the dates
        curr = curr.addSecs(increment * 60)
        x += opt.cell_width
        i += 1
    # update the top rect (close out the final, still-open day rects)
    top_rect.setRight(x)
    top_label = opt.end.toString(top_format)
    self._topLabels.append((top_rect, top_label))
    if alt_rect is not None:
        alt_rect.setRight(x)
        self._alternateRects.append(alt_rect)
    # resize the width to match the last date range
    new_width = x
    self.setSceneRect(0, 0, new_width, opt.height)
    # generate horizontal lines
    y = 0
    h = opt.height
    width = new_width
    while y < h:
        self._vlines.append(QLine(0, y, width, y))
        y += opt.cell_height
    # clear the dirty flag
    self._dirty = False
|
def headgrid(self, xg, yg, layers=None, printrow=False):
    """Compute heads on a grid.

    Parameters
    ----------
    xg : array
        x values of grid
    yg : array
        y values of grid
    layers : integer, list or array, optional
        layers for which grid is returned
    printrow : boolean, optional
        prints dot to screen for each row of grid if set to `True`

    Returns
    -------
    h : array size `nlayers, ny, nx`

    See also
    --------
    :func:`~timml.model.Model.headgrid2`
    """
    nx = len(xg)
    ny = len(yg)
    if layers is None:
        # default: every aquifer layer at the first grid point
        nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq
    else:
        nlayers = len(np.atleast_1d(layers))
    h = np.empty((nlayers, ny, nx))
    for jrow, y in enumerate(yg):
        if printrow:
            print('.', end='', flush=True)
        for icol, x in enumerate(xg):
            h[:, jrow, icol] = self.head(x, y, layers)
    if printrow:
        print('', flush=True)
    return h
|
def bgsize_rankspernode(self):
    """Return (bg_size, ranks_per_node) from mpi_procs and omp_threads."""
    total_cores = self.mpi_procs * self.omp_threads
    bg_size = int(math.ceil(total_cores / self.hw.cores_per_node))
    # TODO hardcoded minimum partition size
    bg_size = max(bg_size, 32)
    ranks_per_node = int(math.ceil(self.mpi_procs / bg_size))
    return bg_size, ranks_per_node
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.