signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def isin(arg, values):
    """Check whether the value expression is contained within the given
    collection of values.

    Parameters
    ----------
    values : list, tuple, or array expression
        Each element may be scalar or array-like; every element must be
        comparable with the calling expression, or None (NULL).

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('string_col', 'string')])
    >>> table2 = ibis.table([('other_string_col', 'string')])
    >>> expr = table.string_col.isin(['foo', 'bar', 'baz'])
    >>> expr2 = table.string_col.isin(table2.other_string_col)

    Returns
    -------
    contains : BooleanValue
    """
    return ops.Contains(arg, values).to_expr()
|
def find_windows(elements, coordinates, processes=None, mol_size=None, adjust=1, pore_opt=True, increment=1.0, **kwargs):
    """Return window diameters and centres of mass for a molecule.

    Parameters
    ----------
    elements : sequence of str
        Atomic element symbols, one per atom (looked up in atomic_vdw_radius).
    coordinates : array-like
        Cartesian coordinates of the atoms; deep-copied before use.
    processes : int, optional
        Number of worker processes; if falsy, the analysis runs serially.
    mol_size : optional
        Not used in this function body -- presumably kept for interface
        compatibility; TODO confirm against callers.
    adjust : number
        Multiplier for the sampling-point density on the sphere surface.
    pore_opt : bool
        If True, centre the molecule on the optimised pore centre instead of
        the plain centre of mass.
    increment : float
        Step length forwarded to vector_preanalysis along each sampling vector.
    **kwargs
        Forwarded to opt_pore_diameter() and window_analysis().

    Returns
    -------
    tuple of (numpy.ndarray, numpy.ndarray) or None
        Window diameters (Angstrom) and the corresponding window centres
        translated back to the molecule's original frame, or None when no
        window-connecting vectors were found.
    """
    # Copy the coordinates, as we will perform many operations on them.
    coordinates = deepcopy(coordinates)
    # Centre of our cartesian system is always at the origin.
    origin = np.array([0, 0, 0])
    # Initial centre of mass, kept to reverse the translation at the end.
    initial_com = center_of_mass(elements, coordinates)
    # Shift the cage to the origin using either the standard centre of mass
    # or, if the pore_opt flag is True, the optimised pore centre.
    if pore_opt is True:
        # Normally the pore is calculated from the COM of the molecule, so
        # essentially the molecule's COM is the pore centre.  To shift the
        # molecule so that the centre of the *optimised* pore (and not of the
        # unoptimised one) sits at the origin, the shift has to be adjusted
        # and the stored initial COM updated accordingly.
        com_adjust = initial_com - opt_pore_diameter(elements, coordinates, **kwargs)[2]
        initial_com = initial_com - com_adjust
        coordinates = shift_com(elements, coordinates, com_adjust=com_adjust)
    else:
        # Otherwise we just shift the cage to the origin.
        coordinates = shift_com(elements, coordinates)
    # Column vector of van der Waals radii, one row per atom.
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # The maximum molecule dimension determines the radius of a sampling
    # sphere necessary to enclose the whole molecule.
    # (NOTE(review): "shpere" is a historic typo left untouched here.)
    shpere_radius = max_dim(elements, coordinates)[2] / 2
    sphere_surface_area = 4 * np.pi * shpere_radius ** 2
    # Number of sampling points necessary for a fine sampling.  Smaller
    # molecules require a finer density of points on the sphere surface,
    # larger ones less: number_of_points * length_of_sampling_vectors gives a
    # quadratic increase of sampling time.  The 250 factor was determined
    # empirically to produce close to 1 sampling point / Angstrom^2 for a
    # sphere of radius ~24 Angstrom; `adjust` scales the density.
    number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
    # Spread points evenly on a sphere using Alexandre Devert's golden-angle
    # spiral method: http://blog.marmakoide.org/?p=1
    golden_angle = np.pi * (3 - np.sqrt(5))
    theta = golden_angle * np.arange(number_of_points)
    z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0, number_of_points)
    radius = np.sqrt(1 - z * z)
    points = np.zeros((number_of_points, 3))
    points[:, 0] = radius * np.cos(theta) * shpere_radius
    points[:, 1] = radius * np.sin(theta) * shpere_radius
    points[:, 2] = z * shpere_radius
    # Compute the eps parameter for sklearn.cluster.DBSCAN (3-dimensional
    # spatial clustering) as the mean distance over each point's 10 nearest
    # neighbours.
    values = []
    tree = KDTree(points)
    for i in points:
        dist, ind = tree.query(i.reshape(1, -1), k=10)
        values.extend(dist)
    mean_distance = np.mean(values)
    # The best eps is parametrised by adding the mean distance and its root.
    eps = mean_distance + mean_distance ** 0.5
    # Run the sampling-vector analysis either serially or in parallel.
    # Vectors that pass through molecular pores return as an analysed list
    # (increment along the vector's path with the largest included sphere,
    # plus coordinates of this narrowest channel point); vectors that hit
    # the molecule on their path return as None.
    if processes:
        # Parallel analysis on the user-defined number of CPUs.
        pool = Pool(processes=processes)
        parallel = [pool.apply_async(vector_preanalysis, args=(point, coordinates, elements_vdw,), kwds={'increment': increment}) for point in points]
        results = [p.get() for p in parallel if p.get() is not None]
        pool.terminate()
        # Dataset is an array of sampling-point coordinates.
        dataset = np.array([x[5:8] for x in results])
    else:
        results = [vector_preanalysis(point, coordinates, elements_vdw, increment=increment) for point in points]
        results = [x for x in results if x is not None]
        dataset = np.array([x[5:8] for x in results])
    # If not a single vector survived the analysis, no molecular channels
    # (what we call windows here) connect the molecule's interior with the
    # surroundings (exterior space): the number of windows is zero and None
    # is returned.  Otherwise we continue our search for windows.
    if len(results) == 0:
        return None
    else:
        # Perform DBSCAN to cluster the sampling-point vectors.
        # The n_jobs parameter will be supported later:
        # db = DBSCAN(eps=eps, n_jobs=_ncpus).fit(dataset)
        db = DBSCAN(eps=eps).fit(dataset)
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = set(db.labels_)
        # Assign a cluster label to each sampling point.
        clusters = [[i, j] for i, j in zip(results, db.labels_)]
        clustered_results = {label: [] for label in labels}
        # Build a dictionary mapping cluster label -> list of points.
        [clustered_results[i[1]].append(i[0]) for i in clusters]
        # For each cluster, the sampling vector with the widest channel
        # 'neck' is assumed to pass closest to the window's centre and is
        # therefore passed to the window-analysis function, together with
        # the user-defined settings -- again serially or in parallel.
        # Noisy points get cluster label -1 and have to be excluded.
        if processes:
            pool = Pool(processes=processes)
            parallel = [pool.apply_async(window_analysis, args=(np.array(clustered_results[cluster]), elements, coordinates, elements_vdw), kwds=kwargs) for cluster in clustered_results if cluster != -1]
            window_results = [p.get() for p in parallel if p.get() is not None]
            pool.terminate()
        else:
            window_results = [window_analysis(np.array(clustered_results[cluster]), elements, coordinates, elements_vdw, **kwargs) for cluster in clustered_results if cluster != -1]
        # Two numpy arrays are produced: window diameters in Angstrom, and
        # the corresponding window centre coordinates shifted back by the
        # initial centre of mass.
        windows = np.array([result[0] for result in window_results if result is not None])
        windows_coms = np.array([np.add(result[1], initial_com) for result in window_results if result is not None])
        # Safety measures: warn if one of the windows is None or negative.
        # (NOTE(review): the messages are built but never emitted -- the
        # print calls are commented out.)
        for result in window_results:
            if result is None:
                msg_ = " ".join(['Warning. One of the analysed windows has', 'returned as None. See manual.'])
                # print(msg_)
            elif result[0] < 0:
                msg_ = " ".join(['Warning. One of the analysed windows has a vdW', 'corrected diameter smaller than 0. See manual.'])
                # print(msg_)
        return (windows, windows_coms)
|
def get_signatures(self):
    """Return the raw data of every v1 (JAR) signing signature file.

    Only v1 / JAR signing is covered.

    :rtype: list of bytes
    """
    pattern = re.compile(r"^(META-INF/)(.*)(\.RSA|\.EC|\.DSA)$")
    return [self.get_file(name)
            for name in self.get_files()
            if pattern.search(name)]
|
def update_parameter_group(name, parameters, apply_method="pending-reboot",
                           tags=None, region=None, key=None, keyid=None,
                           profile=None):
    '''Update an RDS parameter group.

    CLI example::

        salt myminion boto_rds.update_parameter_group my-param-group \
            parameters='{"back_log": 1, "binlog_cache_size": 4096}' region=us-east-1
    '''
    res = __salt__['boto_rds.parameter_group_exists'](name, tags, region, key,
                                                      keyid, profile)
    if not res.get('exists'):
        return {'exists': bool(res),
                'message': 'RDS parameter group {0} does not exist.'.format(name)}

    # BUGFIX: the loop variable must not be named ``key`` -- the original
    # shadowed the ``key`` (AWS secret key) function parameter, so the
    # _get_conn() call below received the last *parameter name* instead of
    # the AWS credential.
    param_list = []
    for param_name, param_value in six.iteritems(parameters):
        item = odict.OrderedDict()
        item.update({'ParameterName': param_name})
        item.update({'ApplyMethod': apply_method})
        if type(param_value) is bool:
            # RDS expects boolean parameters as the strings 'on' / 'off'.
            item.update({'ParameterValue': 'on' if param_value else 'off'})
        else:
            item.update({'ParameterValue': str(param_value)})  # future lint: disable=blacklisted-function
        param_list.append(item)
    if not param_list:
        return {'results': False}

    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'results': bool(conn)}
        res = conn.modify_db_parameter_group(DBParameterGroupName=name,
                                             Parameters=param_list)
        return {'results': bool(res)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
def _extract_params ( request_dict , param_list , param_fallback = False ) :
'''Extract pddb parameters from request'''
|
if not param_list or not request_dict :
return dict ( )
query = dict ( )
for param in param_list : # Retrieve all items in the form of { param : value } and
# convert { param _ _ key : value } into { param : { key : value } }
for query_key , query_value in request_dict . items ( ) :
if param == query_key :
query [ param ] = query_value
else :
query_key_parts = query_key . split ( '__' , 1 )
if param == query_key_parts [ 0 ] :
query [ param ] = { query_key_parts [ 1 ] : query_value }
# Convert special string " _ _ null _ _ " into Python None
nullifier = lambda d : { k : ( nullifier ( v ) if isinstance ( v , dict ) else # pylint : disable = used - before - assignment
( None if v == '__null__' else v ) ) for k , v in d . items ( ) }
# When fallback is enabled and no parameter matched , assume query refers to first parameter
if param_fallback and all ( [ param_key not in query . keys ( ) for param_key in param_list ] ) :
query = { param_list [ 0 ] : dict ( request_dict ) }
# Return a dictionary with only the requested parameters
return { k : v for k , v in nullifier ( query ) . items ( ) if k in param_list }
|
def process(self, index=None):
    """Completely process a directory of elevation tiles (as supplied in
    the constructor).  Both phases of the calculation, the single-tile and
    the edge-resolution phases, are run.

    Parameters
    ----------
    index : int/slice (optional)
        Default None - processes all tiles in a directory.  See
        :py:func:`process_twi` for additional options.
    """
    # Round 0 of twi processing: process the magnitude and directions of
    # slopes.
    print "Starting slope calculation round"
    self.process_twi(index, do_edges=False, skip_uca_twi=True)
    # Round 1 of twi processing.
    print "Starting self-area calculation round"
    self.process_twi(index, do_edges=False)
    # Round 2 of twi processing: edge resolution.  Keep processing the best
    # candidate tile until no candidate remains, or the same tile is picked
    # three times in a row (no further progress is being made).
    i = self.tile_edge.find_best_candidate(self.elev_source_files)
    print "Starting edge resolution round: ",
    count = 0
    i_old = -1
    same_count = 0
    while i is not None and same_count < 3:
        count += 1
        print '*' * 10
        print count, '(%d -- > %d) .' % (i_old, i)
        self.process_twi(i, do_edges=True)
        i_old = i
        i = self.tile_edge.find_best_candidate(self.elev_source_files)
        # Track consecutive repeats of the same candidate tile.
        if i_old == i:
            same_count += 1
        else:
            same_count = 0
    print '*' * 79
    print '******* PROCESSING COMPLETED *******'
    print '*' * 79
    # Return self to allow call chaining.
    return self
|
def load_dbf(self, shapefile_name):
    """Open the .dbf companion file, trying the lower-case and then the
    upper-case extension; silently does nothing if neither file exists."""
    for ext in ('dbf', 'DBF'):
        try:
            self.dbf = open("%s.%s" % (shapefile_name, ext), "rb")
            return
        except IOError:
            continue
|
def insert(self, index: int, item: object) -> None:
    """Insert ``item`` at position ``index`` in the tracked blueprint list.

    The abstract ``MutableSequence`` base builds ``BlueprintGroup.append``
    (and the other mutating helpers) on top of this single primitive.

    :param index: Position at which to insert the new Blueprint item.
    :param item: New ``Blueprint`` object.
    :return: None
    """
    self._blueprints.insert(index, item)
|
def sender(self, value):
    """Property setter forcing ``sender`` to always hold a Recipient."""
    if isinstance(value, Recipient):
        # Adopt an unowned Recipient into this object.
        if value._parent is None:
            value._parent = self
            value._field = 'from'
        self.__sender = value
    elif isinstance(value, str):
        # Keep the existing Recipient; only update its address and
        # clear the display name.
        self.__sender.address = value
        self.__sender.name = ''
    else:
        raise ValueError('sender must be an address string or a Recipient object')
    # Mark the 'from' field as dirty either way.
    self._track_changes.add('from')
|
def _set_adjustment_threshold(self, v, load=False):
    """Setter method for adjustment_threshold, mapped from YANG variable
    /mpls_config/router/mpls/mpls_cmds_holder/autobw_template/adjustment_threshold (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_adjustment_threshold is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_adjustment_threshold() directly.
    """
    # Unwrap values that carry their own YANG type-conversion callable.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated, user-facing type description.
        raise ValueError({'error-string': """adjustment_threshold must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=adjustment_threshold.adjustment_threshold, is_container='container', presence=False, yang_name="adjustment-threshold", rest_name="adjustment-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set adjustment-threshold', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", })
    self.__adjustment_threshold = t
    # Notify the change-tracking machinery when the object supports it.
    if hasattr(self, '_set'):
        self._set()
|
def log_likelihood_pairwise(data, params):
    """Return the log-likelihood of ``params`` for pairwise-comparison data.

    ``data`` is an iterable of ``(winner, loser)`` index pairs; each pair
    contributes ``-log(1 + exp(-(params[winner] - params[loser])))``.
    """
    return -sum(np.logaddexp(0, params[loser] - params[winner])
                for winner, loser in data)
|
def generate_thumbnail_download_link_vimeo(video_id_from_shortcode):
    """Return the large-thumbnail URL for a Vimeo video.

    Following the Vimeo API (https://developer.vimeo.com/api#video-request),
    the video's metadata is fetched as JSON and the thumbnail URL is read
    from the first record of the response.
    """
    # Download the video's metadata in JSON format.
    metadata_url = "https://vimeo.com/api/v2/video/" + str(video_id_from_shortcode) + ".json"
    raw_metadata = urlopen(metadata_url).read()
    # Parse the JSON and pull out the large-thumbnail URL.
    parsed_metadata = json.loads(raw_metadata.decode('utf-8'))
    return parsed_metadata[0]['thumbnail_large']
|
def _make_event(self, event_type, code, value):
    """Pack a new input event and push it to the character device."""
    seconds, microseconds = convert_timeval(time.time())
    payload = struct.pack(EVENT_FORMAT, seconds, microseconds,
                          event_type, code, value)
    self._write_device.write(payload)
    # Flush immediately so the consumer sees the event right away.
    self._write_device.flush()
|
def is_running(self):
    """Check whether the container is running.

    :returns: True or False
    :rtype: bool
    """
    if (yield from self._get_container_state()) == "running":
        return True
    # Not running but still marked as started: the container crashed,
    # so clean it up before reporting.
    if self.status == "started":
        yield from self.stop()
    return False
|
def get_intended_direction(self):
    """Return a (Y, X) step of -1/0/+1 pointing from the agent's current
    position towards the target; (0, 0) when the target is reached."""
    # (a > b) - (a < b) is the sign of (a - b): -1, 0 or +1.
    step_y = (self.target_y > self.current_y) - (self.target_y < self.current_y)
    step_x = (self.target_x > self.current_x) - (self.target_x < self.current_x)
    return step_y, step_x
|
def _rsq_adj ( self ) :
"""Adjusted R - squared ."""
|
n = self . n
k = self . k
return 1.0 - ( ( 1.0 - self . _rsq ) * ( n - 1.0 ) / ( n - k - 1.0 ) )
|
def include_in(self, dist):
    """Ensure this feature and everything it requires is included in ``dist``.

    You may override this in a subclass to perform additional operations on
    the distribution.  Note that this method may be called more than once
    per feature, and so should be idempotent.
    """
    if not self.available:
        raise DistutilsPlatformError(self.description + " is required, "
                                     "but is not available on this platform")
    dist.include(**self.extras)
    for required in self.require_features:
        dist.include_feature(required)
|
def fft(logfile):
    '''Display an FFT of the raw ACC/GYR data in ``logfile``.

    BUGFIX: the original body referenced an undefined global ``filename``
    instead of the ``logfile`` parameter, so the function could never run.
    '''
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile)
    # Sample rates (Hz) for each sensor stream, plus per-axis sample buffers.
    data = {'ACC1.rate': 1000, 'ACC2.rate': 1600, 'ACC3.rate': 1000,
            'GYR1.rate': 1000, 'GYR2.rate': 800, 'GYR3.rate': 1000}
    for acc in ['ACC1', 'ACC2', 'ACC3']:
        for ax in ['AccX', 'AccY', 'AccZ']:
            data[acc + '.' + ax] = []
    for gyr in ['GYR1', 'GYR2', 'GYR3']:
        for ax in ['GyrX', 'GyrY', 'GyrZ']:
            data[gyr + '.' + ax] = []
    # Now gather all the data.
    while True:
        m = mlog.recv_match(condition=args.condition)
        if m is None:
            break
        # Renamed from ``type`` to avoid shadowing the builtin.
        msg_type = m.get_type()
        if msg_type.startswith("ACC"):
            data[msg_type + '.AccX'].append(m.AccX)
            data[msg_type + '.AccY'].append(m.AccY)
            data[msg_type + '.AccZ'].append(m.AccZ)
        if msg_type.startswith("GYR"):
            data[msg_type + '.GyrX'].append(m.GyrX)
            data[msg_type + '.GyrY'].append(m.GyrY)
            data[msg_type + '.GyrZ'].append(m.GyrZ)
    print("Extracted %u data points" % len(data['ACC1.AccX']))
    # One figure per sensor; plot the FFT of each axis with the mean removed.
    for msg in ['ACC1', 'ACC2', 'ACC3', 'GYR1', 'GYR2', 'GYR3']:
        pylab.figure()
        if msg.startswith('ACC'):
            prefix = 'Acc'
        else:
            prefix = 'Gyr'
        for axis in ['X', 'Y', 'Z']:
            field = msg + '.' + prefix + axis
            d = data[field]
            if args.sample_length != 0:
                d = d[0:args.sample_length]
            d = numpy.array(d)
            if len(d) == 0:
                continue
            # Remove the DC component before transforming.
            avg = numpy.sum(d) / len(d)
            d -= avg
            d_fft = numpy.fft.rfft(d)
            freq = numpy.fft.rfftfreq(len(d), 1.0 / data[msg + '.rate'])
            pylab.plot(freq, numpy.abs(d_fft), label=field)
        pylab.legend(loc='upper right')
|
def gcal2jd(year, month, day):
    """Convert a proleptic Gregorian calendar date to a Julian date.

    No consideration of historical usage of the calendar is made; the
    input values are truncated to integers.

    Parameters
    ----------
    year, month, day : int
        Date components.  BC dates are given astronomically, i.e. as
        -(BC - 1): 1 BC == 0, 2 BC == -1, and so on.  Zero and negative
        months/days are valid and roll backwards (month 0 is the previous
        month, day 0 the last day of the previous month); a ``day`` larger
        than the month's length carries over into the next month.

    Returns
    -------
    jd1, jd2 : 2-element tuple of floats
        Added together the numbers give the Julian date for midnight of
        the given Gregorian calendar date.  ``jd1`` is always MJD_0
        (2400000.5), so ``jd2`` is the MJD; add a day fraction to ``jd2``
        for other times of day (e.g. +0.5 for midday).

    Examples
    --------
    >>> gcal2jd(2000, 1, 1)
    (2400000.5, 51544.0)
    >>> gcal2jd(2000, 2, 30)   # day overflow carries into March
    (2400000.5, 51604.0)
    >>> gcal2jd(2000, 0, 1)    # month 0 is December of the previous year
    (2400000.5, 51513.0)
    """
    y = int(year)
    m = int(month)
    d = int(day)
    # Fliegel & Van Flandern style integer-arithmetic conversion,
    # expressed with ipart() so negative values truncate toward zero.
    a = ipart((m - 14) / 12.0)
    century = ipart((y + 4900 + a) / 100.0)
    jd = (ipart((1461 * (y + 4800 + a)) / 4.0)
          + ipart((367 * (m - 2 - 12 * a)) / 12.0)
          - ipart((3 * century) / 4.0)
          + (d - 2432075.5)   # original offset -32075, shifted by -2400000.5
          - 0.5)              # move from midday to midnight
    return MJD_0, jd
|
def scalar_term(self, st):
    """Wrap ``st`` in a _ScalarTermS or _ScalarTermU so that text and HTML
    substitutions can be performed; non-string, non-None values pass
    through unchanged."""
    if isinstance(st, binary_type):
        return _ScalarTermS(st, self._jinja_sub)
    if isinstance(st, text_type):
        return _ScalarTermU(st, self._jinja_sub)
    if st is None:
        # Normalize None to an empty unicode term.
        return _ScalarTermU(u(''), self._jinja_sub)
    return st
|
def remove_pod(self, pod, array, **kwargs):
    """Remove an array from a pod.

    :param pod: Name of the pod.
    :type pod: str
    :param array: Array to remove from pod.
    :type array: str
    :param \\*\\*kwargs: See the REST API Guide on your array for the
        documentation on the request:
        **DELETE pod/:pod**/array/:array**
    :type \\*\\*kwargs: optional
    :returns: A dictionary mapping "name" to pod and "array" to the pod's
        new array list.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.13 or later.
    """
    endpoint = "pod/{0}/array/{1}".format(pod, array)
    return self._request("DELETE", endpoint, kwargs)
|
def save_user(self, request, sociallogin, form=None):
    """Persist a newly signed up social login.

    In the auto-signup case no signup form is available, so only the
    username is populated.
    """
    user = sociallogin.user
    # Social accounts authenticate externally; no local password.
    user.set_unusable_password()
    adapter = get_account_adapter()
    if form:
        adapter.save_user(request, user, form)
    else:
        adapter.populate_username(request, user)
    sociallogin.save(request)
    return user
|
def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]:
    """Parse the continuation line a client sends to end ``IDLE``.

    Args:
        buf: The continuation line to parse.

    Raises:
        NotParseable: If the buffer does not match the expected pattern.
    """
    match = self._pattern.match(buf)
    if match is None:
        raise NotParseable(buf)
    # The command is "done" only when the (upper-cased) first group equals
    # the expected continuation token.
    is_done = match.group(1).upper() == self.continuation
    remainder = buf[match.end(0):]
    return is_done, remainder
|
def _find_day_section_from_indices ( indices , split_interval ) :
"""Returns a list with [ weekday , section ] identifiers found
using a list of indices ."""
|
cells_day = 24 * 60 // split_interval
rv = [ [ int ( math . floor ( i / cells_day ) ) , i % cells_day ] for i in indices ]
return rv
|
def decorate(fn, *args, **kwargs):
    """Return a function that replicates ``fn`` but also returns additional
    values.  Used for creating functions of the proper type to pass to
    ``labeledfeatures()``.

    Parameters
    ----------
    fn : function
    *args : any
        Additional values that the returned function will append to its
        return value.
    **kwargs : dict
        Each element of ``kwargs`` becomes an attribute of the returned
        function.

    Returns
    -------
    wrapped : function
        Acts like ``fn`` except that it also returns the additional values.

    Examples
    --------
    >>> def f():
    ...     return 0, 1
    >>> decorate(f, 3, 4, 5)()
    (0, 1, 3, 4, 5)
    >>> decorate(lambda x: x * .5, 3, 4, 5)(1.)
    (0.5, 3, 4, 5)
    >>> decorate(lambda x: x, 1, 2)('foo')
    ('foo', 1, 2)
    >>> decorate(f, 'foo')()
    (0, 1, 'foo')
    >>> decorate(f, 0, foo='bar').foo
    'bar'

    Notes
    -----
    If ``fn`` returns multiple values, these are returned first, in
    sequence, followed by ``args`` in order.
    """
    # The wrapper's parameters are deliberately NOT named *args/**kwargs:
    # the original shadowed the decoration kwargs inside the wrapper.
    def _wrapper(*call_args, **call_kwargs):
        result = fn(*call_args, **call_kwargs)
        if isinstance(result, tuple):
            return result + args
        if len(args) == 0:
            return result
        return (result,) + args

    # Decoration kwargs become attributes of the wrapper function.
    for key, value in kwargs.items():
        _wrapper.__dict__[key] = value
    return _wrapper
|
def get_top_albums(self, period=PERIOD_OVERALL, limit=None, cacheable=True):
    """Return the top albums played by a user.

    * period: The period of time. Possible values:
      o PERIOD_OVERALL
      o PERIOD_7DAYS
      o PERIOD_1MONTH
      o PERIOD_3MONTHS
      o PERIOD_6MONTHS
      o PERIOD_12MONTHS
    """
    request_params = self._get_params()
    request_params["period"] = period
    if limit:
        request_params["limit"] = limit
    doc = self._request(self.ws_prefix + ".getTopAlbums", cacheable,
                        request_params)
    return _extract_top_albums(doc, self.network)
|
def view_status_code(codes):
    """Return the given status code, or a random weighted choice when more
    than one ``code:weight`` entry is given.
    tags:
      - Status codes
    parameters:
      - in: path
        name: codes
    produces:
      - text/plain
    responses:
      100:
        description: Informational responses
      200:
        description: Success
      300:
        description: Redirection
      400:
        description: Client Errors
      500:
        description: Server Errors
    """
    # Single-code path: no comma present.
    if "," not in codes:
        try:
            code = int(codes)
        except ValueError:
            return Response("Invalid status code", status=400)
        return status_code(code)
    # Multi-code path: each entry is "code" or "code:weight".
    choices = []
    for entry in codes.split(","):
        if ":" in entry:
            code, weight = entry.split(":")
        else:
            code, weight = entry, 1
        try:
            choices.append((int(code), float(weight)))
        except ValueError:
            return Response("Invalid status code", status=400)
    return status_code(weighted_choice(choices))
|
def count_statements(self, query, language='spo', type='triples', flush=None):
    """Run a query in a format supported by the Fedora Resource Index
    (e.g. SPO or Sparql) and return only the count of the results.

    :param query: query as a string
    :param language: query language to use; defaults to 'spo'
    :param flush: flush results to get recent changes; defaults to False
    :rtype: integer
    """
    result_format = 'count'
    http_args = {
        'type': type,
        'lang': language,
        'query': query,
        'format': result_format,
    }
    return self._query(result_format, http_args, flush)
|
def node_str(node):
    """Return the complete menu-entry text for a menu node, or "" for
    invisible menu nodes.  Invisible menu nodes are those that lack a
    prompt or whose prompt condition is not satisfied.

    Example return value: "[*] Bool symbol (BOOL)"

    The symbol name is printed in parentheses to the right of the prompt
    so that symbols can easily be referred to in the configuration
    interface.
    """
    if not node.prompt:
        return ""
    # Even for menu nodes for symbols and choices, it's wrong to check
    # Symbol.visibility/Choice.visibility here: a symbol (and, in theory,
    # a choice) can be defined in multiple locations, giving it multiple
    # menu nodes that do not necessarily share prompt visibility -- the
    # .visibility attribute is the OR over all of those prompts.
    prompt, prompt_cond = node.prompt
    if not expr_value(prompt_cond):
        return ""
    if node.item == MENU:
        return " " + prompt
    if node.item == COMMENT:
        return " *** {} ***".format(prompt)
    # From here on node.item is a Symbol or a Choice.
    sc = node.item
    if sc.type == UNKNOWN:
        # Symbols defined without a type are obscure and already warned about.
        return ""
    # {:3} sets the field width to three, giving nice alignment for empty
    # string values.
    entry = "{:3} {}".format(value_str(sc), prompt)
    # Unnamed choices (the normal kind) get no name suffix.
    if sc.name is not None:
        entry += " ({})".format(sc.name)
    return entry
|
def get_item_search_session(self, proxy):
    """Get the ``OsidSession`` associated with the item search service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.ItemSearchSession) - an ``ItemSearchSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_item_search()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_item_search()`` is ``true``.*
    """
    if self.supports_item_search():
        # pylint: disable=no-member
        return sessions.ItemSearchSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
def cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w, nStride, cStride, hStride, wStride):
    """Initialize a previously created generic Tensor descriptor as a 4D
    tensor with explicit strides.

    Like cudnnSetTensor4dDescriptor, but the strides are passed explicitly,
    allowing any memory layout or gaps between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor_t
        Handle to a previously created tensor descriptor.
    dataType : cudnnDataType
        Data type.
    n, c, h, w : int
        Number of images, feature maps per image, rows, and columns.
    nStride, cStride, hStride, wStride : int
        Strides between consecutive images, feature maps, rows, and columns.
    """
    dims_and_strides = (n, c, h, w, nStride, cStride, hStride, wStride)
    status = _libcudnn.cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, *dims_and_strides)
    # Raise on any non-success status code.
    cudnnCheckStatus(status)
|
def bank_identifier(self):
    """Return the IBAN's Bank Identifier."""
    # The BBAN starts at position 4 (after country code + check digits);
    # the bank identifier is its leading portion up to the split position.
    spec = get_iban_spec(self.country_code)
    return self._id[4:spec.bban_split_pos + 4]
|
def wave_module_patched():
    '''Return True if the ``wave`` module can write a data size of
    0xFFFFFFFF, False otherwise.

    Probes the module by setting the maximum possible frame count on an
    in-memory file and forcing the header to be written; an unpatched
    module raises ``struct.error`` when packing the oversized size field,
    in which case the size is capped at 0x7FFFFFFF instead.
    '''
    f = StringIO()
    w = wave.open(f, "wb")
    # 1 channel, 2-byte samples, 44100 Hz; frame count is set below.
    w.setparams((1, 2, 44100, 0, "NONE", "no compression"))
    patched = True
    try:
        # Use floor division so the frame count stays an integer
        # (true division would produce a float under Python 3).
        w.setnframes((0xFFFFFFFF - 36) // w.getnchannels() // w.getsampwidth())
        w._ensure_header_written(0)
    except struct.error:
        patched = False
        # Fixed typo: "sata size" -> "data size".
        logger.info("Error setting wave data size to 0xFFFFFFFF; wave module unpatched, setting data size to 0x7FFFFFFF")
        w.setnframes((0x7FFFFFFF - 36) // w.getnchannels() // w.getsampwidth())
        w._ensure_header_written(0)
    return patched
|
def project_move(object_id, input_params={}, always_retry=False, **kwargs):
    """Invokes the /project-xxxx/move API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove
    """
    resource = '/%s/move' % object_id
    return DXHTTPRequest(resource, input_params, always_retry=always_retry, **kwargs)
|
def key_exists(self, key):
    """Check whether a key exists in the event store.

    Returns True if it does, False otherwise.
    """
    assert isinstance(key, str)
    # Close the cursor deterministically once the count is fetched.
    with contextlib.closing(self.conn.cursor()) as cur:
        cur.execute('SELECT COUNT(*) FROM events WHERE uuid=?', (key,))
        (count,) = cur.fetchone()
    if not count:
        return False
    # uuid is expected to be unique; anything above 1 indicates corruption.
    assert count in (0, 1), "Duplicate event ids detected: {0}".format(count)
    return True
|
def _quickLevels(self, data):
    """Estimate the min/max values of *data* by subsampling.

    Repeatedly halves the longest axis until the array holds at most
    ~1e6 elements, then delegates to ``self._levelsFromMedianAndStd``.
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        # Index with a tuple: indexing an ndarray with a *list* of slices
        # is deprecated and raises in modern NumPy.
        data = data[tuple(sl)]
    return self._levelsFromMedianAndStd(data)
|
def interpolate(self, gmvs):
    """
    :param gmvs:
        array of intensity measure levels
    :returns:
        (interpolated loss ratios, interpolated covs, indices > min)
    """
    # Clip ground-motion values to the highest IML of the curve.
    clipped = numpy.piecewise(gmvs, [gmvs > self.imls[-1]], [self.imls[-1], lambda x: x])
    # Boolean mask of the values at or above the lowest IML.
    above_min = clipped >= self.imls[0]
    clipped = clipped[above_min]
    return self._mlr_i1d(clipped), self._cov_for(clipped), above_min
|
def basic_dependencies(self):
    """Accesses basic dependencies from the XML output

    :getter: Returns the dependency graph for basic dependencies
    :type: corenlp_xml.dependencies.DependencyGraph
    """
    if self._basic_dependencies is None:
        # Lazily build and cache the graph from the matching element, if any.
        matches = self._element.xpath('dependencies[@type="basic-dependencies"]')
        if matches:
            self._basic_dependencies = DependencyGraph(matches[0])
    return self._basic_dependencies
|
def tx_days_above(tasmax, thresh='25.0 degC', freq='YS'):
    r"""Number of summer days

    Number of days where daily maximum temperature exceed a threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Maximum daily temperature [°C] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [°C] or [K].
        Default: '25 degC'.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Number of summer days.

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i`
    of period :math:`j`. Then counted is the number of days where:

    .. math::

        TX_{ij} > Threshold [°C]
    """
    # Convert the threshold into the units of tasmax before comparing.
    thresh = utils.convert_units_to(thresh, tasmax)
    exceedances = (tasmax > thresh) * 1
    return exceedances.resample(time=freq).sum(dim='time')
|
def _prefix_from_ip_string(self, ip_str):
    """Turn a netmask/hostmask string into a prefix length.

    Args:
        ip_str: A netmask or hostmask, formatted as an IP address.

    Returns:
        The prefix length as an integer.

    Raises:
        NetmaskValueError: If the input is not a netmask or hostmask.
    """
    # Parse the netmask/hostmask like an IP address.
    try:
        mask_int = self._ip_int_from_string(ip_str)
    except AddressValueError:
        raise NetmaskValueError('%s is not a valid netmask' % ip_str)
    # First try a netmask (/1*0*/ as a bitwise regexp); note that the two
    # ambiguous cases (all-ones and all-zeroes) are treated as netmasks.
    try:
        return self._prefix_from_ip_int(mask_int)
    except NetmaskValueError:
        pass
    # Otherwise invert the bits and try to match a /0+1+/ hostmask.
    try:
        return self._prefix_from_ip_int(mask_int ^ self._ALL_ONES)
    except NetmaskValueError:
        raise NetmaskValueError('%s is not a valid netmask' % ip_str)
|
def track_purchase(self, user, items, total, purchase_id=None, campaign_id=None, template_id=None, created_at=None, data_fields=None):
    """Track a purchase via the ``/api/commerce/trackPurchase`` endpoint.

    The 'purchase_id' argument maps to 'id' for this API endpoint.
    This name is used to distinguish it from other instances where
    'id' is a part of the API request with other Iterable endpoints.

    :param user: dict identifying the purchasing user
    :param items: list of purchased items
    :param total: numeric purchase total (int or float)
    :raises TypeError: if user, items, or total have the wrong type
    """
    call = "/api/commerce/trackPurchase"
    payload = {}
    if isinstance(user, dict):
        payload["user"] = user
    else:
        raise TypeError('user key is not in Dictionary format')
    if isinstance(items, list):
        payload["items"] = items
    else:
        raise TypeError('items are not in Array format')
    # Accept int as well as float; an integer total is a valid amount.
    # (bool is excluded since it is a subclass of int.)
    if isinstance(total, (int, float)) and not isinstance(total, bool):
        payload["total"] = total
    else:
        raise TypeError('total is not in correct format')
    if purchase_id is not None:
        payload["id"] = str(purchase_id)
    if campaign_id is not None:
        payload["campaignId"] = campaign_id
    if template_id is not None:
        payload["templateId"] = template_id
    if created_at is not None:
        payload["createdAt"] = created_at
    if data_fields is not None:
        # The Iterable API expects camelCase 'dataFields', matching the
        # other optional keys above (previously sent as 'data_fields').
        payload["dataFields"] = data_fields
    return self.api_call(call=call, method="POST", json=payload)
|
def get_source(path):
    '''Yield source lines from the file at *path*, skipping blank lines,
    comment lines and lines mentioning "import", and stopping at an
    ``if __name__ == '__main__'`` guard.'''
    for line in read(path):
        stripped = line.strip()
        if 'import' in line or not stripped or line.startswith('#'):
            continue
        if '__name__' in line and '__main__' in line:
            break
        yield line
|
def print_cmd_line(self, s, target, source, env):
    """Write the command line *s* to stdout, tolerating both
    unicode-only and byte-oriented stdout objects.

    In Python 3 (and some tests) sys.stdout is a StringIO that accepts
    only unicode; in other cases it is a Python 2.x file object taking
    byte strings, which tries to decode unicode with the 'ascii' codec
    and fails on hi-bit characters. Assumes *s* is a regular string but
    also works if it is unicode.
    """
    try:
        sys.stdout.write(s + u"\n")
    except UnicodeDecodeError:
        # Fall back to a byte-string newline for 2.x file objects.
        sys.stdout.write(s + "\n")
|
def energy(self, sample_like, dtype=float):
    """The energy of the given sample.

    Args:
        sample_like (samples_like):
            A raw sample. `sample_like` is an extension of
            NumPy's array_like structure. See :func:`.as_samples`.
        dtype (:class:`numpy.dtype`, optional):
            The data type of the returned energies. Defaults to float.

    Returns:
        The energy.
    """
    # ``np.float`` was an alias of the builtin ``float`` and was removed
    # in NumPy 1.24; the builtin is used directly (same value, same behavior).
    energy, = self.energies(sample_like, dtype=dtype)
    return energy
|
def _level_coords(self):
    """Return a mapping of all MultiIndex levels and their corresponding
    coordinate name."""
    mapping = OrderedDict()
    for name, index in self.indexes.items():
        if not isinstance(index, pd.MultiIndex):
            continue
        # A MultiIndex coordinate is always one-dimensional.
        (dim,) = self.variables[name].dims
        for level_name in index.names:
            mapping[level_name] = dim
    return mapping
|
def get_queryset(self):
    '''Parameters are already validated in the QuerySetPermission'''
    params = self.request.GET
    # Resolve the target model from its lowercased content-type name.
    ctype = ContentType.objects.get(model=params.get("type").lower())
    target = ctype.model_class().objects.filter(id=params.get("id")).first()
    return Comment.objects.filter_by_object(target)
|
def diff_trees(left, right, diff_options=None, formatter=None):
    """Takes two lxml root elements or element trees"""
    if formatter is not None:
        formatter.prepare(left, right)
    differ = diff.Differ(**(diff_options if diff_options is not None else {}))
    results = differ.diff(left, right)
    # Without a formatter, materialize and return the raw diff actions.
    if formatter is None:
        return list(results)
    return formatter.format(results, left)
|
def generate_defect_structure(self, supercell=(1, 1, 1)):
    """Returns Defective Substitution structure, decorated with charge

    Args:
        supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
    """
    defect_structure = self.bulk_structure.copy()
    defect_structure.make_supercell(supercell)
    # consider modifying velocity property to make sure defect site is decorated
    # consistently with bulk structure for final defect_structure
    defect_properties = self.site.properties.copy()
    if ('velocities' in self.bulk_structure.site_properties) and 'velocities' not in defect_properties:
        # Only propagate a velocity onto the defect site when the bulk's
        # velocities are homogeneous; otherwise the right value is ambiguous.
        if all(vel == self.bulk_structure.site_properties['velocities'][0] for vel in self.bulk_structure.site_properties['velocities']):
            defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0]
        else:
            raise ValueError("No velocity property specified for defect site and " "bulk_structure velocities are not homogeneous. Please specify this " "property within the initialized defect_site object.")
    # create a trivial defect structure to find where supercell transformation moves the lattice
    site_properties_for_fake_struct = {prop: [val] for prop, val in defect_properties.items()}
    struct_for_defect_site = Structure(self.bulk_structure.copy().lattice, [self.site.specie], [self.site.frac_coords], to_unit_cell=True, site_properties=site_properties_for_fake_struct)
    struct_for_defect_site.make_supercell(supercell)
    defect_site = struct_for_defect_site[0]
    # NOTE(review): entries from get_sites_in_sphere appear to be
    # (site, distance, index) tuples, so sorting on x[1] picks the site
    # closest to the transformed defect position -- confirm against the
    # pymatgen Structure.get_sites_in_sphere API.
    poss_deflist = sorted(defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])
    defindex = poss_deflist[0][2]
    # Swap the closest bulk site for the substitution species, keeping its
    # cartesian coordinates and the (possibly velocity-decorated) properties.
    subsite = defect_structure.pop(defindex)
    defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True, properties=defect_site.properties)
    defect_structure.set_charge(self.charge)
    return defect_structure
|
def from_def(cls, obj):
    """Builds a profile object from a raw player summary object"""
    profile = cls(obj["steamid"])
    # Seed the cache with the raw summary so attributes resolve without
    # another fetch.
    profile._cache = obj
    return profile
|
def with_indices(*args):
    '''Create indices for an event class. Every event class must be decorated with this decorator.'''
    def decorator(cls):
        # Walk the base classes: if a base was itself decorated (it carries
        # _indicesNames), extend its classname chain by one more level.
        for c in cls.__bases__:
            if hasattr(c, '_indicesNames'):
                cls._classnameIndex = c._classnameIndex + 1
                # Copy the inherited _classname<i> attributes onto the subclass
                # so they shadow the base's values on this class.
                for i in range(0, cls._classnameIndex):
                    setattr(cls, '_classname' + str(i), getattr(c, '_classname' + str(i)))
                setattr(cls, '_classname' + str(cls._classnameIndex), cls._getTypename())
                # Index names = inherited names + this class's own classname
                # slot + the newly declared indices.
                cls._indicesNames = c._indicesNames + ('_classname' + str(cls._classnameIndex),) + args
                cls._generateTemplate()
                # NOTE(review): only the first decorated base is honoured;
                # additional decorated bases are ignored -- confirm intended.
                return cls
        # No decorated base: this is a root event class.
        cls._classnameIndex = -1
        cls._indicesNames = args
        cls._generateTemplate()
        return cls
    return decorator
|
def global_defaults():
    """Default configuration values and behavior toggles.

    Fabric only extends this method in order to make minor adjustments and
    additions to Invoke's `~invoke.config.Config.global_defaults`; see its
    documentation for the base values, such as the config subtrees
    controlling behavior of ``run`` or how ``tasks`` behave.

    For Fabric-specific modifications and additions to the Invoke-level
    defaults, see our own config docs at :ref:`default-values`.

    .. versionadded:: 2.0
    """
    # Start from Invoke's tree and layer Fabric's additions on top.
    defaults = InvokeConfig.global_defaults()
    fabric_settings = {
        "connect_kwargs": {},
        "forward_agent": False,
        "gateway": None,
        "load_ssh_configs": True,
        "port": 22,
        "run": {"replace_env": True},
        "runners": {"remote": Remote},
        "ssh_config_path": None,
        "tasks": {"collection_name": "fabfile"},
        # TODO: this becomes an override/extend once Invoke grows execution
        # timeouts (which should be timeouts.execute).
        "timeouts": {"connect": None},
        "user": get_local_user(),
    }
    merge_dicts(defaults, fabric_settings)
    return defaults
|
def _get_separated_values(self, secondary=False):
    """Separate values between odd and even series stacked"""
    series = self.secondary_series if secondary else self.series
    odd_series = [serie.safe_values for index, serie in enumerate(series) if index % 2]
    even_series = [serie.safe_values for index, serie in enumerate(series) if not index % 2]
    # Column-wise sums across each parity group.
    positive_vals = [sum(column) for column in zip(*odd_series)]
    negative_vals = [sum(column) for column in zip(*even_series)]
    return positive_vals, negative_vals
|
def read(self, fileobj):
    """Return if all data could be read and the atom payload"""
    fileobj.seek(self._dataoffset, 0)
    payload = fileobj.read(self.datalength)
    # A short read means the file was truncated before the atom's end.
    complete = len(payload) == self.datalength
    return complete, payload
|
def _forward(self):
    """Advance to the next token.

    Internal method; updates:
      - self.current_token
      - self.current_pos

    Raises:
        MissingTokensError: when trying to advance beyond the end of the
            token flow.
    """
    try:
        token = next(self.tokens)
    except StopIteration:
        raise MissingTokensError("Unexpected end of token stream at %d." % self.current_pos)
    else:
        self.current_token = token
        self.current_pos += 1
|
def _transform_prefix(cls, root_node, create_group_func):
    """Yield all the regular expressions matching a prefix of the grammar
    defined by the `Node` instance.

    This can yield multiple expressions, because in the case of an OR
    operation in the grammar, we can have another outcome depending on
    which clause would appear first. E.g. "(A|B) C" is not the same as
    "(B|A) C" because the regex engine is lazy and takes the first match.
    However, because the current input is actually a prefix of the
    grammar which might not yet contain the data for "C", we need to know
    both intermediate states, in order to call the appropriate
    autocompletion for both cases.

    :param root_node: The :class:`Node` instance for which we generate the grammar.
    :param create_group_func: A callable which takes a `Node` and returns the next
        free name for this node.
    """
    def transform(node):
        # Generate regexes for all permutations of this OR. Each node
        # should be in front once.
        if isinstance(node, Any):
            for c in node.children:
                for r in transform(c):
                    yield '(?:%s)?' % r
        # For a sequence. We can either have a match for the sequence
        # of all the children, or for an exact match of the first X
        # children, followed by a partial match of the next children.
        elif isinstance(node, Sequence):
            for i in range(len(node.children)):
                # Exact-match regexes for the children before position i...
                a = [cls._transform(c, create_group_func) for c in node.children[:i]]
                # ...followed by a prefix match of child i itself.
                for c in transform(node.children[i]):
                    yield '(?:%s)' % (''.join(a) + c)
        elif isinstance(node, Regex):
            yield '(?:%s)?' % node.regex
        elif isinstance(node, Lookahead):
            if node.negative:
                yield '(?!%s)' % cls._transform(node.childnode, create_group_func)
            else:
                # Not sure what the correct semantics are in this case.
                # (Probably it's not worth implementing this.)
                raise Exception('Positive lookahead not yet supported.')
        elif isinstance(node, Variable):
            # (Note that we should not append a '?' here. the 'transform'
            # method will already recursively do that.)
            for c in transform(node.childnode):
                yield '(?P<%s>%s)' % (create_group_func(node), c)
        elif isinstance(node, Repeat):
            # If we have a repetition of 8 times. That would mean that the
            # current input could have for instance 7 times a complete
            # match, followed by a partial match.
            prefix = cls._transform(node.childnode, create_group_func)
            for c in transform(node.childnode):
                if node.max_repeat:
                    repeat_sign = '{,%i}' % (node.max_repeat - 1)
                else:
                    repeat_sign = '*'
                yield '(?:%s)%s%s(?:%s)?' % (prefix, repeat_sign, ('' if node.greedy else '?'), c)
        else:
            raise TypeError('Got %r' % node)
    # Anchor every candidate so it must cover the entire input prefix.
    for r in transform(root_node):
        yield '^%s$' % r
|
def no_selenium_errors(func):
    """Decorator to create an `EmptyPromise` check function that is satisfied
    only when `func` executes without a Selenium error.

    This protects against many common test failures due to timing issues.
    For example, accessing an element after it has been modified by JavaScript
    ordinarily results in a `StaleElementException`. Methods decorated
    with `no_selenium_errors` will simply retry if that happens, which makes
    tests more robust.

    Args:
        func (callable): The function to execute, with retries if an error occurs.

    Returns:
        Decorated function
    """
    # Local import keeps the module's dependency surface unchanged.
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # which the original decorator discarded.
    @functools.wraps(func)
    def _inner(*args, **kwargs):  # pylint: disable=missing-docstring
        try:
            return_val = func(*args, **kwargs)
        except WebDriverException:
            LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True)
            return False
        else:
            return return_val

    return _inner
|
def _cleanup(self):
    """Remove the connection from the stack, closing out the cursor"""
    # Close the cursor first, before the connection it belongs to is
    # returned to the pool.
    if self._cursor:
        LOGGER.debug('Closing the cursor on %s', self.pid)
        self._cursor.close()
        self._cursor = None
    if self._conn:
        LOGGER.debug('Freeing %s in the pool', self.pid)
        try:
            pool.PoolManager.instance().free(self.pid, self._conn)
        except pool.ConnectionNotFoundError:
            # Best-effort: the connection may already be gone from the pool.
            pass
        self._conn = None
|
def handle_message(self, stream, payload):
    '''Handle incoming messages from underlying TCP streams

    :stream ZMQStream stream: A ZeroMQ stream.
    See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html

    :param dict payload: A payload to process
    '''
    # This is a generator-based coroutine: it yields on payload_handler and
    # exits via ``raise tornado.gen.Return()`` rather than plain returns.
    try:
        # Deserialize, then decrypt/decode the wire payload.
        payload = self.serial.loads(payload[0])
        payload = self._decode_payload(payload)
    except Exception as exc:
        exc_type = type(exc).__name__
        if exc_type == 'AuthenticationError':
            log.debug('Minion failed to auth to master. Since the payload is ' 'encrypted, it is not known which minion failed to ' 'authenticate. It is likely that this is a transient ' 'failure due to the master rotating its public key.')
        else:
            log.error('Bad load from minion: %s: %s', exc_type, exc)
        stream.send(self.serial.dumps('bad load'))
        raise tornado.gen.Return()
    # TODO helper functions to normalize payload?
    # Both the payload and its 'load' entry must be dicts.
    if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
        log.error('payload and load must be a dict. Payload was: %s and load was %s', payload, payload.get('load'))
        stream.send(self.serial.dumps('payload and load must be a dict'))
        raise tornado.gen.Return()
    try:
        # Reject minion ids containing a null byte (key-poisoning guard);
        # a non-string id raises TypeError from the ``in`` check below.
        id_ = payload['load'].get('id', '')
        if str('\0') in id_:
            log.error('Payload contains an id with a null byte: %s', payload)
            stream.send(self.serial.dumps('bad load: id contains a null byte'))
            raise tornado.gen.Return()
    except TypeError:
        log.error('Payload contains non-string id: %s', payload)
        stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
        raise tornado.gen.Return()
    # intercept the "_auth" commands, since the main daemon shouldn't know
    # anything about our key auth
    if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
        stream.send(self.serial.dumps(self._auth(payload['load'])))
        raise tornado.gen.Return()
    # TODO: test
    try:
        # Take the payload_handler function that was registered when we created the channel
        # and call it, returning control to the caller until it completes
        ret, req_opts = yield self.payload_handler(payload)
    except Exception as e:
        # always attempt to return an error to the minion
        stream.send(self.serial.dumps('Some exception handling minion payload'))
        log.error('Some exception handling a payload from minion', exc_info=True)
        raise tornado.gen.Return()
    # Reply using whichever send mode the handler requested.
    req_fun = req_opts.get('fun', 'send')
    if req_fun == 'send_clear':
        stream.send(self.serial.dumps(ret))
    elif req_fun == 'send':
        stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
    elif req_fun == 'send_private':
        stream.send(self.serial.dumps(self._encrypt_private(ret, req_opts['key'], req_opts['tgt'],)))
    else:
        log.error('Unknown req_fun %s', req_fun)
        # always attempt to return an error to the minion
        stream.send(self.serial.dumps('Server-side exception handling payload'))
    raise tornado.gen.Return()
|
def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):
    """Remove relative path symbols such as '..'

    Args:
        path (str): A target path string
        sep (str): A string to refer path delimiter (Default: `os.sep`)
        pardir (str): A string to refer parent directory (Default: `os.pardir`)

    Returns:
        str
    """
    kept = (part for part in path.split(sep) if part != pardir)
    return sep.join(kept)
|
def instance_cache(cls, func):
    """Save the cache to `self`

    This decorator take it for granted that the decorated function
    is a method. The first argument of the function is `self`.

    :param func: function to decorate
    :return: the decorator
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        # A bound call always carries the instance as the first positional arg.
        if not args:
            raise ValueError('`self` is not available.')
        the_self = args[0]
        func_key = cls.get_key(func)
        val_cache = cls.get_self_cache(the_self, func_key)
        lock = cls.get_self_cache_lock(the_self, func_key)
        return cls._get_value_from_cache(func, val_cache, lock, *args, **kwargs)
    return func_wrapper
|
def add_cell(preso, pos, width, height, padding=1, top_margin=4, left_margin=2):
    """Add a text frame to current slide"""
    # Usable area after margins and inter-cell padding, split into the grid.
    column_width = (SLIDE_WIDTH - left_margin * 2 - padding * (width - 1)) / width
    column_height = (SLIDE_HEIGHT - top_margin - padding * (height - 1)) / height
    # pos is 1-based, counted row-major across the grid.
    col_pos = int((pos - 1) % width)
    row_pos = int((pos - 1) / width)
    w = "{}cm".format(column_width)
    h = "{}cm".format(column_height)
    x = "{}cm".format(left_margin + (col_pos * column_width + (col_pos) * padding))
    y = "{}cm".format(top_margin + (row_pos * column_height + (row_pos) * padding))
    attr = {
        "presentation:class": "outline",
        "presentation:style-name": "Default-outline1",
        "svg:width": w,
        "svg:height": h,
        "svg:x": x,
        "svg:y": y,
    }
    preso.slides[-1].add_text_frame(attr)
    preso.slides[-1].grid_w_h_x_y = (w, h, x, y)
|
def _dispatch_trigger(self, msg):
    """Dispatches the message to the corresponding method."""
    text = msg.args[0]
    if not text.startswith(self.trigger_char):
        return
    trigger = text.split()[0].lstrip(self.trigger_char)
    if trigger not in self.triggers:
        return
    method = getattr(self, trigger)
    allowed_events = self.triggers[trigger]
    if msg.command == PRIVMSG:
        if msg.dst == self.irc.nick:
            # Message addressed to the bot directly.
            if EVT_PRIVATE in allowed_events:
                msg.event = EVT_PRIVATE
                method(msg)
        elif EVT_PUBLIC in allowed_events:
            msg.event = EVT_PUBLIC
            method(msg)
    elif msg.command == NOTICE and EVT_NOTICE in allowed_events:
        msg.event = EVT_NOTICE
        method(msg)
|
def synthesize_multiple(self, text_file, output_file_path, quit_after=None, backwards=False):
    """Synthesize the text contained in the given fragment list
    into a WAVE file.

    Return a tuple (anchors, total_time, num_chars).

    Concrete subclasses must implement at least one
    of the following private functions:

    1. ``_synthesize_multiple_python()``
    2. ``_synthesize_multiple_c_extension()``
    3. ``_synthesize_multiple_subprocess()``

    :param text_file: the text file to be synthesized
    :type  text_file: :class:`~aeneas.textfile.TextFile`
    :param string output_file_path: the path to the output audio file
    :param quit_after: stop synthesizing as soon as
                       reaching this many seconds
    :type  quit_after: :class:`~aeneas.exacttiming.TimeValue`
    :param bool backwards: if > 0, synthesize from the end of the text file
    :rtype: tuple (anchors, total_time, num_chars)
    :raises: TypeError: if ``text_file`` is ``None`` or
             one of the text fragments is not a Unicode string
    :raises: ValueError: if ``self.rconf[RuntimeConfiguration.ALLOW_UNLISTED_LANGUAGES]`` is ``False``
             and a fragment has a language code not supported by the TTS engine, or
             if ``text_file`` has no fragments or all its fragments are empty
    :raises: OSError: if output file cannot be written to ``output_file_path``
    :raises: RuntimeError: if both the C extension and
             the pure Python code did not succeed.
    """
    # --- input validation (log_exc logs and raises the given exception) ---
    if text_file is None:
        self.log_exc(u"text_file is None", None, True, TypeError)
    if len(text_file) < 1:
        self.log_exc(u"The text file has no fragments", None, True, ValueError)
    if text_file.chars == 0:
        self.log_exc(u"All fragments in the text file are empty", None, True, ValueError)
    if not self.rconf[RuntimeConfiguration.ALLOW_UNLISTED_LANGUAGES]:
        # Every fragment must use a language with a known voice code.
        for fragment in text_file.fragments:
            if fragment.language not in self.LANGUAGE_TO_VOICE_CODE:
                self.log_exc(u"Language '%s' is not supported by the selected TTS engine" % (fragment.language), None, True, ValueError)
    for fragment in text_file.fragments:
        for line in fragment.lines:
            if not gf.is_unicode(line):
                self.log_exc(u"The text file contain a line which is not a Unicode string", None, True, TypeError)
    # log parameters
    if quit_after is not None:
        self.log([u"Quit after reaching %.3f", quit_after])
    if backwards:
        self.log(u"Synthesizing backwards")
    # check that output_file_path can be written
    if not gf.file_can_be_written(output_file_path):
        self.log_exc(u"Cannot write to output file '%s'" % (output_file_path), None, True, OSError)
    # first, call Python function _synthesize_multiple_python() if available
    if self.HAS_PYTHON_CALL:
        self.log(u"Calling TTS engine via Python")
        try:
            computed, result = self._synthesize_multiple_python(text_file, output_file_path, quit_after, backwards)
            if computed:
                self.log(u"The _synthesize_multiple_python call was successful, returning anchors")
                return result
            else:
                self.log(u"The _synthesize_multiple_python call failed")
        except Exception as exc:
            # Log and fall through to the C extension / subprocess path.
            self.log_exc(u"An unexpected error occurred while calling _synthesize_multiple_python", exc, False, None)
    # call _synthesize_multiple_c_extension() or _synthesize_multiple_subprocess()
    self.log(u"Calling TTS engine via C extension or subprocess")
    c_extension_function = self._synthesize_multiple_c_extension if self.HAS_C_EXTENSION_CALL else None
    subprocess_function = self._synthesize_multiple_subprocess if self.HAS_SUBPROCESS_CALL else None
    return gf.run_c_extension_with_fallback(self.log, self.C_EXTENSION_NAME, c_extension_function, subprocess_function, (text_file, output_file_path, quit_after, backwards), rconf=self.rconf)
|
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
    """Load config from the directory specified and start the training.

    :param config_path: path to the config file or the directory in which it is stored
    :param restore_from: backend-specific path to the already trained model to be restored from.
        If ``None`` is passed, it is inferred from the configuration file location as the directory
        it is located in.
    :param cl_arguments: additional command line arguments which will update the configuration
    :param output_root: output root in which the training directory will be created
    """
    config = None
    try:
        config_path = find_config(config_path)
        # Default to restoring from the directory containing the config file.
        restore_from = restore_from or path.dirname(config_path)
        config = load_config(config_file=config_path, additional_args=cl_arguments)
        validate_config(config)
        logging.debug('\tLoaded config: %s', config)
    except Exception as ex:  # pylint: disable=broad-except
        # NOTE(review): fallback() presumably reports the error and aborts;
        # otherwise run() below would be invoked with config=None -- confirm.
        fallback('Loading config failed', ex)
    run(config=config, output_root=output_root, restore_from=restore_from)
|
def Call(self, position, function_call):
    """Perform a function call in the inferior.

    WARNING: Since Gdb's concept of threads can't be directly identified with
    python threads, the function call will be made from what has to be assumed
    is an arbitrary thread. This *will* interrupt the inferior. Continuing it
    after the call is the responsibility of the caller.

    Args:
        position: the context of the inferior to call the function from.
        function_call: A string corresponding to a function call. Format:
            'foo(0,0)'

    Returns:
        The return value of the called function.
    """
    self.EnsureGdbPosition(position[0], None, None)
    # The inferior must be stopped before gdb can evaluate a call in it.
    if not gdb.selected_thread().is_stopped():
        self.Interrupt(position)
    return self._UnpackGdbVal(gdb.parse_and_eval(function_call))
|
def Axn(mt, x, n):
    """(A^1)x:n : Returns the EPV (net single premium) of a term insurance."""
    # Commutation-function form: (Mx - M(x+n)) / Dx.
    numerator = mt.Mx[x] - mt.Mx[x + n]
    return numerator / mt.Dx[x]
|
def __match_ancestry(self, ancestry):
    """Find frames matching the given ancestry.

    Returns a tuple containing the following:
      * Topmost frame matching the given ancestry, or the bottom-most
        sentry frame if no frame matches.
      * Unmatched ancestry part.
    """
    stack = self.__stack
    # Only the sentry frame is present: nothing of the ancestry matches.
    if len(stack) == 1:
        return stack[0], ancestry
    previous = stack[0]
    # Walk frames above the sentry in lockstep with ancestry indices.
    # NOTE(review): 'is not' compares identity — this assumes frame ids
    # are shared objects rather than merely equal values; confirm.
    for frame, n in zip(stack[1:], xrange(len(ancestry))):
        if frame.id() is not ancestry[n]:
            return previous, ancestry[n:]
        previous = frame
    # NOTE(review): if ancestry is empty while the stack has more than one
    # frame, 'frame' and 'n' are unbound here and this raises NameError —
    # presumably callers never pass an empty ancestry in that state; verify.
    return frame, ancestry[n + 1:]
|
def close(self):
    """Close (destroy) this USB context, and all related instances.

    When this method has been called, methods on its instance will
    become mostly no-ops, returning None until explicitly re-opened
    (by calling open() or __enter__()).

    Note: "exit" is a deprecated alias of "close".
    """
    # Prevent any further implicit re-opening of the context.
    self.__auto_open = False
    self.__context_cond.acquire()
    try:
        # Wait until no caller still holds a reference to the context
        # before actually tearing it down.
        while self.__context_refcount and self.__context_p:
            self.__context_cond.wait()
        self._exit()
    finally:
        # Wake any waiters even if _exit() raised.
        self.__context_cond.notifyAll()
        self.__context_cond.release()
|
def key_from_password(password):
    """Derive a fixed-length key by hashing ``password`` with SHA-256.

    Unicode input is encoded as UTF-8 first; anything that is not a
    byte string afterwards is rejected with TypeError.
    """
    data = password.encode('utf-8') if isinstance(password, unicode) else password
    if not isinstance(data, bytes):
        raise TypeError("password must be byte string, not %s" % type(data))
    hasher = SHA256.new()
    hasher.update(data)
    return hasher.digest()
|
def format_html(format_string, *args, **kwargs):
    """Like str.format, but every positional and keyword argument is
    passed through conditional_escape first, and the result is wrapped
    with mark_safe. Use this instead of str.format or %-interpolation
    when building up small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {key: conditional_escape(val)
                      for key, val in six.iteritems(kwargs)}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
|
def AddTrainingOperators(model, softmax, label):
    """Adds loss, accuracy and plain-SGD training operators to the model."""
    # Cross-entropy between the predicted distribution and the label.
    xent = model.LabelCrossEntropy([softmax, label], 'xent')
    # compute the expected loss
    loss = model.AveragedLoss(xent, "loss")
    # track the accuracy of the model
    AddAccuracy(model, softmax, label)
    # use the average loss we just computed to add gradient operators to the
    # model
    model.AddGradientOperators([loss])
    # do a simple stochastic gradient descent
    ITER = brew.iter(model, "iter")
    # set the learning rate schedule; base_lr is negative because the
    # WeightedSum update below *adds* param_grad * LR to the parameters.
    LR = model.LearningRate(ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999)
    # ONE is a constant value that is used in the gradient update. We only need
    # to create it once, so it is explicitly placed in param_init_net.
    ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    # Now, for each parameter, we do the gradient updates.
    for param in model.params:
        # Note how we get the gradient of each parameter - ModelHelper keeps
        # track of that.
        param_grad = model.param_to_grad[param]
        # The update is a simple weighted sum: param = param + param_grad * LR
        model.WeightedSum([param, ONE, param_grad, LR], param)
|
def rename_fmapm(bids_base, basename):
    '''Rename magnitude fieldmap files to the BIDS naming convention
    (``*_magnitude_e<N>`` becomes ``*_magnitude<N>``); returns a dict
    mapping file extension to the last renamed destination path.'''
    renamed = dict()
    for extension in ('nii.gz', 'json'):
        for echo_num in (1, 2):
            source = os.path.join(
                bids_base, 'fmap',
                '{0}_e{1}.{2}'.format(basename, echo_num, extension))
            if not os.path.exists(source):
                continue
            target = source.replace('magnitude_e{0}'.format(echo_num),
                                    'magnitude{0}'.format(echo_num))
            logger.debug('renaming %s to %s', source, target)
            os.rename(source, target)
            renamed[extension] = target
    return renamed
|
def importpath(path, error_text=None):
    """Import a value by dotted ``path``.

    The value can represent a module, class, object, attribute or method.
    The longest importable module prefix of ``path`` is imported, then the
    remaining components are resolved via attribute access.

    :param path: dotted path, e.g. ``'package.module.ClassName.attr'``.
    :param error_text: if not ``None`` and resolution fails, raise
        ``ImproperlyConfigured`` with a user-friendly message instead of
        the original ``ImportError``.
    """
    result = None
    attrs = []
    parts = path.split('.')
    exception = None
    # Try importing progressively shorter prefixes of the path until one
    # succeeds; the stripped-off tail is resolved as attributes below.
    while parts:
        try:
            result = __import__('.'.join(parts), {}, {}, [''])
        except ImportError as e:
            # Remember the first (most specific) import error for re-raising.
            if exception is None:
                exception = e
            attrs = parts[-1:] + attrs
            parts = parts[:-1]
        else:
            break
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except (AttributeError, ValueError):
            if error_text is not None:
                # Fixed message: previously read 'can import', which
                # inverted the meaning of the error.
                raise ImproperlyConfigured('Error: %s cannot import "%s"' % (error_text, path))
            else:
                raise exception
    return result
|
def disconnect(self, sid, namespace):
    """Register a client disconnect from a namespace."""
    if namespace not in self.rooms:
        return
    # Snapshot the membership first: leave_room mutates self.rooms while
    # we would otherwise still be iterating it.
    joined = [name for name, members
              in six.iteritems(self.rooms[namespace].copy())
              if sid in members]
    for name in joined:
        self.leave_room(sid, namespace, name)
    # Drop callbacks registered for this client in this namespace.
    if sid in self.callbacks and namespace in self.callbacks[sid]:
        del self.callbacks[sid][namespace]
        if not self.callbacks[sid]:
            del self.callbacks[sid]
    # Clear any pending-disconnect bookkeeping for this client.
    if namespace in self.pending_disconnect and sid in self.pending_disconnect[namespace]:
        self.pending_disconnect[namespace].remove(sid)
        if not self.pending_disconnect[namespace]:
            del self.pending_disconnect[namespace]
|
def __register_notification(self, prop_name, method, kwargs):
    """Internal service which associates the given property name (or
    wildcard pattern) to the method, and the (prop_name, method) pair
    with the given kwargs dictionary. If needed merges the dictionary;
    if the given (prop_name, method) pair was already registered the
    last registration wins in case of overlapping.

    Raises ValueError if the given prop_name and method have already
    been registered together, or if more than one pattern is declared
    for the same method.
    """
    key = (prop_name, method)
    # A (pattern, method) pair may only be registered once.
    if key in self.__PAT_METH_TO_KWARGS:
        raise ValueError("In class %s method '%s' has been declared " "to be a notification for pattern '%s' " "multiple times (only one is allowed)." % (self.__class__, method.__name__, prop_name))
    if frozenset(prop_name) & WILDCARDS:
        # checks that at most one pattern is specified per-method:
        # (see ticket:31 comment:7 and following)
        if (method in self.__METH_TO_PAT or (method in self.__METH_TO_PROPS and self.__METH_TO_PROPS[method])):
            raise ValueError("In class %s multiple patterns have been " "used to declare method '%s' to be a " "notification (only one is allowed.)" % (self.__class__, method.__name__))
        # for the sake of efficiency, method to patterns map is kept
        self.__METH_TO_PAT[method] = prop_name
        # the name contains wildcards
        _dict = self.__PAT_TO_METHS
    else:
        # check that it was not used for patterns
        if method in self.__METH_TO_PAT:
            raise ValueError("In class %s multiple patterns have been " "used to declare method '%s' to be a " "notification (only one is allowed.)" % (self.__class__, method.__name__))
        _dict = self.__PROP_TO_METHS
        if method not in self.__METH_TO_PROPS:
            self.__METH_TO_PROPS[method] = set()
        self.__METH_TO_PROPS[method].add(prop_name)
    # fills the internal structures
    if prop_name not in _dict:
        _dict[prop_name] = set()
    _dict[prop_name].add(method)
    self.__PAT_METH_TO_KWARGS[key] = kwargs
|
def set_ylabel ( self , s , panel = None ) :
"set plot xlabel"
|
if panel is None :
panel = self . current_panel
self . panels [ panel ] . set_ylabel ( s )
|
def split_by_line(content):
    r"""Split the given content into a list of items by newline.

    Both \r\n and \n are supported, since TTY devices on POSIX systems
    sometimes use \r\n for newlines. An empty or whitespace-only string
    yields an empty list; content without any newline comes back as the
    only element of a single-item list. Leading and trailing whitespace
    is removed from all returned elements.

    :param str content: Content to split by newlines
    :return: List of items that were separated by newlines.
    :rtype: list
    """
    # Strip first so a lone trailing \n or \r\n does not split the
    # string into spurious extra parts.
    trimmed = content.strip()
    if not trimmed:
        return []
    for separator in ('\r\n', '\n'):
        if separator in trimmed:
            return _strip_all(trimmed.split(separator))
    return _strip_all([trimmed])
|
def do_stop_cluster(self, cluster):
    """Completely stop the given cluster.

    Usage:
        > stop_cluster <cluster>
    """
    try:
        target = api.get_cluster(cluster)
        target.stop()
        print("Stopping Cluster")
    except ApiException:
        print("Cluster not found")
    return None
|
def update_labels(repo):
    """Update the repository's issue labels to match the configured list.

    Existing labels matched by ``find_label`` are edited in place;
    unmatched ones are deleted when ``DELETE_UNSPECIFIED`` is set and
    kept otherwise. Any configured label not seen during the sweep is
    created afterwards.
    """
    # Names (old and new) already handled, so the creation pass skips them.
    updated = set()
    for label in repo.get_labels():
        edit = find_label(label.name, label.color, label.description)
        if edit is not None:
            print(' Updating {}: #{} "{}"'.format(edit.new, edit.color, edit.description))
            label.edit(edit.new, edit.color, edit.description)
            # Track both names so a rename is not re-created below.
            updated.add(edit.old)
            updated.add(edit.new)
        else:
            if DELETE_UNSPECIFIED:
                print(' Deleting {}: #{} "{}"'.format(label.name, label.color, label.description))
                label.delete()
            else:
                print(' Skipping {}: #{} "{}"'.format(label.name, label.color, label.description))
                updated.add(label.name)
    # Create any configured label that was not updated above.
    for name, values in label_list.items():
        color, description = values
        # A tuple key means (old_name, new_name); use the new name.
        if isinstance(name, tuple):
            new_name = name[1]
        else:
            new_name = name
        if new_name not in updated:
            print(' Creating {}: #{} "{}"'.format(new_name, color, description))
            repo.create_label(new_name, color, description)
|
def get_or_create_votes(self, row, division, candidate_election):
    """Gets or creates the Vote object for the given row of AP data."""
    fields = {
        'division': division,
        'count': row["votecount"],
        'pct': row["votepct"],
        'winning': row["winner"],
        'runoff': row["runoff"],
        'candidate_election': candidate_election,
    }
    vote.Votes.objects.get_or_create(**fields)
|
def decrypt_pillar(self, pillar):
    '''Decrypt the specified pillar dictionary items, if configured to do so.

    ``pillar`` is modified in place; a list of error strings is returned
    (empty on success).
    '''
    errors = []
    if self.opts.get('decrypt_pillar'):
        decrypt_pillar = self.opts['decrypt_pillar']
        # The option may be a list of single-key dicts; repack into a dict.
        if not isinstance(decrypt_pillar, dict):
            decrypt_pillar = salt.utils.data.repack_dictlist(self.opts['decrypt_pillar'])
        if not decrypt_pillar:
            errors.append('decrypt_pillar config option is malformed')
        for key, rend in six.iteritems(decrypt_pillar):
            # Resolve the (possibly nested) pillar value for this key.
            ptr = salt.utils.data.traverse_dict(pillar, key, default=None, delimiter=self.opts['decrypt_pillar_delimiter'])
            if ptr is None:
                log.debug('Pillar key %s not present', key)
                continue
            # Hashability is used as a proxy for immutability: immutable
            # values cannot be updated in place and must be replaced in
            # their parent container after decryption.
            try:
                hash(ptr)
                immutable = True
            except TypeError:
                immutable = False
            try:
                ret = salt.utils.crypt.decrypt(ptr, rend or self.opts['decrypt_pillar_default'], renderers=self.rend, opts=self.opts, valid_rend=self.opts['decrypt_pillar_renderers'])
                if immutable:
                    # Since the key pointed to an immutable type, we need
                    # to replace it in the pillar dict. First we will find
                    # the parent, and then we will replace the child key
                    # with the return data from the renderer.
                    parent, _, child = key.rpartition(self.opts['decrypt_pillar_delimiter'])
                    if not parent:
                        # key is a top-level key, so the pointer to the
                        # parent is the pillar dict itself.
                        ptr = pillar
                    else:
                        ptr = salt.utils.data.traverse_dict(pillar, parent, default=None, delimiter=self.opts['decrypt_pillar_delimiter'])
                    if ptr is not None:
                        ptr[child] = ret
            except Exception as exc:
                msg = 'Failed to decrypt pillar key \'{0}\': {1}'.format(key, exc)
                errors.append(msg)
                log.error(msg, exc_info=True)
    return errors
|
def _prepare_fetch(self, request: Request, response: Response):
    '''Prepare for a fetch: (re)connect the control stream and log in.

    Coroutine.
    '''
    self._request = request
    self._response = response
    yield from self._init_stream()
    connection_closed = self._control_connection.closed()
    if connection_closed:
        # A closed connection voids any previous login for it.
        self._login_table.pop(self._control_connection, None)
        yield from self._control_stream.reconnect()
    request.address = self._control_connection.address
    connection_reused = not connection_closed
    self.event_dispatcher.notify(self.Event.begin_control, request, connection_reused=connection_reused)
    if connection_closed:
        # Fresh connection: consume the server greeting, then authenticate.
        yield from self._commander.read_welcome_message()
        yield from self._log_in()
    self._response.request = request
|
def WriteMessageHandlerRequests ( self , requests , cursor = None ) :
"""Writes a list of message handler requests to the database ."""
|
query = ( "INSERT IGNORE INTO message_handler_requests " "(handlername, request_id, request) VALUES " )
value_templates = [ ]
args = [ ]
for r in requests :
args . extend ( [ r . handler_name , r . request_id , r . SerializeToString ( ) ] )
value_templates . append ( "(%s, %s, %s)" )
query += "," . join ( value_templates )
cursor . execute ( query , args )
|
def _KillProcess(self, pid):
    """Issues a SIGKILL or equivalent to the process.

    Args:
        pid (int): process identifier (PID).
    """
    if sys.platform.startswith('win'):
        # Windows has no SIGKILL; use TerminateProcess via ctypes.
        # 1 is the PROCESS_TERMINATE access right.
        process_terminate = 1
        handle = ctypes.windll.kernel32.OpenProcess(process_terminate, False, pid)
        ctypes.windll.kernel32.TerminateProcess(handle, -1)
        ctypes.windll.kernel32.CloseHandle(handle)
    else:
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError as exception:
            # Best-effort: log the failure instead of propagating.
            logger.error('Unable to kill process {0:d} with error: {1!s}'.format(pid, exception))
|
def on_train_begin(self, **kwargs: Any) -> None:
    "Initialize inner arguments: reset the wait counter, snapshot the current optimizer, then defer to the parent callback."
    self.wait, self.opt = 0, self.learn.opt
    super().on_train_begin(**kwargs)
|
def get_links(html, outformat):
    """Return a list of citation-export links found in the html.

    Parameters
    ----------
    html : str
        A Google Scholar results page.
    outformat : int
        The output format of the citations (one of FORMAT_BIBTEX,
        FORMAT_ENDNOTE, FORMAT_REFMAN, FORMAT_WENXIANWANG).

    Returns
    -------
    List[str]
        The links to the references.

    Raises
    ------
    ValueError
        If ``outformat`` is not a supported format (previously this fell
        through to an unbound-variable NameError).
    """
    if outformat == FORMAT_BIBTEX:
        # Fixed: this pattern was missing the closing '"' that every
        # sibling pattern has; the captured group itself is unchanged.
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)"')
    elif outformat == FORMAT_ENDNOTE:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.enw\?[^"]*)"')
    elif outformat == FORMAT_REFMAN:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ris\?[^"]*)"')
    elif outformat == FORMAT_WENXIANWANG:
        refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ral\?[^"]*)"')
    else:
        raise ValueError('unsupported citation format: %r' % (outformat,))
    reflist = refre.findall(html)
    # Unescape named HTML entities (e.g. &amp;) in the captured links.
    reflist = [re.sub('&(%s);' % '|'.join(name2codepoint), lambda m: chr(name2codepoint[m.group(1)]), s) for s in reflist]
    return reflist
|
def show_disk(kwargs=None, conn=None, call=None):
    '''.. versionadded:: 2015.8.0

    Return information about a disk.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_disk my-azure name=my_disk
    '''
    if call != 'function':
        raise SaltCloudSystemExit('The get_disk function must be called with -f or --function.')
    if not conn:
        conn = get_conn()
    if kwargs is None:
        kwargs = {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    return object_to_dict(conn.get_disk(kwargs['name']))
|
def zip_unicode(output, version):
    """Zip the plain-text Unicode data files for ``version``.

    Creates ``<output>/unicodedata/<version>.zip`` containing every
    ``.txt`` file found (recursively) under
    ``<output>/unicodedata/<version>``, stored flat by file name.

    :param output: root output directory.
    :param version: Unicode version string, e.g. ``'9.0.0'``.
    """
    target = os.path.join(output, 'unicodedata', version)
    zip_path = os.path.join(output, 'unicodedata', '%s.zip' % version)
    print('Zipping %s.zip...' % version)
    # Use a context manager so the archive is always closed: the original
    # leaked the handle, and a ZIP's central directory is only written on
    # close, so the archive could be left truncated/invalid.
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipper:
        for root, dirs, files in os.walk(target):
            for file in files:
                if file.endswith('.txt'):
                    zipper.write(os.path.join(root, file), arcname=file)
|
def filter_convolve_stack(data, filters, filter_rot=False, method='scipy'):
    r"""Convolve a stack of input images with the wavelet filters.

    Parameters
    ----------
    data : np.ndarray
        Input data, 3D array (a stack of 2D images).
    filters : np.ndarray
        Wavelet filters, 3D array.
    filter_rot : bool, optional
        Option to rotate wavelet filters (default is ``False``).
    method : str {'astropy', 'scipy'}, optional
        Convolution method (default is ``'scipy'``).

    Returns
    -------
    np.ndarray
        Convolved data cube.

    Examples
    --------
    >>> from modopt.signal.wavelet import filter_convolve_stack
    >>> x = np.arange(9).reshape(3, 3).astype(float)
    >>> filter_convolve_stack(x, x)
    array([[[  4.,   1.,   4.],
            [ 13.,  10.,  13.],
            [ 22.,  19.,  22.]],
           [[ 13.,  10.,  13.],
            [ 49.,  46.,  49.],
            [ 85.,  82.,  85.]],
           [[ 22.,  19.,  22.],
            [ 85.,  82.,  85.],
            [148., 145., 148.]]])
    """
    # Convolve each image slice independently, then stack the results.
    convolved = [
        filter_convolve(image, filters, filter_rot=filter_rot, method=method)
        for image in data
    ]
    return np.array(convolved)
|
def format_throughput(available, used=None):
    """Format read/write throughput for display.

    With only ``available``, return it as a plain string; with ``used``,
    return ``used/available`` followed by the percentage consumed.
    """
    if used is None:
        return str(available)
    fraction = float(used) / available
    return "{0:.0f}/{1:.0f} ({2:.0%})".format(used, available, fraction)
|
def distinguish(self, how=True):
    """Distinguishes this thing (POST). Calls :meth:`narwal.Reddit.distinguish`.

    :param how: either True, False, or 'admin'
    """
    # Delegate to the API client using this thing's fullname.
    return self._reddit.distinguish(self.name, how=how)
|
async def _async_start(self, auto_register=True):
    """Starts the agent from a coroutine. This fires some actions:

        * if auto_register: register the agent in the server
        * runs the event loop
        * connects the agent to the server
        * runs the registered behaviours

    Args:
        auto_register (bool, optional): register the agent in the server (Default value = True)
    """
    if auto_register:
        await self._async_register()
    self.client = aioxmpp.PresenceManagedClient(self.jid, aioxmpp.make_security_layer(self.password, no_verify=not self.verify_security), loop=self.loop, logger=logging.getLogger(self.jid.localpart))
    # obtain an instance of the service
    self.message_dispatcher = self.client.summon(SimpleMessageDispatcher)
    # Presence service
    self.presence = PresenceManager(self)
    await self._async_connect()
    # register a message callback here
    self.message_dispatcher.register_callback(aioxmpp.MessageType.CHAT, None, self._message_received, )
    # Run user-defined setup before the agent is flagged alive.
    await self.setup()
    self._alive.set()
    # Launch any behaviours that were added before start.
    for behaviour in self.behaviours:
        if not behaviour.is_running:
            behaviour.start()
|
def _reassign_vd_dirrecord_extents(vd, current_extent):
    # type: (headervd.PrimaryOrSupplementaryVD, int) -> Tuple[int, List[inode.Inode]]
    '''An internal helper method for reassign_extents that assigns extents to
    directory records for the passed in Volume Descriptor. The current
    extent is passed in, and this function returns the extent after the
    last one it assigned.

    Parameters:
     vd - The volume descriptor on which to operate.
     current_extent - The current extent before assigning extents to the
                      volume descriptor directory records.
    Returns:
     A tuple of the current extent after assigning extents to the volume
     descriptor directory records, and the list of inodes found on files.
    '''
    log_block_size = vd.logical_block_size()
    # Here we re-walk the entire tree, re-assigning extents as necessary.
    root_dir_record = vd.root_directory_record()
    root_dir_record.set_data_location(current_extent, 0)
    current_extent += utils.ceiling_div(root_dir_record.data_length, log_block_size)
    # Walk through the list, assigning extents to all of the directories.
    child_link_recs = []  # type: List[dr.DirectoryRecord]
    parent_link_recs = []  # type: List[dr.DirectoryRecord]
    file_list = []
    ptr_index = 1
    # Breadth-first walk over the directory tree.
    dirs = collections.deque([root_dir_record])
    while dirs:
        dir_record = dirs.popleft()
        if dir_record.is_root:
            # The root directory record doesn't need an extent assigned,
            # so just add its children to the list and continue on
            for child in dir_record.children:
                if child.ptr is not None:
                    child.ptr.update_parent_directory_number(ptr_index)
                    ptr_index += 1
            dirs.extend(dir_record.children)
            continue
        dir_record_parent = dir_record.parent
        if dir_record_parent is None:
            raise pycdlibexception.PyCdlibInternalError('Parent of record is empty, this should never happen')
        if dir_record.is_dot():
            # '.' shares its parent directory's extent.
            dir_record.set_data_location(dir_record_parent.extent_location(), 0)
            continue
        dir_record_rock_ridge = dir_record.rock_ridge
        if dir_record.is_dotdot():
            if dir_record_parent.is_root:
                # Special case of the root directory record. In this case, we
                # set the dotdot extent location to the same as the root.
                dir_record.set_data_location(dir_record_parent.extent_location(), 0)
                continue
            if dir_record_parent.parent is None:
                raise pycdlibexception.PyCdlibInternalError('Grandparent of record is empty, this should never happen')
            dir_record.set_data_location(dir_record_parent.parent.extent_location(), 0)
            # Now that we've set the data location, move around the Rock Ridge
            # links if necessary.
            if dir_record_rock_ridge is not None:
                if dir_record_rock_ridge.parent_link is not None:
                    parent_link_recs.append(dir_record)
                if dir_record_parent.rock_ridge is not None:
                    if dir_record_parent.parent is not None:
                        if dir_record_parent.parent.is_root:
                            source_dr = dir_record_parent.parent.children[0]
                        else:
                            source_dr = dir_record_parent.parent
                        if source_dr is None or source_dr.rock_ridge is None:
                            raise pycdlibexception.PyCdlibInternalError('Expected directory record to have Rock Ridge')
                        dir_record_rock_ridge.copy_file_links(source_dr.rock_ridge)
            continue
        if dir_record.is_dir():
            dir_record.set_data_location(current_extent, current_extent)
            for child in dir_record.children:
                if child.ptr is not None:
                    child.ptr.update_parent_directory_number(ptr_index)
                    ptr_index += 1
            # Rock Ridge child-link directories are placeholders and do not
            # consume their own extents.
            if dir_record_rock_ridge is None or not dir_record_rock_ridge.child_link_record_exists():
                current_extent += utils.ceiling_div(dir_record.data_length, log_block_size)
            dirs.extend(dir_record.children)
        else:
            if dir_record.data_length == 0 or (dir_record_rock_ridge is not None and (dir_record_rock_ridge.child_link_record_exists() or dir_record_rock_ridge.is_symlink())):
                # If this is a child link record, the extent location really
                # doesn't matter, since it is fake. We set it to zero.
                dir_record.set_data_location(0, 0)
            else:
                if dir_record.inode is not None:
                    file_list.append(dir_record.inode)
        if dir_record_rock_ridge is not None:
            # Assign an extent to the Rock Ridge Continuation Entry block,
            # if present and not yet located.
            if dir_record_rock_ridge.dr_entries.ce_record is not None and dir_record_rock_ridge.ce_block is not None:
                if dir_record_rock_ridge.ce_block.extent_location() < 0:
                    dir_record_rock_ridge.ce_block.set_extent_location(current_extent)
                    current_extent += 1
                dir_record_rock_ridge.dr_entries.ce_record.update_extent(dir_record_rock_ridge.ce_block.extent_location())
            if dir_record_rock_ridge.cl_to_moved_dr is not None:
                child_link_recs.append(dir_record)
    # After we have reshuffled the extents, we need to update the rock ridge
    # links.
    for ch in child_link_recs:
        if ch.rock_ridge is not None:
            ch.rock_ridge.child_link_update_from_dirrecord()
    for p in parent_link_recs:
        if p.rock_ridge is not None:
            p.rock_ridge.parent_link_update_from_dirrecord()
    return current_extent, file_list
|
def parse_named_unicode(self, i):
    """Parse a named Unicode reference and append its character to the result.

    Case conversion from any active span/single case stack is applied,
    and curly brackets are routed through the format handler when format
    strings are enabled.
    """
    value = ord(_unicodedata.lookup(self.get_named_unicode(i)))
    single = self.get_single_stack()
    if self.span_stack:
        # Apply the active span case first, then any pending single case.
        text = self.convert_case(chr(value), self.span_stack[-1])
        value = ord(self.convert_case(text, single)) if single is not None else ord(text)
    elif single:
        value = ord(self.convert_case(chr(value), single))
    if self.use_format and value in _CURLY_BRACKETS_ORD:
        # Curly brackets are significant in format mode; delegate.
        self.handle_format(chr(value), i)
    elif value <= 0xFF:
        # Emit the Latin-1 range as octal escapes to stay unambiguous.
        self.result.append('\\%03o' % value)
    else:
        self.result.append(chr(value))
|
def _start_transmit(self, stream):
    """Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
    ready for writing. Must only be called from the Broker thread. When the
    associated file descriptor becomes ready for writing,
    :meth:`BasicStream.on_transmit` will be called.
    """
    _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
    side = stream.transmit_side
    # The side must exist and be backed by a real file descriptor.
    assert side and side.fd is not None
    self.poller.start_transmit(side.fd, (side, stream.on_transmit))
|
def _RetryLoop(self, func, timeout=None):
    """Retries an operation until success or deadline.

    Args:
        func: The function to run. Must take a timeout, in seconds, as a single
            parameter. If it raises grpc.RpcError and the deadline has not been
            reached, it will be run again.
        timeout: Retries will continue until timeout seconds have passed.
    """
    timeout = timeout or self.DEFAULT_TIMEOUT
    deadline = time.time() + timeout
    sleep = 1
    while True:
        try:
            return func(timeout)
        except grpc.RpcError:
            # Give up if the upcoming backoff would overshoot the deadline.
            if time.time() + sleep > deadline:
                raise
            time.sleep(sleep)
            # Exponential backoff; shrink the per-call timeout to what's left.
            sleep *= 2
            timeout = deadline - time.time()
|
def page_get_textblocks(infile, pageno, xmltext, height):
    """Get text bounding boxes out of Ghostscript txtwrite XML, merging
    horizontally-adjacent boxes on the same text line. Boxes are returned
    in bottom-up page coordinates."""
    if not hasattr(xmltext, 'findall'):
        return []

    def iter_boxes():
        # Convert each <span> bbox from top-down to bottom-up coordinates,
        # raising the top edge by the (rounded) font size.
        for span in xmltext.findall('.//span'):
            left, top, right, bottom = (int(p) for p in span.attrib['bbox'].split())
            top -= int(float(span.attrib['size']) + 0.5)
            yield (left, height - bottom, right, height - top)

    def iter_merged():
        # Join consecutive boxes that share top/bottom edges when the
        # horizontal gap is smaller than the line height.
        pending = None
        for box in iter_boxes():
            if pending is None:
                pending = box
            if box[1] == pending[1] and box[3] == pending[3]:
                gap = pending[2] - box[0]
                line_height = abs(box[3] - box[1])
                if gap < line_height:
                    pending = (pending[0], pending[1], box[2], box[3])
                    continue
            # flush the accumulated run and start a new one
            yield pending
            pending = box
        if pending is not None:
            yield pending

    return list(iter_merged())
|
def matches(self, filter_props):
    """Check whether this filter matches the supplied properties.

    Any conflicting property value means no match; otherwise the result
    is True only if at least one property both exists here and agrees.
    """
    if filter_props is None:
        return False
    overlap = False
    for prop, expected in filter_props.items():
        if prop not in self.properties:
            continue
        if expected != self.properties[prop]:
            return False
        overlap = True
    return overlap
|
def get_branch(db, root_hash, key):
    """Get a long-format Merkle branch for ``key`` under ``root_hash``."""
    validate_is_bytes(key)
    binary_key = encode_to_bin(key)
    return tuple(_get_branch(db, root_hash, binary_key))
|
def eventChunk(key, lines):
    """Parse EVENT chunks from a precipitation file.

    ## NOTE: RADAR file format not supported currently.
    ## TODO: Add Support for RADAR file format type values

    Parameters:
        key - chunk keyword (kept for the parser-dispatch signature).
        lines - raw lines belonging to this EVENT chunk.

    Returns a dict with the event description, gage/period counts, gage
    coordinates, and the parsed value lines.
    """
    # Contants
    KEYWORDS = ('EVENT', 'NRPDS', 'NRGAG', 'COORD', 'GAGES', 'ACCUM', 'RATES', 'RADAR')
    NUM_CARDS = ('NRPDS', 'NRGAG')
    VALUE_CARDS = ('GAGES', 'ACCUM', 'RATES', 'RADAR')
    # Define result object
    result = {'description': None, 'nrgag': None, 'nrpds': None, 'coords': [], 'valLines': []}
    chunks = pt.chunk(KEYWORDS, lines)
    # Parse chunks associated with each key
    for card, chunkList in iteritems(chunks):
        # Parse each chunk in the chunk list
        for chunk in chunkList:
            schunk = chunk[0].strip().split()
            # Cases
            if card == 'EVENT':
                # EVENT handler: the description is everything after the keyword
                schunk = pt.splitLine(chunk[0])
                result['description'] = schunk[1]
            elif card in NUM_CARDS:
                # Num cards handler
                result[card.lower()] = schunk[1]
            elif card == 'COORD':
                # COORD handler
                schunk = pt.splitLine(chunk[0])
                try:
                    # Extract the event description
                    desc = schunk[3]
                except IndexError:
                    # The description field is optional and may be absent.
                    # (Was a bare 'except', which could mask real errors.)
                    desc = ""
                coord = {'x': schunk[1], 'y': schunk[2], 'description': desc}
                result['coords'].append(coord)
            elif card in VALUE_CARDS:
                # Value cards handler
                # Extract DateTime
                dateTime = datetime(year=int(schunk[1]), month=int(schunk[2]), day=int(schunk[3]), hour=int(schunk[4]), minute=int(schunk[5]))
                # Compile values into a list
                values = []
                for index in range(6, len(schunk)):
                    values.append(schunk[index])
                valueLine = {'type': schunk[0], 'dateTime': dateTime, 'values': values}
                result['valLines'].append(valueLine)
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.