signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def distance_matrix(coords_a, coords_b, cutoff, periodic=False, method="simple"):
    """Calculate the distance matrix between *coords_a* and *coords_b*
    within a certain cutoff.

    This function is a wrapper around different routines and data
    structures for distance searches. It returns a np.ndarray containing
    the distances; when using the "cell-lists" method it returns a
    scipy.sparse.dok_matrix.

    **Parameters**

    coords_a: np.ndarray((N, 3), dtype=float)
        First coordinate array.
    coords_b: np.ndarray((N, 3), dtype=float)
        Second coordinate array.
    cutoff: float
        Maximum distance to search for.
    periodic: False or np.ndarray((3,), dtype=float)
        If False, don't consider periodic images. Otherwise *periodic*
        is an array containing the periodicity in the 3 dimensions.
    method: "simple" | "cell-lists"
        *simple* is a brute-force distance search; *cell-lists* uses the
        cell linked list method and requires a periodic box.

    :raises ValueError: for an unknown method, an unsupported
        cutoff/box combination, or "cell-lists" without periodicity.
    """
    coords_a = np.array(coords_a)
    coords_b = np.array(coords_b)
    if method == "simple":
        if periodic is not False:
            # Periodic-aware brute-force search.
            return distance_array(coords_a, coords_b, cutoff=cutoff,
                                  period=periodic.astype(np.double))
        dist = cdist(coords_a, coords_b)
        # Zero-out pairs beyond the cutoff to mimic a sparse result.
        dist[dist > cutoff] = 0
        return dist
    elif method == "cell-lists":
        if periodic is not False:
            # The cell linked list method requires cutoff <= box/2.
            if np.any(cutoff > periodic / 2):
                raise ValueError("Not working with such a big cutoff.")
            # Cell lists need all-positive coordinates: shift both sets
            # by the lowest corner of either bounding box.
            origin = np.minimum(coords_a.min(axis=0), coords_b.min(axis=0))
            a = CellLinkedList(coords_a - origin, cutoff, periodic)
            b = CellLinkedList(coords_b - origin, cutoff, periodic)
            return a.query_distances_other(b, cutoff)
        # BUG FIX: the original silently returned None on this path.
        raise ValueError('Method "cell-lists" requires a periodic box.')
    else:
        raise ValueError("Method {} not available.".format(method))
def goBack(self):
    """Moves the cursor to the end of the previous editor."""
    current_index = self.indexOf(self.currentEditor())
    if current_index == -1:
        # Current editor not found; nothing to go back to.
        return
    target = self.editorAt(current_index - 1)
    if not target:
        return
    target.setFocus()
    target.setCursorPosition(self.sectionLength())
def update(self, id):
    """PUT /datastores/id: Update an existing item.

    Reads a JSON body from the raw WSGI input stream, loads the
    DataStore with the given id and overwrites its name, type and
    ogrstring fields.

    NOTE(review): no check that the DataStore exists -- `result` is None
    for an unknown id and the attribute assignments will raise; confirm
    whether a 404 should be returned instead.
    NOTE(review): responds 201 (Created) for an update; 200/204 is the
    conventional status for a successful PUT -- confirm with clients.
    """
    # url('DataStores', id=ID)
    # Read exactly CONTENT_LENGTH bytes of the request body.
    content = request.environ['wsgi.input'].read(int(request.environ['CONTENT_LENGTH']))
    content = content.decode('utf8')
    content = simplejson.loads(content)
    result = meta.Session.query(DataStore).get(id)
    result.name = content['name']
    result.type = content['type']
    result.ogrstring = content['ogrstring']
    meta.Session.commit()
    response.status = 201
def wait_for_idle(self):
    """Waits until the worker has nothing more to do. Very useful in tests.

    Polls every 10ms until the worker status is "wait", its greenlet
    pool is empty, and one final dequeue attempt yields no jobs.
    """
    # Be mindful that this is being executed in a different greenlet
    # than the work_* methods.
    while True:
        time.sleep(0.01)
        with self.work_lock:
            if self.status != "wait":
                continue
            if len(self.gevent_pool) > 0:
                continue
            # Force a refresh of the current subqueues, one might just
            # have been created.
            self.refresh_queues()
            # We might be dequeueing a new subqueue. Double check that
            # we don't have anything more to do.
            outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)
            # BUG FIX: was `outcome is "wait"` -- identity comparison
            # with a string literal only happens to work via CPython
            # string interning; use equality.
            if outcome == "wait" and dequeue_jobs == 0:
                break
def compute_statistics(out_dir, gold_prefix, source_prefix, same_samples, use_sge, final_out_prefix):
    """Compute the statistics.

    Compares genotypes between "gold" and "source" datasets for each
    paired sample, writing a per-marker difference report
    (``final_out_prefix + ".diff"``) and a per-sample concordance report
    (``final_out_prefix + ".concordance"``). Intermediate files live in
    *out_dir*, which is deleted at the end.

    NOTE(review): Python 2 only -- uses ``print >>`` statements and
    ``dict.viewkeys()``.

    :param out_dir: temporary working directory (created if missing).
    :param gold_prefix: prefix of the gold-standard genotype files.
    :param source_prefix: prefix of the source genotype files.
    :param same_samples: list of (gold_sample, source_sample) ID pairs.
    :param use_sge: forwarded to ``keepSamples`` (run through SGE).
    :param final_out_prefix: prefix for the two report files.
    :raises ProgramError: on any file-system error.
    """
    # Now, creating a temporary directory
    if not os.path.isdir(out_dir):
        try:
            os.mkdir(out_dir)
        except OSError:
            msg = "{}: file exists".format(out_dir)
            raise ProgramError(msg)
    # The out prefix
    out_prefix = os.path.join(out_dir, "tmp")
    # Subsetting the files
    logger.info(" - Subsetting the files")
    for k, (gold_sample, source_sample) in enumerate(same_samples):
        # Preparing the files: one sample-list file per pair and side.
        try:
            filename = out_prefix + "_{}_gold.samples".format(k)
            with open(filename, 'w') as output_file:
                print >> output_file, "\t".join(gold_sample)
            filename = out_prefix + "_{}_source.samples".format(k)
            with open(filename, "w") as output_file:
                print >> output_file, "\t".join(source_sample)
        except IOError:
            msg = "can't write in dir {}".format(out_dir)
            raise ProgramError(msg)
    # Preparing the files: subset each prefix down to its one sample,
    # producing transposed pedfiles (tmp_<k>_gold.tped / _source.tped).
    nb = len(same_samples)
    keepSamples(
        [gold_prefix] * nb + [source_prefix] * nb,
        [out_prefix + "_{}_gold.samples".format(i) for i in range(nb)] +
        [out_prefix + "_{}_source.samples".format(i) for i in range(nb)],
        [out_prefix + "_{}_gold".format(i) for i in range(nb)] +
        [out_prefix + "_{}_source".format(i) for i in range(nb)],
        use_sge,
        transpose=True,
    )
    # Creating reports
    # The diff file
    diff_file = None
    try:
        diff_file = open(final_out_prefix + ".diff", 'w')
    except IOError:
        msg = "{}: can't write file".format(final_out_prefix + ".diff")
        raise ProgramError(msg)
    # The diff file header
    print >> diff_file, "\t".join(["name", "famID_source", "indID_source", "famID_gold", "indID_gold", "geno_source", "geno_gold"])
    # The concordance file
    concordance_file = None
    try:
        concordance_file = open(final_out_prefix + ".concordance", "w")
    except IOError:
        msg = "{}: can't write file".format(final_out_prefix + ".concordance")
        raise ProgramError(msg)
    # The concordance file header
    print >> concordance_file, "\t".join(["famID_source", "indID_source", "famID_gold", "indID_gold", "nb_geno", "nb_diff", "concordance"])
    for k in range(nb):
        # The samples
        gold_sample, source_sample = same_samples[k]
        # Reading the source freq file; genotypes are stored as
        # unordered allele sets so "A B" compares equal to "B A".
        source_genotypes = {}
        filename = out_prefix + "_{}_source.tped".format(k)
        try:
            with open(filename, 'r') as input_file:
                for line in input_file:
                    row = line.rstrip("\r\n").split("\t")
                    marker_name = row[1]
                    geno = row[4]
                    if geno != "0 0":
                        # Missing genotypes ("0 0") are skipped.
                        geno = set(geno.split(" "))
                        source_genotypes[marker_name] = geno
        except IOError:
            msg = "{}: no such file".format(filename)
            raise ProgramError(msg)
        # Reading the gold freq file
        gold_genotypes = {}
        filename = out_prefix + "_{}_gold.tped".format(k)
        try:
            with open(filename, 'r') as input_file:
                for line in input_file:
                    row = line.rstrip("\r\n").split("\t")
                    marker_name = row[1]
                    geno = row[4]
                    if geno != "0 0":
                        geno = set(geno.split(" "))
                        gold_genotypes[marker_name] = geno
        except IOError:
            msg = "{}: no such file".format(filename)
            raise ProgramError(msg)
        # The genotyped markers in both, with their number
        genotyped_markers = source_genotypes.viewkeys() & gold_genotypes.viewkeys()
        nb_genotyped = len(genotyped_markers)
        # Finding the number of differences, and creating the diff file
        nb_diff = 0
        for marker_name in genotyped_markers:
            # Getting the genotypes
            source_geno = source_genotypes[marker_name]
            gold_geno = gold_genotypes[marker_name]
            # Comparing the genotypes
            if source_geno != gold_geno:
                nb_diff += 1
                # Render sets as sorted "A/B"; a homozygous genotype is
                # a single-element set, so duplicate the allele.
                source_geno_print = list(source_geno - {"0"})
                source_geno_print.sort()
                if len(source_geno_print) == 1:
                    source_geno_print.append(source_geno_print[0])
                gold_geno_print = list(gold_geno - {"0"})
                gold_geno_print.sort()
                if len(gold_geno_print) == 1:
                    gold_geno_print.append(gold_geno_print[0])
                # We print in the diff file
                print >> diff_file, "\t".join([marker_name, "\t".join(source_sample), "\t".join(gold_sample), "/".join(source_geno_print), "/".join(gold_geno_print)])
        # Creating the concordance file
        concordance = 0.0
        if nb_genotyped != 0:
            concordance = (nb_genotyped - nb_diff) / float(nb_genotyped)
        print >> concordance_file, "\t".join(["\t".join(source_sample), "\t".join(gold_sample), str(nb_genotyped), str(nb_diff), str(concordance)])
    # Closing the output files
    diff_file.close()
    concordance_file.close()
    # Deleating the temporary directory
    try:
        shutil.rmtree(out_dir)
    except IOError:
        print >> sys.stderr, " - Can't delete {}".format(out_dir)
def set_level(self, val):
    """Set the device ON LEVEL.

    0 turns the device off and 255 turns it fully on; any other value
    is reached by issuing the needed number of brighten/dim steps and
    then notifying subscribers of the new value.

    :param val: target level. A value below 1 is treated as a fraction
        of full brightness; values 1..255 are used directly.
    """
    if val == 0:
        self.off()
    elif val == 255:
        self.on()
    else:
        # Normalise the requested value into the 0-255 range.
        setlevel = 255
        if val < 1:
            setlevel = val * 255
        elif val <= 0xff:
            setlevel = val
        change = setlevel - self._value
        # Size of one brighten/dim step.
        increment = 255 / self._steps
        steps = round(abs(change) / increment)
        # BUG FIX: removed leftover debug print of the step count.
        if change > 0:
            method = self.brighten
            self._value += round(steps * increment)
            self._value = min(255, self._value)
        else:
            method = self.dim
            self._value -= round(steps * increment)
            self._value = max(0, self._value)
        # pylint: disable=unused-variable
        for step in range(0, steps):
            method(True)
        self._update_subscribers(self._value)
def get_all_queues(self):
    """Get information about all queues in the cluster.

    Returns
    -------
    queues : list of Queue

    Examples
    --------
    >>> client.get_all_queues()
    [Queue<name='default', percent_used=0.00>,
     Queue<name='myqueue', percent_used=5.00>,
     Queue<name='child1', percent_used=10.00>,
     Queue<name='child2', percent_used=0.00>]
    """
    response = self._call('getAllQueues', proto.Empty())
    # Convert each protobuf message into a Queue wrapper.
    queues = []
    for raw_queue in response.queues:
        queues.append(Queue.from_protobuf(raw_queue))
    return queues
def _build_opr_data ( self , data , store ) :
"""Returns a well formatted OPR data""" | return { "invoice_data" : { "invoice" : { "total_amount" : data . get ( "total_amount" ) , "description" : data . get ( "description" ) } , "store" : store . info } , "opr_data" : { "account_alias" : data . get ( "account_alias" ) } } |
def _get_full_block(grouped_dicoms):
    """Generate a full datablock containing all timepoints.

    :param grouped_dicoms: list of slice/mosaic groups, one per timepoint.
    :return: a 4D numpy array (x, y, z, t) stacking one 3D block per
        timepoint; dtype follows the first block's dtype.
    :raises ConversionError: when a timepoint block's shape does not
        match the first one (interpreted as missing DICOM slices).
    """
    # For each slice/mosaic create a data volume block
    data_blocks = []
    for index in range(0, len(grouped_dicoms)):
        logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
        data_blocks.append(_timepoint_to_block(grouped_dicoms[index]))
    # Add the data_blocks together to one 4d block
    size_x = numpy.shape(data_blocks[0])[0]
    size_y = numpy.shape(data_blocks[0])[1]
    size_z = numpy.shape(data_blocks[0])[2]
    size_t = len(data_blocks)
    full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_blocks[0].dtype)
    for index in range(0, size_t):
        # Every timepoint must match the first block's 3D shape.
        if full_block[:, :, :, index].shape != data_blocks[index].shape:
            logger.warning('Missing slices (slice count mismatch between timepoint %s and %s)' % (index - 1, index))
            logger.warning('---------------------------------------------------------')
            logger.warning(full_block[:, :, :, index].shape)
            logger.warning(data_blocks[index].shape)
            logger.warning('---------------------------------------------------------')
            raise ConversionError("MISSING_DICOM_FILES")
        full_block[:, :, :, index] = data_blocks[index]
    return full_block
def simplify(self):
    """Return a new simplified expression in canonical form from this
    expression.

    For simplification of AND and OR the following rules are used
    recursively bottom up:

    - Associativity (output does not contain same operations nested)
    - Annihilation
    - Idempotence
    - Identity
    - Complementation
    - Elimination
    - Absorption
    - Commutativity (output is always sorted)

    Other boolean objects are also in their canonical form.
    """
    # TODO: Refactor DualBase.simplify into different "sub-evals".
    # If self is already canonical do nothing.
    if self.iscanonical:
        return self
    # Otherwise bring arguments into canonical form.
    args = [arg.simplify() for arg in self.args]
    # Create new instance of own class with canonical args.
    # TODO: Only create new class if some args changed.
    expr = self.__class__(*args)
    # Literalize before doing anything, this also applies De Morgan's Law
    expr = expr.literalize()
    # Associativity:
    #     (A & B) & C = A & (B & C) = A & B & C
    #     (A | B) | C = A | (B | C) = A | B | C
    expr = expr.flatten()
    # Annihilation: A & 0 = 0, A | 1 = 1
    if self.annihilator in expr.args:
        return self.annihilator
    # Idempotence: A & A = A, A | A = A
    # this boils down to removing duplicates (order-preserving)
    args = []
    for arg in expr.args:
        if arg not in args:
            args.append(arg)
    if len(args) == 1:
        return args[0]
    # Identity: A & 1 = A, A | 0 = A
    if self.identity in args:
        args.remove(self.identity)
        if len(args) == 1:
            return args[0]
    # Complementation: A & ~A = 0, A | ~A = 1
    for arg in args:
        if self.NOT(arg) in args:
            return self.annihilator
    # Elimination: (A & B) | (A & ~B) = A, (A | B) & (A | ~B) = A
    # Pairwise scan of dual-type terms of equal arity.
    i = 0
    while i < len(args) - 1:
        j = i + 1
        ai = args[i]
        if not isinstance(ai, self.dual):
            i += 1
            continue
        while j < len(args):
            aj = args[j]
            if not isinstance(aj, self.dual) or len(ai.args) != len(aj.args):
                j += 1
                continue
            # Find terms where only one arg is different.
            negated = None
            for arg in ai.args:
                # FIXME: what does this pass do?
                if arg in aj.args:
                    # Shared argument: not the differing one.
                    pass
                elif self.NOT(arg).cancel() in aj.args:
                    if negated is None:
                        negated = arg
                    else:
                        # More than one negated difference: no match.
                        negated = None
                        break
                else:
                    # Argument neither shared nor negated: no match.
                    negated = None
                    break
            # If the different arg is a negation simplify the expr.
            if negated is not None:
                # Cancel out one of the two terms.
                del args[j]
                aiargs = list(ai.args)
                aiargs.remove(negated)
                if len(aiargs) == 1:
                    args[i] = aiargs[0]
                else:
                    args[i] = self.dual(*aiargs)
                if len(args) == 1:
                    return args[0]
                else:
                    # Now the other simplifications have to be redone.
                    return self.__class__(*args).simplify()
            j += 1
        i += 1
    # Absorption: A & (A | B) = A, A | (A & B) = A
    # Negative absorption: A & (~A | B) = A & B, A | (~A & B) = A | B
    args = self.absorb(args)
    if len(args) == 1:
        return args[0]
    # Commutativity: A & B = B & A, A | B = B | A
    args.sort()
    # Create new (now canonical) expression.
    expr = self.__class__(*args)
    expr.iscanonical = True
    return expr
def undefine(self):
    """Undefine the Function.

    Python equivalent of the CLIPS undeffunction command.

    The object becomes unusable after this method has been called.
    """
    outcome = lib.EnvUndeffunction(self._env, self._fnc)
    if outcome != 1:
        raise CLIPSError(self._env)
    # Drop the environment handle so further use fails fast.
    self._env = None
def get_events(self) -> List[Event]:
    """Get events associated with the scheduling object.

    Delegates to the module-level ``get_events`` helper, passing this
    object's key. (The method deliberately shares the helper's name;
    inside the body the bare name resolves to the module global, not
    to this method.)

    Returns:
        list of Event objects
    """
    LOG.debug('Getting events for %s', self.key)
    return get_events(self.key)
def getChildren(self, name=None, ns=None):
    """Get a list of children by (optional) name and/or (optional) namespace.

    @param name: The name of a child element (may contain prefix).
    @type name: basestring
    @param ns: An optional namespace used to match the child.
    @type ns: (I{prefix}, I{name})
    @return: The list of matching children.
    @rtype: [L{Element}, ...]
    """
    if name is None:
        # No name filter: return the full child list.
        # NOTE(review): `ns` is ignored on this path, so a
        # namespace-only query returns all children regardless of
        # namespace -- confirm whether that is intended.
        matched = self.__root
    else:
        # Only the first matching child is returned on this path.
        matched = self.getChild(name, ns)
    if matched is None:
        return []
    else:
        return [matched, ]
def decode_body(cls, header, f):
    """Generates a `MqttPingresp` packet given a `MqttFixedHeader`.

    This method asserts that header.packet_type is `pingresp`.

    Parameters
    ----------
    header: MqttFixedHeader
    f: file
        Object with a read method (unused: a pingresp has no body).

    Raises
    ------
    DecodeError
        When there are extra bytes at the end of the packet.

    Returns
    -------
    int
        Number of bytes consumed from ``f`` (always 0).
    MqttPingresp
        Object extracted from ``f``.
    """
    assert header.packet_type == MqttControlPacketType.pingresp
    # A pingresp carries no payload; any remaining length is an error.
    if header.remaining_len != 0:
        raise DecodeError('Extra bytes at end of packet.')
    return 0, MqttPingresp()
def axis_bounds(self) -> Dict[str, Tuple[float, float]]:
    """The (minimum, maximum) bounds for each axis.

    Axes 'B' and 'C' have no enforced bounds and are omitted; every
    other axis spans from 0 to its home position plus half a unit.
    """
    bounds = {}
    for axis_name, home_position in _HOME_POSITION.items():
        if axis_name in 'BC':
            continue
        bounds[axis_name] = (0, home_position + 0.5)
    return bounds
def add_link(self, **kwgs):
    """Add additional link to the document. Links will be embeded only
    inside of this document.

    >>> add_link(href='styles.css', rel='stylesheet', type='text/css')
    """
    self.links.append(kwgs)
    # Javascript links mark the document as scripted (once only).
    is_script = kwgs.get('type') == 'text/javascript'
    if is_script and 'scripted' not in self.properties:
        self.properties.append('scripted')
def _select_best_smooth_at_each_point(self):
    """Solve Eq (10) to find the best span for each observation.

    Stores index so we can easily grab the best residual smooth,
    primary smooth, etc.
    """
    # Transpose: one tuple of residuals (one per candidate span) per point.
    for point_residuals in zip(*self._residual_smooths):
        best_index, _ = min(enumerate(point_residuals), key=lambda pair: pair[1])
        self._best_span_at_each_point.append(DEFAULT_SPANS[best_index])
def get_points(orig, dest, taillen):
    """Return a pair of lists of points for use making an arrow.

    The first list is the beginning and end point of the trunk of the arrow.
    The second list is the arrowhead.

    NOTE(review): `orig`/`dest` appear to be widget-like objects exposing
    `center`, `size` and `collide_point` -- confirm against callers.
    """
    # Adjust the start and end points so they're on the first
    # non-transparent pixel.
    # y = slope(x - ox) + oy
    # x = (y - oy)/slope + ox
    ox, oy = orig.center
    ow, oh = orig.size
    dx, dy = dest.center
    dw, dh = dest.size
    # Work in a (possibly) mirrored frame (xco/yco = -1) so the math
    # below can always assume orig is left of and below dest; results
    # are mirrored back at the end.
    if ox < dx:
        leftx = ox
        rightx = dx
        xco = 1
    elif ox > dx:
        leftx = ox * -1
        rightx = dx * -1
        xco = -1
    else:
        # straight up and down arrow
        return up_and_down(orig, dest, taillen)
    if oy < dy:
        boty = oy
        topy = dy
        yco = 1
    elif oy > dy:
        boty = oy * -1
        topy = dy * -1
        yco = -1
    else:
        # straight left and right arrow
        return left_and_right(orig, dest, taillen)
    slope = (topy - boty) / (rightx - leftx)
    # start from the earliest point that intersects the bounding box.
    # work toward the center to find a non-transparent pixel
    # y - boty = ((topy - boty) / (rightx - leftx)) * (x - leftx)
    if slope <= 1:
        # Shallow line: walk along x toward dest's center...
        for rightx in range(int(rightx - dw / 2), int(rightx) + 1):
            topy = slope * (rightx - leftx) + boty
            if dest.collide_point(rightx * xco, topy * yco):
                # ...then refine by tenths of a pixel.
                rightx = float(rightx - 1)
                for pip in range(10):
                    rightx += 0.1 * pip
                    topy = slope * (rightx - leftx) + boty
                    if dest.collide_point(rightx * xco, topy * yco):
                        break
                break
        # Same search from orig's edge back toward its center.
        for leftx in range(int(leftx + ow / 2), int(leftx) - 1, -1):
            boty = slope * (leftx - rightx) + topy
            if orig.collide_point(leftx * xco, boty * yco):
                leftx = float(leftx + 1)
                for pip in range(10):
                    leftx -= 0.1 * pip
                    boty = slope * (leftx - rightx) + topy
                    if orig.collide_point(leftx * xco, boty * yco):
                        break
                break
    else:
        # Steep line: walk along y instead.
        # x = leftx + ((rightx - leftx)(y - boty)) / (topy - boty)
        for topy in range(int(topy - dh / 2), int(topy) + 1):
            rightx = leftx + (topy - boty) / slope
            if dest.collide_point(rightx * xco, topy * yco):
                topy = float(topy - 1)
                for pip in range(10):
                    topy += 0.1 * pip
                    rightx = leftx + (topy - boty) / slope
                    if dest.collide_point(rightx * xco, topy * yco):
                        break
                break
        for boty in range(int(boty + oh / 2), int(boty) - 1, -1):
            leftx = (boty - topy) / slope + rightx
            if orig.collide_point(leftx * xco, boty * yco):
                boty = float(boty + 1)
                for pip in range(10):
                    boty -= 0.1 * pip
                    leftx = (boty - topy) / slope + rightx
                    if orig.collide_point(leftx * xco, boty * yco):
                        break
                break
    rise = topy - boty
    run = rightx - leftx
    try:
        start_theta = atan(rise / run)
    except ZeroDivisionError:
        # Degenerate after adjustment: fall back to a vertical arrow.
        return up_and_down(orig, dest, taillen)
    try:
        end_theta = atan(run / rise)
    except ZeroDivisionError:
        # Degenerate after adjustment: fall back to a horizontal arrow.
        return left_and_right(orig, dest, taillen)
    # make the little wedge at the end so you can tell which way the
    # arrow's pointing, and flip it all back around to the way it was
    top_theta = start_theta - fortyfive
    bot_theta = pi - fortyfive - end_theta
    xoff1 = cos(top_theta) * taillen
    yoff1 = sin(top_theta) * taillen
    xoff2 = cos(bot_theta) * taillen
    yoff2 = sin(bot_theta) * taillen
    x1 = (rightx - xoff1) * xco
    x2 = (rightx - xoff2) * xco
    y1 = (topy - yoff1) * yco
    y2 = (topy - yoff2) * yco
    startx = leftx * xco
    starty = boty * yco
    endx = rightx * xco
    endy = topy * yco
    return ([startx, starty, endx, endy], [x1, y1, endx, endy, x2, y2])
def quick1D(data, axis=0, at={}, channel=0, *, local=False, autosave=False, save_directory=None, fname=None, verbose=True):
    """Quickly plot 1D slice(s) of data.

    Parameters
    ----------
    data : WrightTools.Data object
        Data to plot.
    axis : string or integer (optional)
        Expression or index of axis. Default is 0.
    at : dictionary (optional)
        Dictionary of parameters in non-plotted dimension(s). If not
        provided, plots will be made at each coordinate.
    channel : string or integer (optional)
        Name or index of channel to plot. Default is 0.
    local : boolean (optional)
        Toggle plotting locally. Default is False.
    autosave : boolean (optional)
        Toggle autosave. Default is False.
    save_directory : string (optional)
        Location to save image(s). Default is None (auto-generated).
    fname : string (optional)
        File name. If None, data name is used. Default is None.
    verbose : boolean (optional)
        Toggle talkback. Default is True.

    Returns
    -------
    list of strings
        List of saved image files (if any).
    """
    # channel index
    channel_index = wt_kit.get_index(data.channel_names, channel)
    shape = data.channels[channel_index].shape
    # Size-1 dimensions can't be plotted; pin them in `at`.
    collapse = [i for i in range(len(shape)) if shape[i] == 1]
    # Copy before updating so the caller's dict is not mutated.
    at = at.copy()
    at.update({c: 0 for c in collapse})
    # prepare data
    chopped = data.chop(axis, at=at, verbose=False)
    # prepare figure
    fig = None
    if len(chopped) > 10:
        if not autosave:
            print("more than 10 images will be generated: forcing autosave")
            autosave = True
    # prepare output folders
    if autosave:
        if save_directory:
            pass
        else:
            if len(chopped) == 1:
                save_directory = os.getcwd()
                if fname:
                    pass
                else:
                    fname = data.natural_name
            else:
                folder_name = "quick1D " + wt_kit.TimeStamp().path
                os.mkdir(folder_name)
                save_directory = folder_name
    # determine ymin and ymax for global axis scale
    data_channel = data.channels[channel_index]
    ymin, ymax = data_channel.min(), data_channel.max()
    dynamic_range = ymax - ymin
    # Pad by 5% so traces don't touch the frame, but never pad across 0.
    ymin -= dynamic_range * 0.05
    ymax += dynamic_range * 0.05
    if np.sign(ymin) != np.sign(data_channel.min()):
        ymin = 0
    if np.sign(ymax) != np.sign(data_channel.max()):
        ymax = 0
    # chew through image generation
    out = []
    for i, d in enumerate(chopped.values()):
        # unpack data -----
        axis = d.axes[0]
        xi = axis.full
        channel = d.channels[channel_index]
        zi = channel[:]
        # create figure -----
        aspects = [[[0, 0], 0.5]]
        fig, gs = create_figure(width="single", nrows=1, cols=[1], aspects=aspects)
        ax = plt.subplot(gs[0, 0])
        # plot -----
        plt.plot(xi, zi, lw=2)
        plt.scatter(xi, zi, color="grey", alpha=0.5, edgecolor="none")
        # decoration -----
        plt.grid()
        # limits
        if local:
            pass
        else:
            plt.ylim(ymin, ymax)
        # label axes
        ax.set_xlabel(axis.label, fontsize=18)
        ax.set_ylabel(channel.natural_name, fontsize=18)
        plt.xticks(rotation=45)
        plt.axvline(0, lw=2, c="k")
        plt.xlim(xi.min(), xi.max())
        # add constants to title
        ls = []
        for constant in d.constants:
            ls.append(constant.label)
        title = ", ".join(ls)
        _title(fig, data.natural_name, subtitle=title)
        # variable marker lines
        for constant in d.constants:
            if constant.units is not None:
                if axis.units_kind == constant.units_kind:
                    constant.convert(axis.units)
                    plt.axvline(constant.value, color="k", linewidth=4, alpha=0.25)
        # save -----
        if autosave:
            if fname:
                file_name = fname + " " + str(i).zfill(3)
            else:
                file_name = str(i).zfill(3)
            fpath = os.path.join(save_directory, file_name + ".png")
            savefig(fpath, fig=fig)
            plt.close()
            if verbose:
                print("image saved at", fpath)
            out.append(fpath)
    chopped.close()
    return out
def _find_benchmarks ( self ) :
"""Return a suite of all tests cases contained in testCaseClass""" | def is_bench_method ( attrname , prefix = "bench" ) :
return attrname . startswith ( prefix ) and hasattr ( getattr ( self . __class__ , attrname ) , '__call__' )
return list ( filter ( is_bench_method , dir ( self . __class__ ) ) ) |
def tx_min(tasmax, freq='YS'):
    r"""Lowest max temperature

    The minimum of daily maximum temperature.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Maximum daily temperature [℃] or [K]
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Minimum of daily maximum temperature.

    Notes
    -----
    Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of
    period :math:`j`. Then the minimum daily maximum temperature for
    period :math:`j` is:

    .. math::

        TXn_j = min(TX_{ij})
    """
    # Resample to the requested frequency, then take the per-period
    # minimum while preserving metadata attributes.
    resampled = tasmax.resample(time=freq)
    return resampled.min(dim='time', keep_attrs=True)
def remove_file(filename, recursive=False, force=False):
    """Removes a file or directory.

    :param filename: path to remove.
    :param recursive: required to delete a directory tree
        (Unix-like behaviour: plain directories are refused).
    :param force: when True, errors are swallowed and the function
        still reports success.
    :return: True on success (or when forced), False otherwise.
    """
    import os
    try:
        mode = os.stat(filename)[0]
        if mode & 0x4000 != 0:  # directory bit of st_mode
            if recursive:
                for entry in os.listdir(filename):
                    success = remove_file(filename + '/' + entry, recursive, force)
                    if not success and not force:
                        return False
                os.rmdir(filename)
            # PGH Work like Unix: require recursive
            else:
                if not force:
                    return False
        else:
            os.remove(filename)
    # BUG FIX: narrowed from a bare `except:` -- only swallow filesystem
    # errors, not KeyboardInterrupt or programming errors.
    except OSError:
        if not force:
            return False
    return True
def _make_entities_from_ids(entity_cls, entity_objs_and_ids, server_config):
    """Given an iterable of entities and/or IDs, return a list of entities.

    :param entity_cls: An :class:`Entity` subclass.
    :param entity_objs_and_ids: An iterable of
        :class:`nailgun.entity_mixins.Entity` objects and/or entity IDs.
        All of the entities in this iterable should be of type
        ``entity_cls``.
    :returns: A list of ``entity_cls`` objects.
    """
    entities = []
    for entity_or_id in entity_objs_and_ids:
        entities.append(_make_entity_from_id(entity_cls, entity_or_id, server_config))
    return entities
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    (free, total) : (int, int)
        The free and total amount of GPU global memory, in bytes.
    """
    # Out-parameters filled in by the C library call.
    free = ctypes.c_uint64()
    total = ctypes.c_uint64()
    dev_id = ctypes.c_int(device_id)
    check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))
    return (free.value, total.value)
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False):
    """Summarize the data into interval buckets of a certain size.

    By default, the contents of each interval bucket are summed together.
    This is useful for counters where each increment represents a discrete
    event and retrieving a "per X" value requires summing all the events in
    that interval.

    Specifying 'avg' instead will return the mean for each bucket, which can
    be more useful when the value is a gauge that represents a certain value
    in time.

    'max', 'min' or 'last' can also be specified.

    By default, buckets are calculated by rounding to the nearest interval.
    This works well for intervals smaller than a day. For example, 22:32 will
    end up in the bucket 22:00-23:00 when the interval=1hour.

    Passing alignToFrom=true will instead create buckets starting at the from
    time. In this case, the bucket for 22:32 depends on the from time. If
    from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.

    Example::

        # total errors per hour
        &target=summarize(counter.errors, "1hour")

        # new users per week
        &target=summarize(nonNegativeDerivative(gauge.num_users), "1week")

        # average queue size per hour
        &target=summarize(queue.size, "1hour", "avg")

        # maximum queue size during each hour
        &target=summarize(queue.size, "1hour", "max")

        # 2010 Q1-4
        &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
    """
    results = []
    delta = parseTimeOffset(intervalString)
    interval = to_seconds(delta)
    for series in seriesList:
        buckets = {}
        timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
        datapoints = zip_longest(timestamps, series)
        # Phase 1: assign every datapoint to its bucket.
        for timestamp, value in datapoints:
            if timestamp is None:
                continue
            if alignToFrom:
                # Bucket index relative to the series start.
                bucketInterval = int((timestamp - series.start) / interval)
            else:
                # Bucket keyed by interval-aligned wall-clock time.
                bucketInterval = timestamp - (timestamp % interval)
            if bucketInterval not in buckets:
                buckets[bucketInterval] = []
            if value is not None:
                buckets[bucketInterval].append(value)
        if alignToFrom:
            newStart = series.start
            newEnd = series.end
        else:
            newStart = series.start - (series.start % interval)
            newEnd = series.end - (series.end % interval) + interval
        # Phase 2: aggregate each bucket with the requested function.
        newValues = []
        for timestamp in range(newStart, newEnd, interval):
            if alignToFrom:
                # newEnd tracks the last emitted timestamp; corrected by
                # one interval after the loop.
                newEnd = timestamp
                bucketInterval = int((timestamp - series.start) / interval)
            else:
                bucketInterval = timestamp - (timestamp % interval)
            bucket = buckets.get(bucketInterval, [])
            if bucket:
                if func == 'avg':
                    newValues.append(float(sum(bucket)) / float(len(bucket)))
                elif func == 'last':
                    newValues.append(bucket[len(bucket) - 1])
                elif func == 'max':
                    newValues.append(max(bucket))
                elif func == 'min':
                    newValues.append(min(bucket))
                else:
                    # Default (and explicit 'sum'): total of the bucket.
                    newValues.append(sum(bucket))
            else:
                # Empty bucket: gap in the output series.
                newValues.append(None)
        if alignToFrom:
            newEnd += interval
        newName = "summarize(%s, \"%s\", \"%s\"%s)" % (series.name, intervalString, func, alignToFrom and ", true" or "")
        newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues)
        newSeries.pathExpression = newName
        results.append(newSeries)
    return results
def get_scoped_variable_from_name(self, name):
    """Get the id of the scoped variable with a unique name.

    :param name: the unique name of the scoped variable
    :return: the id (dictionary key) of the scoped variable with the
        given name
    :raises AttributeError: if the name is not in the scoped_variables
        dictionary
    """
    for scoped_variable_id, scoped_variable in self.scoped_variables.items():
        if scoped_variable.name == name:
            return scoped_variable_id
    # BUG FIX: the message was passed logging-style as two separate
    # arguments, so the exception carried the raw "%s" template and the
    # name as args instead of a formatted message.
    raise AttributeError("Name {} is not in scoped_variables dictionary".format(name))
def _bind_for_search(anonymous=False, opts=None):
    '''Bind with binddn and bindpw only for searching LDAP

    :param anonymous: Try binding anonymously
    :param opts: Pass in when __opts__ is not available
    :return: LDAPConnection object

    NOTE(review): when ``anonymous`` is True, or when binddn/bindpw are
    not both configured, the function falls through and implicitly
    returns None -- confirm that callers handle a None connection.
    '''
    # Get config params; create connection dictionary
    connargs = {}
    # config params (auth.ldap.*)
    params = {
        'mandatory': ['uri', 'server', 'port', 'starttls', 'tls', 'no_verify', 'anonymous', 'accountattributename', 'activedirectory'],
        'additional': ['binddn', 'bindpw', 'filter', 'groupclass', 'auth_by_group_membership_only'],
    }
    paramvalues = {}
    for param in params['mandatory']:
        paramvalues[param] = _config(param, opts=opts)
    for param in params['additional']:
        paramvalues[param] = _config(param, mandatory=False, opts=opts)
    # The caller's choice overrides the configured 'anonymous' flag.
    paramvalues['anonymous'] = anonymous
    # Only add binddn/bindpw to the connargs when they're set, as they're not
    # mandatory for initializing the LDAP object, but if they're provided
    # initially, a bind attempt will be done during the initialization to
    # validate them
    if paramvalues['binddn']:
        connargs['binddn'] = paramvalues['binddn']
        if paramvalues['bindpw']:
            # Promote bindpw to mandatory so it is copied below.
            params['mandatory'].append('bindpw')
    for name in params['mandatory']:
        connargs[name] = paramvalues[name]
    if not paramvalues['anonymous']:
        if paramvalues['binddn'] and paramvalues['bindpw']:
            # search for the user's DN to be used for the actual authentication
            return _LDAPConnection(**connargs).ldap
def setup(app):
    """Called at Sphinx initialization.

    Registers this extension's hooks on the given Sphinx application:
    an apidoc runner, recommonmark configuration, and two transforms.

    :param app: the Sphinx application object being initialized
    """
    # Triggers sphinx-apidoc to generate API documentation.
    app.connect('builder-inited', RunSphinxAPIDoc)
    # Configure recommonmark; the final True makes the value rebuild-triggering.
    app.add_config_value('recommonmark_config', {'enable_auto_doc_ref': False}, True)
    app.add_transform(AutoStructify)
    app.add_transform(ProcessLink)
def valueFromString(self, value, context=None):
    """Converts the inputted string text to a value that matches the type from
    this column type.

    :param value | <str>
    """
    if value == 'now':
        return datetime.datetime.now().time()
    if dateutil_parser:
        # Prefer the flexible dateutil parser when it is available.
        return dateutil_parser.parse(value).time()
    parsed = time.strptime(value, self.defaultFormat())
    return datetime.time(parsed.tm_hour, parsed.tm_min, parsed.tm_sec)
def get_prep_value(self, value):
    """Returns field's value prepared for saving into a database."""
    if not isinstance(value, LocalizedValue):
        return super().get_prep_value(value)

    cleaned = LocalizedValue()
    for key, entry in value.__dict__.items():
        if entry is None:
            cleaned.set(key, '')
        else:
            # File objects provided via a form must be converted to
            # unicode for database insertion.
            cleaned.set(key, six.text_type(entry))
    return super().get_prep_value(cleaned)
def _namedtupleload(l: Loader, value: Dict[str, Any], type_) -> Tuple:
    """This loads a Dict[str, Any] into a NamedTuple (or dataclass).

    Collects required/optional fields and their type hints from either the
    NamedTuple or the dataclass introspection API, applies dataclass metadata
    name-mangling, validates that all required fields are present (and, when
    ``l.failonextra`` is set, that no unknown fields are), then recursively
    loads each field value via ``l.load``.

    Raises:
        TypedloadAttributeError: if ``value`` has no ``.keys()``.
        TypedloadValueError: if required fields are missing or (with
            ``failonextra``) unrecognized fields are present.
    """
    is_dataclass = hasattr(type_, '__dataclass_fields__')
    if not is_dataclass:
        fields = set(type_._fields)
        optional_fields = set(getattr(type_, '_field_defaults', {}).keys())
        type_hints = type_._field_types
    else:  # dataclass
        import dataclasses
        fields = set(type_.__dataclass_fields__.keys())
        # A field is optional when it has either a default or a default_factory.
        optional_fields = {
            k for k, v in type_.__dataclass_fields__.items()
            if not (isinstance(getattr(v, 'default', dataclasses._MISSING_TYPE()),
                               dataclasses._MISSING_TYPE)
                    and isinstance(getattr(v, 'default_factory', dataclasses._MISSING_TYPE()),
                                   dataclasses._MISSING_TYPE))
        }
        type_hints = {k: v.type for k, v in type_.__dataclass_fields__.items()}

    # Name mangling: prepare the list of needed name changes.
    # BUG FIX: only dataclasses carry __dataclass_fields__/metadata; the
    # original ran this loop unconditionally, raising AttributeError for
    # plain NamedTuples.
    transforms = []  # type: List[Tuple[str, str]]
    if is_dataclass:
        for field in fields:
            if type_.__dataclass_fields__[field].metadata:
                name = type_.__dataclass_fields__[field].metadata.get('name')
                if name:
                    transforms.append((field, name))
    # Do the needed name changes on a copy so the caller's dict is untouched.
    if transforms:
        value = value.copy()
        for pyname, dataname in transforms:
            if dataname in value:
                value[pyname] = value.pop(dataname)

    necessary_fields = fields.difference(optional_fields)
    try:
        vfields = set(value.keys())
    except AttributeError as e:
        raise TypedloadAttributeError(str(e), value=value, type_=type_)
    if necessary_fields.intersection(vfields) != necessary_fields:
        raise TypedloadValueError(
            'Value does not contain fields: %s which are necessary for type %s' % (
                necessary_fields.difference(vfields), type_),
            value=value,
            type_=type_,
        )
    fieldsdiff = vfields.difference(fields)
    if l.failonextra and len(fieldsdiff):
        extra = ', '.join(fieldsdiff)
        raise TypedloadValueError(
            'Dictionary has unrecognized fields: %s and cannot be loaded into %s' % (extra, type_),
            value=value,
            type_=type_,
        )

    params = {}
    for k, v in value.items():
        if k not in fields:
            # Extra field tolerated because failonextra is unset.
            continue
        params[k] = l.load(
            v,
            type_hints[k],
            annotation=Annotation(AnnotationType.FIELD, k),
        )
    return type_(**params)
def delete(self, msg, claim_id=None):
    """Deletes the specified message from its queue.

    If the message has been claimed, the ID of that claim must be passed
    as the 'claim_id' parameter.
    """
    msg_id = utils.get_id(msg)
    uri = "/%s/%s" % (self.uri_base, msg_id)
    if claim_id:
        uri += "?claim_id=%s" % claim_id
    return self._delete(uri)
def fw_policy_delete(self, data, fw_name=None):
    """Top level policy delete routine.

    :param data: payload describing the policy to delete
    :param fw_name: optional firewall name the policy belongs to
    """
    LOG.debug("FW Policy Debug")
    # Delegate to the internal helper; note the argument order is
    # (fw_name, data), the reverse of this method's signature.
    self._fw_policy_delete(fw_name, data)
def wait_all(jobs, timeout=None):
    """Block until all of the specified jobs have completed or the timeout expires.

    Args:
      jobs: a Job or list of Jobs to wait on.
      timeout: a timeout in seconds to wait for. None (the default) means no timeout.
    Returns:
      A list of the jobs that have now completed or None if there were no jobs.
    """
    completion_mode = concurrent.futures.ALL_COMPLETED
    return Job._wait(jobs, timeout, completion_mode)
def resolve_operation_info(self, encoding_map, mips_op_info):
    """Copy the predefined operation info (opcode, funct) into the current encoding map."""
    for attr in ('opcode', 'funct'):
        encoding_map[attr] = getattr(mips_op_info, attr)
def parse_text_urls(mesg):
    """Split a block of text into its url and non-url components.

    Returns a list of Chunk objects: text chunks carry (text, None) and
    link chunks carry (None, url).
    """
    chunks = []
    pos = 0
    for match in URLRE.finditer(mesg):
        start, end = match.start(), match.end()
        if pos < start:
            # Plain text between the previous match and this one.
            chunks.append(Chunk(mesg[pos:start], None))
        email = match.group("email")
        if email and "mailto" not in email:
            # Turn bare email addresses into mailto: links.
            link = "mailto:{}".format(email)
        else:
            link = match.group(1)
        chunks.append(Chunk(None, link))
        pos = end
    if pos < len(mesg):
        chunks.append(Chunk(mesg[pos:], None))
    return chunks
def step(self, step_size: Timedelta = None):
    """Advance the simulation one step.

    Parameters
    ----------
    step_size
        An optional size of step to take. Must be the same type as the
        simulation clock's step size (usually a pandas.Timedelta).

    Raises
    ------
    ValueError
        If ``step_size`` is not an instance of the clock's step size type.
    """
    old_step_size = self.clock.step_size
    if step_size is not None:
        if not isinstance(step_size, type(self.clock.step_size)):
            raise ValueError(f"Provided time must be an instance of {type(self.clock.step_size)}")
        self.clock._step_size = step_size
    try:
        super().step()
    finally:
        # BUG FIX: always restore the configured step size, even when the
        # step itself raises, so a failed step can't leave the clock with
        # a temporary override.
        self.clock._step_size = old_step_size
def convert_to_sympy_matrix(expr, full_space=None):
    """Convert a QNET expression to an explicit ``n x n`` instance of
    `sympy.Matrix`, where ``n`` is the dimension of `full_space`. The entries
    of the matrix may contain symbols.

    Parameters:
        expr: a QNET expression
        full_space (qnet.algebra.hilbert_space_algebra.HilbertSpace): The
            Hilbert space in which `expr` is defined. If not given,
            ``expr.space`` is used. The Hilbert space must have a well-defined
            basis.

    Raises:
        qnet.algebra.hilbert_space_algebra.BasisNotSetError: if `full_space`
            does not have a defined basis
        ValueError: if `expr` is not in `full_space`, or if `expr` cannot be
            converted.
    """
    if full_space is None:
        full_space = expr.space
    if not expr.space.is_tensor_factor_of(full_space):
        raise ValueError("expr must be in full_space")
    if expr is IdentityOperator:
        return sympy.eye(full_space.dimension)
    elif expr is ZeroOperator:
        # NOTE(review): returns scalar 0, not a zero matrix — presumably
        # relies on sympy's scalar handling in sums/products; confirm.
        return 0
    elif isinstance(expr, LocalOperator):
        n = full_space.dimension
        if full_space != expr.space:
            # Embed the local operator into the full space by tensoring
            # identities on every other local factor around it.
            all_spaces = full_space.local_factors
            own_space_index = all_spaces.index(expr.space)
            factors = [sympy.eye(s.dimension) for s in all_spaces[:own_space_index]]
            factors.append(convert_to_sympy_matrix(expr, expr.space))
            factors.extend([sympy.eye(s.dimension) for s in all_spaces[own_space_index + 1:]])
            return tensor(*factors)
        # From here on, full_space == expr.space and n is its dimension.
        if isinstance(expr, (Create, Jz, Jplus)):
            # NOTE(review): Jz and Jplus map to the same raising matrix as
            # Create — confirm this is intended.
            return SympyCreate(n)
        elif isinstance(expr, (Destroy, Jminus)):
            return SympyCreate(n).H
        elif isinstance(expr, Phase):
            # Diagonal phase operator: exp(i * k * phi) on each basis state k.
            phi = expr.phase
            result = sympy.zeros(n)
            for i in range(n):
                result[i, i] = sympy.exp(sympy.I * i * phi)
            return result
        elif isinstance(expr, Displace):
            # Displacement operator as a matrix exponential.
            alpha = expr.operands[1]
            a = SympyCreate(n)
            return (alpha * a - alpha.conjugate() * a.H).exp()
        elif isinstance(expr, Squeeze):
            # Squeezing operator as a matrix exponential.
            eta = expr.operands[1]
            a = SympyCreate(n)
            return ((eta / 2) * a ** 2 - (eta.conjugate() / 2) * (a.H) ** 2).exp()
        elif isinstance(expr, LocalSigma):
            # |j><k| built from basis column/row vectors.
            ket = basis_state(expr.index_j, n)
            bra = basis_state(expr.index_k, n).H
            return ket * bra
        else:
            raise ValueError("Cannot convert '%s' of type %s" % (str(expr), type(expr)))
    elif (isinstance(expr, Operator) and isinstance(expr, Operation)):
        if isinstance(expr, OperatorPlus):
            # Sum of operators: convert each summand and add the matrices.
            s = convert_to_sympy_matrix(expr.operands[0], full_space)
            for op in expr.operands[1:]:
                s += convert_to_sympy_matrix(op, full_space)
            return s
        elif isinstance(expr, OperatorTimes):
            # if any factor acts non-locally, we need to expand distributively.
            if any(len(op.space) > 1 for op in expr.operands):
                se = expr.expand()
                if se == expr:
                    raise ValueError("Cannot represent as sympy matrix: %s" % expr)
                return convert_to_sympy_matrix(se, full_space)
            all_spaces = full_space.local_factors
            by_space = []
            ck = 0  # count of operands consumed, checked against the total below
            for ls in all_spaces:
                # group factors by associated local space
                ls_ops = [convert_to_sympy_matrix(o, o.space)
                          for o in expr.operands if o.space == ls]
                if len(ls_ops):
                    # compute factor associated with local space
                    by_space.append(ls_ops[0])
                    for ls_op in ls_ops[1:]:
                        by_space[-1] *= ls_op
                    ck += len(ls_ops)
                else:
                    # if trivial action, take identity matrix
                    by_space.append(sympy.eye(ls.dimension))
            assert ck == len(expr.operands)
            # combine local factors in tensor product
            if len(by_space) == 1:
                return by_space[0]
            else:
                return tensor(*by_space)
        elif isinstance(expr, Adjoint):
            return convert_to_sympy_matrix(expr.operand, full_space).H
        elif isinstance(expr, PseudoInverse):
            raise NotImplementedError('Cannot convert PseudoInverse to sympy matrix')
        elif isinstance(expr, NullSpaceProjector):
            raise NotImplementedError('Cannot convert NullSpaceProjector to sympy')
        elif isinstance(expr, ScalarTimesOperator):
            return expr.coeff * convert_to_sympy_matrix(expr.term, full_space)
        else:
            raise ValueError("Cannot convert '%s' of type %s" % (str(expr), type(expr)))
    else:
        raise ValueError("Cannot convert '%s' of type %s" % (str(expr), type(expr)))
def __get_hooks_for_dll(self, event):
    """Get the requested API hooks for the current DLL.

    Used by L{__hook_dll} and L{__unhook_dll}.
    """
    hooks = []
    if not self.__apiHooks:
        return hooks
    path = event.get_module().get_filename()
    if not path:
        return hooks
    lib_name = PathOperations.pathname_to_filename(path).lower()
    # Collect the hook lists registered for this library name.
    for hook_lib, hook_api_list in compat.iteritems(self.__apiHooks):
        if hook_lib == lib_name:
            hooks.extend(hook_api_list)
    return hooks
def delete_dispatch(self, dispatch_id):
    """Delete an existing dispatch.

    :param dispatch_id: is the dispatch that the client wants to delete
    """
    self._validate_uuid(dispatch_id)
    url = "/notification/v1/dispatch/{}".format(dispatch_id)
    response = NWS_DAO().deleteURL(url, self._write_headers())
    status = response.status
    # 204 No Content is the only success status for a delete.
    if status != 204:
        raise DataFailureException(url, status, response.data)
    return status
def geocode(self, query, lang='en', exactly_one=True, timeout=DEFAULT_SENTINEL):
    """Return a location point for a `3 words` query.

    If the `3 words` address doesn't exist, a
    :class:`geopy.exc.GeocoderQueryError` exception will be thrown.

    :param str query: The 3-word address you wish to geocode.
    :param str lang: two character language codes as supported by
        the API (https://docs.what3words.com/api/v2/#lang).
    :param bool exactly_one: Return one result or a list of results, if
        available. There is always exactly one result per `3 words`
        address, so this parameter is rather useless for this geocoder.

        .. versionchanged:: 1.14.0
           ``exactly_one=False`` now returns a list of a single location.
           This option wasn't respected before.

    :param int timeout: Time, in seconds, to wait for the geocoding service
        to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
        exception. Set this only if you wish to override, on this call
        only, the value set during the geocoder's initialization.

    :rtype: :class:`geopy.location.Location` or a list of them, if
        ``exactly_one=False``.
    """
    if not self._check_query(query):
        raise exc.GeocoderQueryError("Search string must be 'word.word.word'")

    query_params = {
        'addr': self.format_string % query,
        'lang': lang.lower(),
        'key': self.api_key,
    }
    url = "{}?{}".format(self.geocode_api, urlencode(query_params))
    logger.debug("%s.geocode: %s", self.__class__.__name__, url)

    response = self._call_geocoder(url, timeout=timeout)
    return self._parse_json(response, exactly_one=exactly_one)
def get_user_info(self, user_id, lang="zh_CN"):
    """Fetch basic profile information for a user.

    :param user_id: user ID; this is the ``source`` of a received ``Message``
    :param lang: locale of the returned data: zh_CN simplified Chinese,
        zh_TW traditional Chinese, en English
    :return: the returned JSON payload
    """
    payload = {
        "access_token": self.token,
        "openid": user_id,
        "lang": lang,
    }
    return self.get(url="https://api.weixin.qq.com/cgi-bin/user/info", params=payload)
def iter_widgets(self, file=None, place=None):
    '''Iterate registered widgets, optionally matching given criteria.

    :param file: optional file object will be passed to widgets' filter
        functions.
    :type file: browsepy.file.Node or None
    :param place: optional template place hint.
    :type place: str
    :yields: widget instances
    :ytype: object
    '''
    for matcher, dynamic, widget in self._widgets:
        if file and matcher:
            try:
                matched = matcher(file)
            except BaseException as e:
                # This method's execution is deferred, which makes filter
                # errors hard to debug for plugin developers, so warn
                # instead of propagating.
                warnings.warn(
                    'Plugin action filtering failed with error: %s' % e,
                    RuntimeWarning)
                continue
            if not matched:
                continue
        if place and place != widget.place:
            continue
        if file and dynamic:
            widget = self._resolve_widget(file, widget)
        yield widget
def permutation_isc(iscs, group_assignment=None, pairwise=False,  # noqa: C901
                    summary_statistic='median', n_permutations=1000,
                    random_state=None):
    """Group-level permutation test for ISCs

    For ISCs from one or more voxels or ROIs, permute group assignments to
    construct a permutation distribution. Input is a list or ndarray of
    ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. If two groups,
    ISC values should be stacked along the first dimension (vertically), and a
    group_assignment list (or 1d array) of the same length as the number of
    subjects should be provided to indicate groups. If no group_assignment
    is provided, a one-sample test is performed using a sign-flipping
    procedure. Performs an exact test if the number of possible permutations
    (2**N for one-sample sign-flipping, N! for two-sample shuffling) is less
    than or equal to the number of requested permutations; otherwise performs
    an approximate permutation test using Monte Carlo resampling. ISC values
    should either be N ISC values for N subjects in the leave-one-out
    approach (pairwise=False) or N(N-1)/2 ISC values for N subjects in the
    pairwise approach (pairwise=True). In the pairwise approach, ISC values
    should correspond to the vectorized upper triangle of a square
    correlation matrix (scipy.stats.distance.squareform). Note that in the
    pairwise approach, group_assignment order should match the row/column
    order of the subject-by-subject square ISC matrix even though the input
    ISCs should be supplied as the vectorized upper triangle of the square
    ISC matrix. Returns the observed ISC and permutation-based p-value
    (two-tailed test), as well as the permutation distribution of the
    summary statistic. According to Chen et al., 2016, this is the preferred
    nonparametric approach for controlling false positive rates (FPR) for
    two-sample tests. This approach may yield inflated FPRs for one-sample
    tests.

    The implementation is based on the work in [Chen2016]_.

    Parameters
    ----------
    iscs : list or ndarray, correlation matrix of ISCs
        ISC values for one or more voxels
    group_assignment : list or ndarray, group labels
        Group labels matching order of ISC input
    pairwise : bool, default: False
        Indicator of pairwise or leave-one-out, should match ISCs variable
    summary_statistic : str, default: 'median'
        Summary statistic, either 'median' (default) or 'mean'
    n_permutations : int, default: 1000
        Number of permutation iterations (randomizing group assignment)
    random_state : int, None, or np.random.RandomState, default: None
        Initial random seed

    Returns
    -------
    observed : float, ISC summary statistic or difference
        Actual ISC or group difference (excluding between-group ISCs)
    p : float, p-value
        p-value based on permutation test
    distribution : ndarray, permutations by voxels (optional)
        Permutation distribution if return_bootstrap=True
    """
    # Standardize structure of input data
    iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)

    # Check for valid summary statistic
    if summary_statistic not in ('mean', 'median'):
        raise ValueError("Summary statistic must be 'mean' or 'median'")

    # Check match between group labels and ISCs
    group_assignment = _check_group_assignment(group_assignment, n_subjects)

    # Get group parameters
    group_parameters = _get_group_parameters(group_assignment, n_subjects,
                                             pairwise=pairwise)

    # Set up permutation type (exact or Monte Carlo)
    if group_parameters['n_groups'] == 1:
        if n_permutations < 2 ** n_subjects:
            logger.info("One-sample approximate permutation test using "
                        "sign-flipping procedure with Monte Carlo resampling.")
            exact_permutations = None
        else:
            logger.info("One-sample exact permutation test using "
                        "sign-flipping procedure with 2**{0} "
                        "({1}) iterations.".format(n_subjects,
                                                   2 ** n_subjects))
            # Enumerate every sign pattern for the exact test.
            exact_permutations = list(product([-1, 1], repeat=n_subjects))
            n_permutations = 2 ** n_subjects
    # Check for exact test for two groups
    else:
        if n_permutations < np.math.factorial(n_subjects):
            logger.info("Two-sample approximate permutation test using "
                        "group randomization with Monte Carlo resampling.")
            exact_permutations = None
        else:
            logger.info("Two-sample exact permutation test using group "
                        "randomization with {0}! "
                        "({1}) iterations.".format(
                            n_subjects, np.math.factorial(n_subjects)))
            # Enumerate every group-assignment ordering for the exact test.
            exact_permutations = list(permutations(
                np.arange(len(group_assignment))))
            n_permutations = np.math.factorial(n_subjects)

    # If one group, just get observed summary statistic
    if group_parameters['n_groups'] == 1:
        observed = compute_summary_statistic(
            iscs, summary_statistic=summary_statistic, axis=0)[np.newaxis, :]
    # If two groups, get the observed difference
    else:
        observed = (compute_summary_statistic(
                        iscs[group_parameters['group_selector'] ==
                             group_parameters['group_labels'][0], :],
                        summary_statistic=summary_statistic, axis=0)
                    - compute_summary_statistic(
                        iscs[group_parameters['group_selector'] ==
                             group_parameters['group_labels'][1], :],
                        summary_statistic=summary_statistic, axis=0))
        observed = np.array(observed)

    # Set up an empty list to build our permutation distribution
    distribution = []

    # Loop through n permutation iterations and populate distribution
    for i in np.arange(n_permutations):
        # Random seed to be deterministically re-randomized at each iteration
        if exact_permutations:
            prng = None
        elif isinstance(random_state, np.random.RandomState):
            prng = random_state
        else:
            prng = np.random.RandomState(random_state)

        # If one group, apply sign-flipping procedure
        if group_parameters['n_groups'] == 1:
            isc_sample = _permute_one_sample_iscs(
                iscs, group_parameters, i, pairwise=pairwise,
                summary_statistic=summary_statistic,
                exact_permutations=exact_permutations, prng=prng)
        # If two groups, set up group matrix get the observed difference
        else:
            isc_sample = _permute_two_sample_iscs(
                iscs, group_parameters, i, pairwise=pairwise,
                summary_statistic=summary_statistic,
                exact_permutations=exact_permutations, prng=prng)

        # Tack our permuted ISCs onto the permutation distribution
        distribution.append(isc_sample)

        # Update random state for next iteration
        if not exact_permutations:
            random_state = np.random.RandomState(
                prng.randint(0, MAX_RANDOM_SEED))

    # Convert distribution to numpy array
    distribution = np.array(distribution)

    # Get p-value for actual median from shifted distribution
    if exact_permutations:
        p = p_from_null(observed, distribution, side='two-sided',
                        exact=True, axis=0)
    else:
        p = p_from_null(observed, distribution, side='two-sided',
                        exact=False, axis=0)

    return observed, p, distribution
def as_dates(self):
    """Create a new DateRange with the datetimes converted to dates and changing to CLOSED/CLOSED."""
    def to_date(bound):
        # Leave None (open bound) and plain dates untouched.
        if bound and isinstance(bound, datetime.datetime):
            return bound.date()
        return bound
    return DateRange(to_date(self.start), to_date(self.end), CLOSED_CLOSED)
def setCurrentMode(self, mode):
    """Sets what mode this loader will be in.

    :param mode | <XLoaderWidget.Mode>
    """
    if mode == self._currentMode:
        return
    self._currentMode = mode
    # Spinner mode shows the animated movie; otherwise show progress bars.
    spinner = mode == XLoaderWidget.Mode.Spinner
    self._movieLabel.setVisible(spinner)
    self._primaryProgressBar.setVisible(not spinner)
    self._subProgressBar.setVisible(not spinner and self._showSubProgress)
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Create Metrics Context."""
    # This mock operation is always synchronous.
    assert wait_for_completion is True
    check_required_fields(method, uri, body, ['anticipated-frequency-seconds'])
    context = hmc.metrics_contexts.add(body)
    return {
        'metrics-context-uri': context.uri,
        'metric-group-infos': context.get_metric_group_infos(),
    }
def read(filename, mmap=False):
    """Return the sample rate (in samples/sec) and data from a WAV file

    Parameters
    ----------
    filename : string or open file handle
        Input wav file.
    mmap : bool, optional
        Whether to read data as memory mapped.
        Only to be used on real files (Default: False)

        .. versionadded:: 0.12.0

    Returns
    -------
    rate : int
        Sample rate of wav file
    data : numpy array
        Data read from wav file

    Notes
    -----
    * The file can be an open file or a filename.
    * The returned sample rate is a Python integer.
    * The data is returned as a numpy array with a data-type determined
      from the file.
    * This function cannot read wav files with 24 bit data.
    """
    if hasattr(filename, 'read'):
        fid = filename
        # mmap requires a real file on disk, not an arbitrary file object.
        mmap = False
    else:
        fid = open(filename, 'rb')
    try:
        fsize = _read_riff_chunk(fid)
        # Defaults used until a 'fmt ' chunk overrides them.
        noc = 1
        bits = 8
        comp = WAVE_FORMAT_PCM
        while (fid.tell() < fsize):
            # read the next chunk
            chunk_id = fid.read(4)
            if chunk_id == b'fmt ':
                size, comp, noc, rate, sbytes, ba, bits = _read_fmt_chunk(fid)
                if bits == 24:
                    raise ValueError("Unsupported bit depth: the wav file "
                                     "has 24 bit data.")
            elif chunk_id == b'fact':
                _skip_unknown_chunk(fid)
            elif chunk_id == b'data':
                data = _read_data_chunk(fid, comp, noc, bits, mmap=mmap)
            elif chunk_id == b'LIST':
                # Someday this could be handled properly but for now skip it
                _skip_unknown_chunk(fid)
            else:
                warnings.warn("Chunk (non-data) not understood, skipping it.",
                              WavFileWarning)
                _skip_unknown_chunk(fid)
    finally:
        if not hasattr(filename, 'read'):
            fid.close()
        else:
            # Caller-supplied file objects are rewound, not closed.
            fid.seek(0)
    # NOTE(review): if the file lacks 'fmt '/'data' chunks, rate/data are
    # never bound and this raises NameError — confirm whether a clearer
    # error is wanted upstream.
    return rate, data
def get_public_url(self, doc_id, branch='master'):
    """Returns a GitHub URL for the doc in question (study, collection, ...)"""
    name, path_frag = self.get_repo_and_path_fragment(doc_id)
    return ('https://raw.githubusercontent.com/OpenTreeOfLife/'
            + '/'.join((name, branch, path_frag)))
def create_instances_from_document(doc_database, doc_idx, max_seq_length, short_seq_prob,
                                   masked_lm_prob, max_predictions_per_seq, vocab_list):
    """This code is mostly a duplicate of the equivalent function from Google BERT's repo.

    However, we make some changes and improvements. Sampling is improved and no
    longer requires a loop in this function. Also, documents are sampled
    proportionally to the number of sentences they contain, which means each
    sentence (rather than each document) has an equal chance of being sampled
    as a false example for the NextSentence task.
    """
    document = doc_database[doc_idx]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if random() < short_seq_prob:
        target_seq_length = randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        # Emit an instance once enough tokens are buffered (or at doc end).
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = randrange(1, len(current_chunk))
                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])
                tokens_b = []
                # Random next
                if len(current_chunk) == 1 or random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)
                    # Sample a random document, with longer docs being sampled
                    # more frequently
                    random_document = doc_database.sample_doc(
                        current_idx=doc_idx, sentence_weighted=True)
                    random_start = randrange(0, len(random_document))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1
                tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
                # The segment IDs are 0 for the [CLS] token, the A tokens and the
                # first [SEP]. They are 1 for the B tokens and the final [SEP].
                segment_ids = ([0 for _ in range(len(tokens_a) + 2)]
                               + [1 for _ in range(len(tokens_b) + 1)])
                tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(
                    tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)
                instance = {
                    "tokens": tokens,
                    "segment_ids": segment_ids,
                    "is_random_next": is_random_next,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_labels": masked_lm_labels}
                instances.append(instance)
            # Reset the buffer for the next instance.
            current_chunk = []
            current_length = 0
        i += 1
    return instances
def add_constants(namespace, registry):
    """Adds the quantities from :mod:`unyt.physical_constants` to a namespace.

    Parameters
    ----------
    namespace : dict
        The dict to insert quantities into. The keys will be string names
        and values will be the corresponding quantities.
    registry : :class:`unyt.unit_registry.UnitRegistry`
        The registry to create units from. Note that if you would like to
        use a custom unit system, ensure your registry was created using
        that unit system.

    Example
    -------
    >>> from unyt.unit_registry import UnitRegistry
    >>> class MyClass():
    ...     def __init__(self):
    ...         self.reg = UnitRegistry(unit_system='cgs')
    ...         add_constants(vars(self), self.reg)
    >>> foo = MyClass()
    >>> foo.gravitational_constant
    unyt_quantity(6.67384e-08, 'cm**3/(g*s**2)')
    >>> foo.speed_of_light
    unyt_quantity(2.99792458e+10, 'cm/s')
    """
    from unyt.array import unyt_quantity

    for constant_name in physical_constants:
        value, unit_name, alternate_names = physical_constants[constant_name]
        # Register the constant under every alias plus its canonical name.
        for alias in alternate_names + [constant_name]:
            quan = unyt_quantity(value, unit_name, registry=registry)
            # Prefer the registry's own unit system; keep the raw quantity
            # when the units cannot be reduced to it.
            try:
                namespace[alias] = quan.in_base(unit_system=registry.unit_system)
            except UnitsNotReducible:
                namespace[alias] = quan
            namespace[alias + "_mks"] = unyt_quantity(value, unit_name, registry=registry)
            try:
                namespace[alias + "_cgs"] = quan.in_cgs()
            except UnitsNotReducible:
                pass
            if alias == "h":
                # backward compatibility for unyt 1.0, which defined hmks
                namespace["hmks"] = namespace["h_mks"].copy()
                namespace["hcgs"] = namespace["h_cgs"].copy()
def gen_query(self):
    """Generate an SQL query for the edge object."""
    if self.dst is None:
        # No destination: query the relation in the forward direction.
        return SQL.forwards_relation(self.src, self.rel)
    return SQL.inverse_relation(self.dst, self.rel)
def transform_form_error(form, verbose=True):
    """Flatten form errors into a list like ["field1: error1", "field2: error2"]."""
    messages = []
    for field, err_msg in form.errors.items():
        joined = ', '.join(err_msg)
        if field == '__all__':
            # Non-field (general) errors carry no field prefix.
            messages.append(joined)
            continue
        label = field
        if verbose and field in form.fields:
            # Prefer the human-readable label when one is defined.
            label = form.fields[field].label or field
        messages.append('%s: %s' % (label, joined))
    return messages
def run(self):
    """Process incoming HTTP connections.

    Retrieves incoming connections from thread pool.
    """
    # Register this worker's stats with the server before serving.
    self.server.stats['Worker Threads'][self.getName()] = self.stats
    try:
        self.ready = True
        while True:
            conn = self.server.requests.get()
            # Sentinel object: a shutdown request terminates the worker.
            if conn is _SHUTDOWNREQUEST:
                return
            self.conn = conn
            if self.server.stats['Enabled']:
                self.start_time = time.time()
            try:
                conn.communicate()
            finally:
                # Always close the connection, even if communicate() raised.
                conn.close()
            if self.server.stats['Enabled']:
                # Aggregate per-connection counters into this worker's stats.
                self.requests_seen += self.conn.requests_seen
                self.bytes_read += self.conn.rfile.bytes_read
                self.bytes_written += self.conn.wfile.bytes_written
                self.work_time += time.time() - self.start_time
                self.start_time = None
            self.conn = None
    except (KeyboardInterrupt, SystemExit) as ex:
        # Hand interrupts to the server so it can shut down cleanly.
        self.server.interrupt = ex
def _send_packet ( self , data ) :
"Send to server ." | data = json . dumps ( data ) . encode ( 'utf-8' )
# Be sure that our socket is blocking , otherwise , the send ( ) call could
# raise ` BlockingIOError ` if the buffer is full .
self . socket . setblocking ( 1 )
self . socket . send ( data + b'\0' ) |
def last_modified(self):
    """Return the most recent modification time over all entries in the view.

    Falls back to the current time when the view has no entries.
    """
    if not self.entries:
        return arrow.get()
    newest = max(self.entries, key=lambda entry: entry.last_modified)
    return arrow.get(newest.last_modified)
def reply_video(
    self,
    video: str,
    quote: bool = None,
    caption: str = "",
    parse_mode: str = "",
    duration: int = 0,
    width: int = 0,
    height: int = 0,
    thumb: str = None,
    supports_streaming: bool = True,
    disable_notification: bool = None,
    reply_to_message_id: int = None,
    reply_markup: Union["pyrogram.InlineKeyboardMarkup", "pyrogram.ReplyKeyboardMarkup", "pyrogram.ReplyKeyboardRemove", "pyrogram.ForceReply"] = None,
    progress: callable = None,
    progress_args: tuple = ()
) -> "Message":
    """Bound shortcut for ``client.send_video(chat_id=message.chat.id, video=video, ...)``.

    Example::

        message.reply_video(video)

    Args:
        video (``str``): file_id, HTTP URL or local path of the video to send.
        quote (``bool``, *optional*): reply to this message; ignored when
            *reply_to_message_id* is given. Defaults to True in group
            chats and False in private chats.
        caption (``str``, *optional*): video caption, 0-1024 characters.
        parse_mode (``str``, *optional*): MARKDOWN or HTML caption styling.
        duration, width, height (``int``, *optional*): video metadata.
        thumb (``str``, *optional*): JPEG thumbnail, < 200 KB, <= 90px sides.
        supports_streaming (``bool``, *optional*): video suitable for streaming.
        disable_notification (``bool``, *optional*): send silently.
        reply_to_message_id (``int``, *optional*): ID of the original message.
        reply_markup (*optional*): additional interface options.
        progress (``callable``, *optional*): upload progress callback taking
            (client, current, total, *args).
        progress_args (``tuple``, *optional*): extra args for *progress*.

    Returns:
        The sent :obj:`Message`, or None if the upload is stopped with
        :meth:`stop_transmission`.

    Raises:
        :class:`RPCError` in case of a Telegram RPC error.
    """
    if quote is None:
        # Default: quote in group chats, not in private chats.
        quote = self.chat.type != "private"
    if reply_to_message_id is None and quote:
        reply_to_message_id = self.message_id
    return self._client.send_video(
        chat_id=self.chat.id,
        video=video,
        caption=caption,
        parse_mode=parse_mode,
        duration=duration,
        width=width,
        height=height,
        thumb=thumb,
        supports_streaming=supports_streaming,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to_message_id,
        reply_markup=reply_markup,
        progress=progress,
        progress_args=progress_args,
    )
def mv_normal_cov_like(x, mu, C):
    r"""Multivariate normal log-likelihood parameterized by a covariance matrix.

    .. math::
        f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{-\frac{1}{2}(x-\mu)^{\prime}C^{-1}(x-\mu)\right\}

    :Parameters:
      - `x`: (n, k)
      - `mu`: (k) Location parameter.
      - `C`: (k, k) Positive definite covariance matrix.

    .. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
    """
    # TODO: Vectorize in Fortran
    if np.ndim(x) > 1:
        # Sum the per-row log-likelihoods.
        return np.sum([flib.cov_mvnorm(row, mu, C) for row in x])
    return flib.cov_mvnorm(x, mu, C)
def all_node_style(self, **kwargs):
    """Apply the given style options to every node in the graph."""
    for current in self.nodes:
        self.node_style(current, **kwargs)
def _getFirmwareVersion(self, device):
    """Query the firmware version from the device.

    :Parameters:
      device : `int`
        Hardware device ID; only used with the Pololu Protocol.

    :Returns:
      The version number as an integer, or None when the reply cannot
      be parsed as an integer.
    """
    cmd = self._COMMAND.get('get-fw-version')
    self._writeData(cmd, device)
    result = None
    try:
        raw = self._serial.read(size=1)
        result = int(raw)
    except serial.SerialException as e:
        if self._log:
            self._log.error("Error: %s", e, exc_info=True)
        raise
    except ValueError:
        # Non-numeric reply byte.
        result = None
    return result
def check(self, triggers, data_reader):
    """Look for a single-detector trigger that passes all thresholds.

    Returns a fake-coincidence dict for the loudest trigger when it
    passes the newsnr, reduced-chisq and duration cuts, else None.
    """
    if len(triggers['snr']) == 0:
        return None
    loudest = triggers['snr'].argmax()
    # pycbc live convention: 'chisq' always means the *reduced* chisq.
    rchisq = triggers['chisq'][loudest]
    nsnr = ranking.newsnr(triggers['snr'][loudest], rchisq)
    dur = triggers['template_duration'][loudest]
    passes = (nsnr > self.newsnr_threshold
              and rchisq < self.reduced_chisq_threshold
              and dur > self.duration_threshold)
    if not passes:
        return None
    fake_coinc = {'foreground/%s/%s' % (self.ifo, key): triggers[key][loudest]
                  for key in triggers}
    fake_coinc['foreground/stat'] = nsnr
    fake_coinc['foreground/ifar'] = self.fixed_ifar
    fake_coinc['HWINJ'] = data_reader.near_hwinj()
    return fake_coinc
def UNas(self, to='name_short'):
    """Return UN member states in the specified classification.

    Parameters
    ----------
    to : str or list of str, optional
        Output classification column(s) (valid index of the
        country_data file); default 'name_short'.

    Returns
    -------
    pandas.DataFrame
    """
    columns = [to] if isinstance(to, str) else to
    return self.data[self.data.UNmember > 0][columns]
def authAddress(val):
    """Extract author addresses from the raw WOS C1 tag lines.

    **Warning**: the WOS author-to-address mapping is not very reliable
    and is given in multiple formats. Lines starting with a bracketed
    author list ("[...] address") have that prefix stripped; other
    lines are kept unchanged.

    # Parameters
    _val_: `list[str]` — raw C1 field lines from a WOS file.

    # Returns
    `list[str]` — a list of addresses.
    """
    addresses = []
    for line in val:
        if line[0] == '[':
            # Drop the "[authors] " prefix while keeping any later
            # '] ' separators inside the address intact.
            addresses.append('] '.join(line.split('] ')[1:]))
        else:
            addresses.append(line)
    return addresses
def extract(self, content, output):
    """Try to extract lines from the invoice.

    Parses the region of *content* between the configured 'start' and
    'end' regexes into rows (dicts of named-group captures), optionally
    coerces typed fields, and stores the rows in ``output['lines']``.
    """
    # First apply default options.
    plugin_settings = DEFAULT_OPTIONS.copy()
    plugin_settings.update(self['lines'])
    self['lines'] = plugin_settings
    # Validate settings
    assert 'start' in self['lines'], 'Lines start regex missing'
    assert 'end' in self['lines'], 'Lines end regex missing'
    assert 'line' in self['lines'], 'Line regex missing'
    start = re.search(self['lines']['start'], content)
    end = re.search(self['lines']['end'], content)
    if not start or not end:
        logger.warning('no lines found - start %s, end %s', start, end)
        return
    # Only the text between the start and end anchors is parsed.
    content = content[start.end():end.start()]
    lines = []
    current_row = {}
    if 'first_line' not in self['lines'] and 'last_line' not in self['lines']:
        # Without explicit first/last line patterns, every 'line' match
        # starts a new row.
        self['lines']['first_line'] = self['lines']['line']
    for line in re.split(self['lines']['line_separator'], content):
        # if the line has empty lines in it, skip them
        if not line.strip('').strip('\n') or not line:
            continue
        if 'first_line' in self['lines']:
            match = re.search(self['lines']['first_line'], line)
            if match:
                # A first_line match starts a new row; flush any open one.
                if 'last_line' not in self['lines']:
                    if current_row:
                        lines.append(current_row)
                    current_row = {}
                if current_row:
                    lines.append(current_row)
                current_row = {field: value.strip() if value else '' for field, value in match.groupdict().items()}
                continue
        if 'last_line' in self['lines']:
            match = re.search(self['lines']['last_line'], line)
            if match:
                # last_line closes the current row after merging its fields
                # (values of repeated fields are joined with newlines).
                for field, value in match.groupdict().items():
                    current_row[field] = '%s%s%s' % (current_row.get(field, ''), current_row.get(field, '') and '\n' or '', value.strip() if value else '',)
                if current_row:
                    lines.append(current_row)
                current_row = {}
                continue
        match = re.search(self['lines']['line'], line)
        if match:
            # Continuation line: append field values to the open row.
            for field, value in match.groupdict().items():
                current_row[field] = '%s%s%s' % (current_row.get(field, ''), current_row.get(field, '') and '\n' or '', value.strip() if value else '',)
            continue
        logger.debug('ignoring *%s* because it doesn\'t match anything', line)
    if current_row:
        lines.append(current_row)
    # Coerce any fields that have a declared type.
    types = self['lines'].get('types', [])
    for row in lines:
        for name in row.keys():
            if name in types:
                row[name] = self.coerce_type(row[name], types[name])
    if lines:
        output['lines'] = lines
def bind(self, *pos, **kw):
    """Implements proxy connection for UDP sockets, which happens during
    the bind() phase.

    Non-proxied or non-UDP sockets bind normally; SOCKS5 sockets
    perform a UDP ASSOCIATE handshake and connect to the relay.
    """
    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
    if not proxy_type or self.type != socket.SOCK_DGRAM:
        # No proxy configured, or not a UDP socket: bind directly.
        return _orig_socket.bind(self, *pos, **kw)
    if self._proxyconn:
        raise socket.error(EINVAL, "Socket already bound to an address")
    if proxy_type != SOCKS5:
        msg = "UDP only supported by SOCKS5 proxy type"
        raise socket.error(EOPNOTSUPP, msg)
    super(socksocket, self).bind(*pos, **kw)
    # Need to specify actual local port because
    # some relays drop packets if a port of zero is specified.
    # Avoid specifying host address in case of NAT though.
    _, port = self.getsockname()
    dst = ("0", port)
    self._proxyconn = _orig_socket()
    proxy = self._proxy_addr()
    self._proxyconn.connect(proxy)
    # SOCKS5 command code for UDP ASSOCIATE (RFC 1928).
    UDP_ASSOCIATE = b"\x03"
    _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
    # The relay is most likely on the same host as the SOCKS proxy,
    # but some proxies return a private IP address (10.x.y.z)
    host, _ = proxy
    _, port = relay
    super(socksocket, self).connect((host, port))
    super(socksocket, self).settimeout(self._timeout)
    self.proxy_sockname = ("0.0.0.0", 0)
def _resolve_folder(project, parent_folder, folder_name):
    """Attempt to resolve *folder_name* at location *parent_folder* in *project*.

    :param project: The project that the folder belongs to
    :type project: string
    :param parent_folder: Full path to the parent folder that contains folder_name
    :type parent_folder: string
    :param folder_name: Name of the folder
    :type folder_name: string
    :returns: The path "<parent_folder>/<folder_name>" if it exists
    :rtype: string
    :raises: ResolutionError if folder_name is not a folder, or points to a
        folder that does not exist
    """
    if '/' in folder_name:
        # A name containing '/' can never be a plain folder name.
        raise ResolutionError('Object of name ' + str(folder_name) + ' could not be resolved in folder ' + str(parent_folder) + ' of project ID ' + str(project))
    possible_folder, _skip = clean_folder_path(parent_folder + '/' + folder_name, 'folder')
    if not check_folder_exists(project, parent_folder, folder_name):
        raise ResolutionError('Unable to resolve "' + folder_name + '" to a data object or folder name in \'' + parent_folder + "'")
    return possible_folder
def remove_node(self, node):
    """Remove *node* from the graph along with every edge touching it.

    :param androguard.decompiler.dad.node.Node node: the node to remove
    """
    # Detach normal edges in both directions.
    for pred in self.reverse_edges.get(node, []):
        self.edges[pred].remove(node)
    for succ in self.edges.get(node, []):
        self.reverse_edges[succ].remove(node)
    # Detach exception (catch) edges in both directions.
    for pred in self.reverse_catch_edges.pop(node, []):
        self.catch_edges[pred].remove(node)
    for succ in self.catch_edges.pop(node, []):
        self.reverse_catch_edges[succ].remove(node)
    self.nodes.remove(node)
    if node in self.rpo:
        self.rpo.remove(node)
def realpath(path):
    """Create the real absolute path for the given path.

    Adds support for the user directory ('~') and the system root ('/').

    Args:
        path: pathname to resolve.

    Returns:
        Platform independent real absolute path.
    """
    if path == '~':
        return userdir
    if path == '/':
        return sysroot
    if path.startswith('/'):
        return os.path.abspath(path)
    if path.startswith('~/'):
        return os.path.expanduser(path)
    if path.startswith('./'):
        return os.path.abspath(os.path.join(os.path.curdir, path[2:]))
    return os.path.abspath(path)
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20):
    """Read index records for processed parameter groups between *start* and *stop*.

    Each iteration returns a chunk of chronologically-sorted records.

    :param float merge_time: Maximum gap in seconds before two consecutive
        index records are merged together.
    :rtype: ~collections.Iterable[.IndexGroup]
    """
    params = {}
    if group is not None:
        params['group'] = group
    if start is not None:
        params['start'] = to_isostring(start)
    if stop is not None:
        params['stop'] = to_isostring(stop)
    if merge_time is not None:
        # Server expects milliseconds.
        params['mergeTime'] = int(merge_time * 1000)
    return pagination.Iterator(
        client=self._client,
        path='/archive/{}/parameter-index'.format(self._instance),
        params=params,
        response_class=archive_pb2.IndexResponse,
        items_key='group',
        item_mapper=IndexGroup,
    )
def state(self, time=None):
    """Return the most recently created info of type State at *time*.

    When *time* is None the most recent state as of now is returned.
    """
    candidates = self.infos(type=State)
    if time is not None:
        candidates = [s for s in candidates if s.creation_time < time]
    return max(candidates, key=attrgetter('creation_time'))
def set_voltage(self, value, channel=1):
    """Set the output voltage; channel 1 = OP1, 2 = OP2. AUX is not supported."""
    self.write("V%d %f" % (channel, value))
def hash_data(obj):
    """Generate a SHA1 hex digest from a complex object."""
    digest = sha1()
    for chunk in bytes_iter(obj):
        if isinstance(chunk, six.text_type):
            chunk = chunk.encode('utf-8')
        digest.update(chunk)
    return digest.hexdigest()
def load_config(filename=None):
    """Load a configuration from a file or stdin.

    If *filename* is None or "-", configuration is read from stdin.

    Returns: A ConfigDict.
    Raises: ConfigError: If there is an error loading the config.
    """
    try:
        with _config_stream(filename) as handle:
            # Use the stream's real name in any error message.
            filename = handle.name
            return deserialize_config(handle.read())
    except (OSError, toml.TomlDecodeError, UnicodeDecodeError) as exc:
        raise ConfigError('Error loading configuration from {}'.format(filename)) from exc
def default_decode(events, mode='full'):
    """Decode a XigtCorpus element from a stream of parse events."""
    event, elem = next(events)
    root = elem  # store root for later instantiation
    # Advance until the first <igt> starts or the corpus ends.
    while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
        event, elem = next(events)
    igts = None
    if event == 'start' and elem.tag == 'igt':
        igts = (decode_igt(e)
                for e in iter_elements('igt', events, root,
                                       break_on=[('end', 'xigt-corpus')]))
    return decode_xigtcorpus(root, igts=igts, mode=mode)
def apply_caching(response):
    """Attach the configuration's HTTP headers to every response."""
    for header, value in config.get('HTTP_HEADERS').items():
        response.headers[header] = value
    return response
def list_sinks(self, project, page_size=0, page_token=None):
    """List sinks for the project associated with this client.

    :type project: str
    :param project: ID of the project whose sinks are to be listed.
    :type page_size: int
    :param page_size: maximum number of sinks to return; if not passed,
        defaults to a value set by the API.
    :type page_token: str
    :param page_token: opaque marker for the next "page" of sinks. If not
        passed, the API returns the first page of sinks.
    :rtype: tuple, (list, str)
    :returns: list of mappings plus a "next page token" string; when not
        None, more sinks can be retrieved with another call (passing that
        value as ``page_token``).
    """
    path = "projects/%s" % (project,)
    page_iter = self._gapic_api.list_sinks(path, page_size=page_size)
    # Rebind iterator internals so items resolve against this client.
    page_iter.client = self._client
    page_iter.next_page_token = page_token
    page_iter.item_to_value = _item_to_sink
    return page_iter
def get_map_matrix(inputfile, sheet_name):
    """Return the matrix representation of the genetic map.

    :arg inputfile: path to the input file from which to retrieve the
        genetic map.
    :arg sheet_name: the excel sheet containing the data on which to
        retrieve the genetic map.
    """
    matrix = read_excel_file(inputfile, sheet_name)
    output = [['Locus', 'Group', 'Position']]
    for row in matrix:
        # Skip empty loci and generated placeholders like "c1.loc12.3".
        if row[0] and not re.match(r'c\d+\.loc[\d\.]+', row[0]):
            output.append([row[0], row[1], row[2]])
    return output
def pad_to_multiple(self, factor):
    """Zero-pad the pianoroll along the time axis to a multiple of *factor*.

    Parameters
    ----------
    factor : int
        The value which the length of the resulting pianoroll will be
        a multiple of.
    """
    remainder = self.pianoroll.shape[0] % factor
    if remainder:
        pad_width = ((0, factor - remainder), (0, 0))
        self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
def execute_epoch(self, epoch_info, learner):
    """Set this phase's learning rate on the optimizer, then run one epoch."""
    for param_group in epoch_info.optimizer.param_groups:
        param_group['lr'] = self.lr
    return learner.run_epoch(epoch_info, self._source)
def exec_command(self, command, sudo=False, **kwargs):
    """Wrapper around paramiko.SSHClient.exec_command.

    Runs *command* through bash (optionally via sudo) and returns a dict
    with 'stdout', 'stderr' and 'exit_code'.
    """
    channel = self.client.get_transport().open_session()
    stdout = channel.makefile('rb')
    stderr = channel.makefile_stderr('rb')
    template = 'sudo -S bash -c \'%s\'' if sudo else 'bash -c \'%s\''
    command = template % command
    logger.debug("Running command %s on '%s'", command, self.host)
    channel.exec_command(command, **kwargs)
    # Poll until output is available or the command has finished.
    while not (channel.recv_ready() or channel.closed or channel.exit_status_ready()):
        time.sleep(.2)
    return {
        'stdout': stdout.read().strip(),
        'stderr': stderr.read().strip(),
        'exit_code': channel.recv_exit_status(),
    }
def gauss_warp_arb(X, l1, l2, lw, x0):
    r"""Warps the `X` coordinate with a Gaussian-shaped divot.

    .. math::
        l = l_1 - (l_1 - l_2) \exp\left(-4\ln 2 \frac{(X - x_0)^2}{l_w^2}\right)

    Parameters
    ----------
    X : :py:class:`Array`, (`M`,) or scalar float
        `M` locations to evaluate length scale at.
    l1 : positive float
        Global value of the length scale.
    l2 : positive float
        Pedestal value of the length scale.
    lw : positive float
        Width of the dip.
    x0 : float
        Location of the center of the dip in length scale.

    Returns
    -------
    l : :py:class:`Array`, (`M`,) or scalar float
        The value of the length scale at the specified point.
    """
    # The scipy.ndarray / scipy.matrix / scipy.exp / scipy.log aliases were
    # deprecated and removed from SciPy; use NumPy directly instead.
    import numpy as np
    if isinstance(X, np.ndarray):
        if isinstance(X, np.matrix):
            # Plain ndarray semantics, not matrix semantics.
            X = np.asarray(X, dtype=float)
        return l1 - (l1 - l2) * np.exp(-4.0 * np.log(2.0) * (X - x0) ** 2.0 / (lw ** 2.0))
    # Scalar path keeps arbitrary-precision support via mpmath.
    return l1 - (l1 - l2) * mpmath.exp(-4.0 * mpmath.log(2.0) * (X - x0) ** 2.0 / (lw ** 2.0))
def vertically(value, num_blocks, val_min, color, args):
    """Prepare one column of the vertical graph.

    The whole graph is printed through the print_vertical function.
    (val_min and color are accepted for interface compatibility but are
    not used here.)
    """
    global maxi, value_list
    value_list.append(str(value))
    # Track the tallest column seen so far; it caps how many rows are
    # emitted when the normalized height falls below the default width.
    if maxi < num_blocks:
        maxi = num_blocks
    if num_blocks > 0:
        vertical_list.append(TICK * num_blocks)
    else:
        vertical_list.append(SM_TICK)
    # Transpose the accumulated columns into rows.
    for row in zip_longest(*vertical_list, fillvalue=' '):
        zipped_list.append(row)
    # Collect rows bottom-up, stopping at the graph height: the default
    # width when it was reached, otherwise the tallest column.
    limit = args['width'] if maxi == args['width'] else maxi
    result_list = []
    taken = 0
    for row in reversed(zipped_list):
        result_list.append(row)
        taken += 1
        if taken == limit:
            break
    # Rows to be printed vertically by the caller.
    return result_list
def read_cert_from_file(cert_file, cert_type):
    """Read a certificate from a file assumed to contain exactly one cert.

    :param cert_file: The name of the file
    :param cert_type: The certificate type ('pem', 'der', 'cer' or 'crt')
    :return: A base64 encoded certificate as a string, or the empty string
    """
    if not cert_file:
        return ''
    if cert_type == 'pem':
        text = read_file(cert_file, 'rb').decode().replace('\r\n', '\n')
        lines = text.split('\n')
        # Strip everything up to and including the BEGIN marker.
        for marker in ('-----BEGIN CERTIFICATE-----', '-----BEGIN PUBLIC KEY-----'):
            if marker in lines:
                lines = lines[lines.index(marker) + 1:]
                break
        else:
            raise CertificateError('Strange beginning of PEM file')
        # Strip the END marker and everything after it.
        for marker in ('-----END CERTIFICATE-----', '-----END PUBLIC KEY-----'):
            if marker in lines:
                lines = lines[:lines.index(marker)]
                break
        else:
            raise CertificateError('Strange end of PEM file')
        return make_str(''.join(lines).encode())
    if cert_type in ['der', 'cer', 'crt']:
        # Binary formats are base64-encoded verbatim.
        return make_str(base64.b64encode(read_file(cert_file, 'rb')))
def stop(self, *args):
    """Stop the TendrilManager.

    Requires cooperation from the listener implementation, which must
    watch the ``running`` attribute and stop accepting connections when
    it becomes False. Some managers will not exit the listening thread
    until all connections have been closed.
    """
    # Deregister from the table of running managers (may already be gone).
    self._running_managers.pop(self._manager_key, None)
    self.running = False
    self._local_addr = None
    self._local_addr_event.clear()
def new_process_number(self, name):
    """Increment and return the process-id counter for consumer *name*.

    :param str name: Consumer name
    :rtype: int
    """
    consumer = self.consumers[name]
    consumer.last_proc_num += 1
    return consumer.last_proc_num
def cli(ctx, dname, site):
    """Launches a MySQL CLI session for the database of the specified IPS installation."""
    assert isinstance(ctx, Context)
    log = logging.getLogger('ipsv.mysql')
    dname = domain_parse(dname).hostname
    domain = Session.query(Domain).filter(Domain.name == dname).first()
    if not domain:
        # No such domain
        click.secho('No such domain: {dn}'.format(dn=dname), fg='red', bold=True, err=True)
        return
    site_name = site
    site = Site.get(domain, site_name)
    if not site:
        # No such site
        click.secho('No such site: {site}'.format(site=site_name), fg='red', bold=True, err=True)
        return
    # Connect to the MySQL database; execl replaces the current process.
    log.info('Connecting to MySQL database: {db}'.format(db=site.db_name))
    log.debug('MySQL host: {host}'.format(host=site.db_host))
    log.debug('MySQL username: {user}'.format(user=site.db_user))
    log.debug('MySQL password: {pwd}'.format(pwd=site.db_pass))
    os.execl('/usr/bin/mysql', '/usr/bin/mysql',
             '--database={db}'.format(db=site.db_name),
             '--user={user}'.format(user=site.db_user),
             '--password={pwd}'.format(pwd=site.db_pass))
def assert_is_not(self, actual_val, unexpected_type, failure_message='Expected type not to be "{1}," but was "{0}"'):
    """Assert identity inequality through webdriver_assert.

    Creates an assertion closure using the provided values with the
    'is not' operator and passes it, together with the formatted
    failure message, to webdriver_assert.
    """
    # `unicode` only exists on Python 2; fall back to `str` on Python 3
    # so formatting the failure message no longer raises NameError there.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    assertion = lambda: unexpected_type is not actual_val
    self.webdriver_assert(assertion, text_type(failure_message).format(actual_val, unexpected_type))
def IsPrimitiveType(obj):
    """Return True when *obj* is an instance of one of the primitive VMOMI types."""
    vmodl_primitives = (
        types.bool, types.byte, types.short,
        types.double, types.float,
        types.PropertyPath, types.ManagedMethod,
        types.datetime, types.URI,
    )
    return (isinstance(obj, vmodl_primitives)
            or isinstance(obj, six.integer_types)
            or isinstance(obj, six.string_types)
            or isinstance(obj, type))
def _on_del_route(self, msg):
    """Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
    corresponding :attr:`_context_by_id` member. This is replaced by
    :class:`mitogen.parent.RouteMonitor` in an upgraded context.
    """
    LOG.error('%r._on_del_route() %r', self, msg)
    if msg.is_dead:
        return
    # Payload format: "<target_id>:<name>".
    target_id_s, _, name = bytes_partition(msg.data, b(':'))
    target_id = int(target_id_s, 10)
    context = self._context_by_id.get(target_id)
    if context:
        fire(context, 'disconnect')
    else:
        LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
def get_template_node_arguments(cls, tokens):
    """Return the arguments taken from the templatetag for the Node class.

    Takes the list of raw tokens, validates the token count, and returns
    (first, second, remainder).
    """
    if len(tokens) < 3:
        raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
    return tokens[1], tokens[2], tokens[3:]
def write(self, out):
    """Serialize this record's text into an outgoing packet buffer."""
    out.write_string(self.text, len(self.text))
def add_transitions_to_closest_sibling_state_from_selected_state():
    """Generates the outcome transitions from outcomes with positive outcome_id
    to the closest next state.

    :return: True when transitions were handled; None when aborted early or
        after new transitions were created.
    """
    task_string = "create transition"
    sub_task_string = "to closest sibling state"
    selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent()
    if selected_state_m is None:
        logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg))
        return
    logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string))
    state = selected_state_m.state
    parent_state = state.parent
    # find closest other state to connect to -> to_state
    closest_sibling_state_tuple = gui_helper_meta_data.get_closest_sibling_state(selected_state_m, 'outcome')
    if closest_sibling_state_tuple is None:
        logger.info("Can not {0} {1}: There is no other sibling state.".format(task_string, sub_task_string))
        return
    distance, sibling_state_m = closest_sibling_state_tuple
    to_state = sibling_state_m.state
    # find all possible from outcomes
    from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state)
    from_oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)]
    # all ports not connected connect to next state income
    if from_oc_not_connected:
        logger.debug("Create transition {0} ...".format(sub_task_string))
        for from_outcome in from_oc_not_connected:
            parent_state.add_transition(state.state_id, from_outcome.outcome_id, to_state.state_id, None)
    # no transitions are removed if not all connected to the same other state
    else:
        target = remove_transitions_if_target_is_the_same(from_outcomes)
        if target:
            target_state_id, _ = target
            if not target_state_id == to_state.state_id:
                logger.info("Removed transitions from outcomes {0} " "because all point to the same target.".format(sub_task_string.replace('closest ', '')))
                # NOTE(review): retries once after clearing the old
                # transitions; appears bounded to a single recursion —
                # confirm with remove_transitions_if_target_is_the_same.
                add_transitions_to_closest_sibling_state_from_selected_state()
            else:
                logger.info("Removed transitions from outcomes {0} " "because all point to the same target.".format(sub_task_string))
            return True
        logger.info("Will not {0} {1}: Not clear situation of connected transitions." "There will be no transitions to other states be touched.".format(task_string, sub_task_string))
        return True
def wide(self):
    """``True`` if this instruction needs to be prefixed by the WIDE opcode."""
    if not opcode_table[self.opcode].get('can_be_wide'):
        return False
    if self.operands[0].value >= 255:
        return True
    # Opcode 0x84 (iinc) is also widened when its second operand
    # exceeds a byte.
    if self.opcode == 0x84 and self.operands[1].value >= 255:
        return True
    return False
def build(self, field: Field) -> Mapping[str, Any]:
    """Build a parameter mapping from the parsed values of *field*."""
    return {key: value for key, value in self.iter_parsed_values(field)}
def get_airports(self, country):
    """Return a list of all the airports for a given country.

    For the given country this returns a list of dicts, one per airport,
    with information such as the airport's IATA code.

    Args:
        country (str): The country for which the airports will be fetched.

    Example::

        from pyflightdata import FlightData
        f = FlightData()
        f.get_airports('India')
    """
    url = AIRPORT_BASE.format(country.replace(" ", "-"))
    return self._fr24.get_airports_data(url)
def time(arg):
    """Convert the value into a unix time string (seconds since the epoch).

    For example:
        convert.time(datetime.now())  # '1409810596'

    :param arg: The time.
    :type arg: datetime.datetime or int
    """
    # datetime-like values expose timetuple(); convert them via mktime.
    if _has_method(arg, "timetuple"):
        arg = _time.mktime(arg.timetuple())
    if isinstance(arg, float):
        # Drop the fractional part before stringifying.
        arg = int(arg)
    return str(arg)
def get_bug_log(nr):
    """Get buglogs for bug number *nr*.

    A buglog is a dictionary with the following mappings:

    * "header" => string
    * "body" => string
    * "attachments" => list
    * "msg_num" => int
    * "message" => email.message.Message

    Parameters
    ----------
    nr : int
        the bugnumber

    Returns
    -------
    buglogs : list of dicts
    """
    reply = _soap_client_call('get_bug_log', nr)
    items_el = reply('soapenc:Array')
    buglogs = []
    for entry_el in items_el.children():
        header = _parse_string_el(entry_el("header"))
        body = _parse_string_el(entry_el("body"))
        msg_num = int(entry_el("msg_num"))
        # Reassemble header and body into a parseable mail message.
        parser = email.feedparser.FeedParser()
        parser.feed(header)
        parser.feed("\n\n")
        parser.feed(body)
        buglogs.append({
            "header": header,
            "body": body,
            "msg_num": msg_num,
            # server always returns an empty attachments array?
            "attachments": [],
            "message": parser.close(),
        })
    return buglogs
def compose(im, y, fns):
    """Thread *im* (and optional label *y*) through each transform in *fns*.

    Each function receives ``(im, y)`` and returns the updated pair.
    Returns ``im`` alone when *y* is ``None``, otherwise ``(im, y)``.
    """
    for transform in fns:
        im, y = transform(im, y)
    if y is None:
        return im
    return im, y
def connect(self):
    """Sets up your Phabricator session, it's not necessary to call
    this directly"""
    # Token-based auth needs no handshake with the server.
    if self.token:
        self.phab_session = {'token': self.token}
        return
    payload = {
        'params': json.dumps(self.connect_params),
        'output': 'json',
        '__conduit__': True,
    }
    response = self.req_session.post('%s/api/conduit.connect' % self.host, data=payload)
    # Parse out the response (error handling ommitted)
    result = response.json()['result']
    self.phab_session = {
        'sessionKey': result['sessionKey'],
        'connectionID': result['connectionID'],
    }
def toList(value):
    """Convert a value to a list, if possible."""
    kind = type(value)
    # Already a list: hand it back untouched.
    if kind == list:
        return value
    # Known iterable containers convert directly.
    if kind in [np.ndarray, tuple, xrange, array.array]:
        return list(value)
    # Vectors expose their contents through toArray().
    if isinstance(value, Vector):
        return list(value.toArray())
    raise TypeError("Could not convert %s to list" % value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.