signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def prune_to_subset(self, subset, inplace=False):
    """Prune the tree down to just the taxa named in `subset`.

    :param subset: set of taxon labels to keep
    :param inplace: prune this tree directly if True, otherwise prune a copy
    :return: the pruned tree, or None when `subset` contains labels that
        are not present on this tree
    """
    if not subset.issubset(self.labels):
        print('"subset" is not a subset')
        return
    target = self if inplace else self.copy()
    target._tree.retain_taxa_with_labels(subset)
    # Bipartitions must be re-encoded after the topology changes.
    target._tree.encode_bipartitions()
    target._dirty = True
    return target
def is_session_storage_enabled(self, subject=None):
    """Returns ``True`` if session storage is generally available (as determined
    by the super class's global configuration property is_session_storage_enabled)
    and no request-specific override has turned off session storage, False
    otherwise.

    This means session storage is disabled if the is_session_storage_enabled
    property is False or if a request attribute is discovered that turns off
    session storage for the current request.

    :param subject: the ``Subject`` for which session state persistence may
        be enabled
    :returns: ``True`` if session storage is generally available
    """
    if subject.get_session(False):
        # A session already exists for this subject, so use what already exists.
        return True
    if not self.session_storage_enabled:
        # Honor the global setting:
        return False
    # Non-web subject instances can't be saved to web-only session managers:
    if (not hasattr(subject, 'web_registry') and self.session_manager and
            not isinstance(self.session_manager, session_abcs.NativeSessionManager)):
        return False
    # NOTE(review): this assumes any subject reaching here has a
    # web_registry attribute (native-manager subjects without one would
    # raise AttributeError) -- confirm against caller expectations.
    return subject.web_registry.session_creation_enabled
def _paint_icon(self, iconic, painter, rect, mode, state, options):
    """Paint a single icon glyph onto `painter` within `rect`.

    :param iconic: icon provider supplying the glyph font via `iconic.font()`
    :param painter: QPainter to draw with (saved/restored around the draw)
    :param rect: QRect target area for the glyph
    :param mode: QIcon mode (Normal / Disabled / Active / Selected)
    :param state: QIcon state (On / Off)
    :param options: dict of rendering options (colors, characters, scale
        factor, prefix, optional offset/opacity/animation)
    """
    painter.save()
    # NOTE(review): these two defaults are immediately overwritten by the
    # (state, mode) lookup below; kept as-is for byte-compatibility.
    color = options['color']
    char = options['char']
    # Per-(state, mode) table selecting the color/character pair to draw.
    color_options = {
        QIcon.On: {
            QIcon.Normal: (options['color_on'], options['on']),
            QIcon.Disabled: (options['color_on_disabled'], options['on_disabled']),
            QIcon.Active: (options['color_on_active'], options['on_active']),
            QIcon.Selected: (options['color_on_selected'], options['on_selected']),
        },
        QIcon.Off: {
            QIcon.Normal: (options['color_off'], options['off']),
            QIcon.Disabled: (options['color_off_disabled'], options['off_disabled']),
            QIcon.Active: (options['color_off_active'], options['off_active']),
            QIcon.Selected: (options['color_off_selected'], options['off_selected']),
        },
    }
    color, char = color_options[state][mode]
    painter.setPen(QColor(color))
    # A 16 pixel-high icon yields a font size of 14, which is pixel perfect
    # for font-awesome. 16 * 0.875 = 14
    # The reason why the glyph size is smaller than the icon size is to
    # account for font bearing.
    draw_size = 0.875 * round(rect.height() * options['scale_factor'])
    prefix = options['prefix']
    # Animation setup hook
    animation = options.get('animation')
    if animation is not None:
        animation.setup(self, painter, rect)
    painter.setFont(iconic.font(prefix, draw_size))
    if 'offset' in options:
        # Copy before translating so the caller's rect is not mutated.
        rect = QRect(rect)
        rect.translate(options['offset'][0] * rect.width(),
                       options['offset'][1] * rect.height())
    painter.setOpacity(options.get('opacity', 1.0))
    painter.drawText(rect, Qt.AlignCenter | Qt.AlignVCenter, char)
    painter.restore()
def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1):  # pylint: disable=invalid-name
    """Loop, calling `function` until it returns non-None or a timeout expires.

    Args:
        timeout_s: The number of seconds to wait until a timeout condition is
            reached. As a convenience, this accepts None to mean never timeout.
            Can also be passed a PolledTimeout object instead of an integer.
        function: The function to call each iteration.
        sleep_s: The number of seconds to wait after calling the function.

    Returns:
        Whatever the function returned last.
    """
    def _is_not_none(result):
        return result is not None

    return loop_until_timeout_or_valid(timeout_s, function, _is_not_none, sleep_s)
def validate(self, request, response):
    """Refresh a cached resource after a validation response is received.

    :param request: the validating request (supplies the proxy URI)
    :param response: the validation response (supplies options and max_age)
    :return: None
    """
    element = self.search_response(request)
    if element is None:
        return
    # Refresh the cache entry from the validation response.
    element.cached_response.options = response.options
    element.freshness = True
    element.max_age = response.max_age
    element.creation_time = time.time()
    element.uri = request.proxy_uri
def install():
    """Function executed when running the script with the -install switch.

    Creates the Spyder start menu folder and shortcuts, registers the
    editor in the Windows registry and drops a desktop shortcut.
    Windows-only: relies on pywin32 postinstall helpers
    (get_special_folder_path, create_shortcut, file_created, ...).
    """
    # Create Spyder start menu folder
    # Don't use CSIDL_COMMON_PROGRAMS because it requires admin rights
    # This is consistent with use of CSIDL_DESKTOPDIRECTORY below
    # CSIDL_COMMON_PROGRAMS =
    #     C:\ProgramData\Microsoft\Windows\Start Menu\Programs
    # CSIDL_PROGRAMS =
    #     C:\Users\<username>\AppData\Roaming\Microsoft\Windows\Start Menu\Programs
    start_menu = osp.join(get_special_folder_path('CSIDL_PROGRAMS'),
                          'Spyder (Py%i.%i %i bit)' % (sys.version_info[0],
                                                       sys.version_info[1],
                                                       struct.calcsize('P') * 8))
    if not osp.isdir(start_menu):
        os.mkdir(start_menu)
        directory_created(start_menu)
    # Create Spyder start menu entries
    python = osp.abspath(osp.join(sys.prefix, 'python.exe'))
    pythonw = osp.abspath(osp.join(sys.prefix, 'pythonw.exe'))
    script = osp.abspath(osp.join(sys.prefix, 'scripts', 'spyder'))
    if not osp.exists(script):  # if not installed to the site scripts dir
        script = osp.abspath(osp.join(osp.dirname(osp.abspath(__file__)), 'spyder'))
    workdir = "%HOMEDRIVE%%HOMEPATH%"
    import distutils.sysconfig
    lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1)
    ico_dir = osp.join(lib_dir, 'spyder', 'windows')
    # if user is running -install manually then icons are in Scripts/
    if not osp.isdir(ico_dir):
        ico_dir = osp.dirname(osp.abspath(__file__))
    desc = 'The Scientific Python Development Environment'
    fname = osp.join(start_menu, 'Spyder (full).lnk')
    create_shortcut(python, desc, fname, '"%s"' % script, workdir,
                    osp.join(ico_dir, 'spyder.ico'))
    file_created(fname)
    fname = osp.join(start_menu, 'Spyder-Reset all settings.lnk')
    create_shortcut(python, 'Reset Spyder settings to defaults', fname,
                    '"%s" --reset' % script, workdir)
    file_created(fname)
    current = True
    # only affects current user
    root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
    # Register "Edit with Spyder" shell entries (with and without console).
    winreg.SetValueEx(winreg.CreateKey(root, KEY_C1 % ("", EWS)),
                      "", 0, winreg.REG_SZ,
                      '"%s" "%s\Scripts\spyder" "%%1"' % (pythonw, sys.prefix))
    winreg.SetValueEx(winreg.CreateKey(root, KEY_C1 % ("NoCon", EWS)),
                      "", 0, winreg.REG_SZ,
                      '"%s" "%s\Scripts\spyder" "%%1"' % (pythonw, sys.prefix))
    # Create desktop shortcut file
    desktop_folder = get_special_folder_path("CSIDL_DESKTOPDIRECTORY")
    fname = osp.join(desktop_folder, 'Spyder.lnk')
    desc = 'The Scientific Python Development Environment'
    create_shortcut(pythonw, desc, fname, '"%s"' % script, workdir,
                    osp.join(ico_dir, 'spyder.ico'))
    file_created(fname)
def available_readers(as_dict=False):
    """Available readers based on the current configuration.

    Args:
        as_dict (bool): Optionally return reader information as a
            dictionary. Default: False

    Returns:
        List of available reader names. If `as_dict` is `True` then a
        list of dictionaries with additional reader information is
        returned instead.
    """
    readers = []
    for reader_configs in configs_for_reader():
        try:
            reader_info = read_reader_config(reader_configs)
        except (KeyError, IOError, yaml.YAMLError):
            # A broken config should not hide the remaining readers.
            LOG.warning("Could not import reader config from: %s", reader_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        if as_dict:
            readers.append(reader_info)
        else:
            readers.append(reader_info['name'])
    return readers
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done.

    :param directory: destination directory for the extracted file
    :param filename: dataset file name (the ``.gz`` suffix is appended for
        the download URL)
    :return: path to the extracted file inside `directory`
    """
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        # Already downloaded and extracted.
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    url = 'http://yann.lecun.com/exdb/mnist/' + filename + '.gz'
    # Download to a temporary .gz file, then decompress into the target path.
    _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
def is_optional(self):
    """Return whether the parameter is optional or required.

    :return: True if optional (non-empty 'optional' attribute AND a
        'minValue' attribute equal to 0), False if required
    """
    attrs = self.attributes
    has_optional_flag = 'optional' in attrs and bool(attrs['optional'].strip())
    has_zero_min = 'minValue' in attrs and attrs['minValue'] == 0
    return has_optional_flag and has_zero_min
def _GenerateChunk ( self , length ) :
"""Generates data for a single chunk .""" | while 1 :
to_read = min ( length , self . RECV_BLOCK_SIZE )
if to_read == 0 :
return
data = self . rfile . read ( to_read )
if not data :
return
yield data
length -= len ( data ) |
def functions_shadowed(self):
    '''Return the list of functions shadowed

    Returns:
        list(core.Function)
    '''
    shadowed = []
    # A function is shadowed when an inherited contract declares its own
    # (non-inherited) function with the same full name.
    for ancestor in self.contract.inheritance:
        for candidate in ancestor.functions_not_inherited:
            if candidate.full_name == self.full_name:
                shadowed.append(candidate)
    return shadowed
def _idle_register_view(self, view):
    """Internal method that calls register_view.

    Stores `view`, wires up signal handlers according to self.handlers
    ("class" scans methods named on_<widget>__<signal>; "glade" uses
    glade auto-connection), then runs register_view/register_adapters.
    Returns False -- presumably so it runs only once when scheduled as
    an idle callback (TODO confirm against the event loop used).
    """
    assert (self.view is None)
    self.view = view
    if self.handlers == "class":
        # Auto-connect every method named on_<widget>__<signal>.
        for name in dir(self):
            when, _, what = partition(name, '_')
            widget, _, signal = partition(what, '__')
            if when == "on":
                try:
                    view[widget].connect(signal, getattr(self, name))
                except IndexError:
                    # Not a handler
                    pass
                except KeyError:
                    logger.warn("Widget not found for handler: %s", name)
    elif self.handlers == "glade":
        self.__autoconnect_signals()
    else:
        raise NotImplementedError("%s is not a valid source of signal "
                                  "connections" % self.handlers)
    self.register_view(view)
    self.register_adapters()
    if self.__auto_adapt:
        self.adapt()
    return False
def _recon_lcs(x, y):
    """Returns the Longest Common Subsequence between x and y.

    Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

    :param x: sequence of words
    :param y: sequence of words
    :returns sequence: LCS of x and y
    """
    table = _lcs(x, y)
    i, j = _get_index_of_lcs(x, y)
    # Walk the DP table backwards instead of recursing.
    pairs = []
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            pairs.append((x[i - 1], i))
            i -= 1
            j -= 1
        elif table[i - 1, j] > table[i, j - 1]:
            i -= 1
        else:
            j -= 1
    pairs.reverse()
    return tuple(word for word, _ in pairs)
def integrate_predefined(rhs, jac, y0, xout, atol, rtol, dx0=0.0, dx_max=0.0,
                         check_callable=False, check_indexing=False, **kwargs):
    """Integrates a system of ordinary differential equations.

    Parameters
    ----------
    rhs : callable
        Function with signature f(t, y, fout) which modifies fout *inplace*.
    jac : callable
        Function with signature j(t, y, jmat_out, dfdx_out) which modifies
        jmat_out and dfdx_out *inplace*.
    y0 : array_like
        Initial values of the dependent variables.
    xout : array_like
        Values of the independent variable.
    atol : float
        Absolute tolerance.
    rtol : float
        Relative tolerance.
    dx0 : float
        Initial step-size.
    dx_max : float
        Maximum step-size.
    check_callable : bool (default: False)
        Perform signature sanity checks on ``rhs`` and ``jac``.
    check_indexing : bool (default: False)
        Perform item setting sanity checks on ``rhs`` and ``jac``.
    \\*\\*kwargs:
        'method': str
            One in ``('rosenbrock4', 'dopri5', 'bs')``.
        'return_on_error': bool
            Returns on error without raising an exception
            (with ``'success' == False``).
        'autorestart': int
            Useful for autonomous systems where conditions change during
            integration. Will restart the integration with ``x == 0``.
        'dx0cb': callable
            Callback for calculating dx0 (make sure to pass ``dx0 == 0.0``)
            to enable. Signature: ``f(x, y[:]) -> float``.

    Returns
    -------
    (result, info):
        result: 2-dimensional array of the dependent variables (axis 1) for
            values corresponding to xout (axis 0)
        info: dictionary with information about the integration
    """
    # Sanity checks to reduce risk of having a segfault:
    jac = _ensure_5args(jac)
    if check_callable:
        _check_callable(rhs, jac, xout[0], y0)
    if check_indexing:
        _check_indexing(rhs, jac, xout[0], y0)
    y0_arr = np.asarray(y0, dtype=np.float64)
    xout_arr = np.asarray(xout, dtype=np.float64)
    return predefined(rhs, jac, y0_arr, xout_arr, atol, rtol, dx0, dx_max,
                      **_bs(kwargs))
def unpy2exe(filename, python_version=None, output_dir=None):
    """Extract code objects from a py2exe executable into .pyc files.

    :param filename: path to the py2exe-built executable
    :param python_version: target Python version for the emitted .pyc files
    :param output_dir: destination directory (created if missing;
        defaults to the current directory)
    :raises ValueError: if the file is not a py2exe executable
    """
    if output_dir is None:
        output_dir = '.'
    elif not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pe = pefile.PE(filename)
    if not check_py2exe_file(pe):
        raise ValueError('Not a py2exe executable.')
    for code_object in extract_code_objects(pe):
        dump_to_pyc(code_object, python_version, output_dir)
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):
    # type: (str, str, int, str, str) -> List[str]
    """Returns a list of matching blocks of text in a and b.

    Args:
        a (str): First string to match
        b (str): Second string to match
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to ''.

    Returns:
        List[str]: List of matching blocks of text
    """
    matcher = difflib.SequenceMatcher(lambda ch: ch in ignore)
    matcher.set_seqs(a=a, b=b)
    results = []
    for block in matcher.get_matching_blocks():
        text = a[block.a:block.a + block.size]
        if end_characters:
            original = text
            # Drop leading end-characters ...
            while text and text[0] in end_characters:
                text = text[1:]
            # ... and trailing characters after the last end-character.
            while text and text[-1] not in end_characters:
                text = text[:-1]
            if not text:
                # Nothing survived trimming; fall back to the raw block.
                text = original
        if len(text) >= match_min_size:
            results.append(text)
    return results
def compute_node_positions(self):
    """Uses the get_cartesian function to compute the positions of each
    node in the Circos plot, storing them in self.node_coords.
    """
    node_r = self.nodeprops["radius"]
    radius = circos_radius(n_nodes=len(self.graph.nodes()), node_r=node_r)
    self.plot_radius = radius
    # Line width scales with the plot radius.
    self.nodeprops["linewidth"] = radius * 0.01
    coords = [get_cartesian(r=radius, theta=node_theta(self.nodes, node))
              for node in self.nodes]
    xs = [x for x, _ in coords]
    ys = [y for _, y in coords]
    self.node_coords = {"x": xs, "y": ys}
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
                   **kwargs):
    """Handle scaling and cropping the source image.

    Images can be scaled/cropped against a single dimension by using zero
    as the placeholder in the size. For example, ``size=(100, 0)`` will cause
    the image to be resized to 100 pixels wide, keeping the aspect ratio of
    the source image.

    crop
        Crop the source image height or width to exactly match the requested
        thumbnail size (the default is to proportionally resize the source
        image to fit within the requested thumbnail size).

        By default, the image is centered before being cropped. To crop from
        the edges, pass a comma separated string containing the ``x`` and ``y``
        percentage offsets (negative values go from the right/bottom). Some
        examples follow:

        * ``crop="0,0"`` will crop from the left and top edges.
        * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)
          and the bottom edge.
        * ``crop=",0"`` will keep the default behavior for the x axis
          (horizontally centering the image) and crop from the top edge.

        The image can also be "smart cropped" by using ``crop="smart"``. The
        image is incrementally cropped down to the requested size by removing
        slices from edges with the least entropy.

        Finally, you can use ``crop="scale"`` to simply scale the image so that
        at least one dimension fits within the size dimensions given (you may
        want to use the upscale option too).

    upscale
        Allow upscaling of the source image during scaling.

    zoom
        A percentage to zoom in on the scaled image. For example, a zoom of
        ``40`` will clip 20% off each side of the source image before
        thumbnailing.

    target
        Set the focal point as a percentage for the image if it needs to be
        cropped (defaults to ``(50, 50)``).

        For example, ``target="10,20"`` will set the focal point as 10% and 20%
        from the left and top of the image, respectively. If the image needs to
        be cropped, it will trim off the right and bottom edges until the focal
        point is centered.

        Can either be set as a two-item tuple such as ``(20, 30)`` or a comma
        separated string such as ``"20,10"``.

        A null value such as ``(20, None)`` or ``",60"`` will default to 50%.
    """
    source_x, source_y = [float(v) for v in im.size]
    target_x, target_y = [int(v) for v in size]
    # Pick a scale that covers (crop / one-dimensional) or fits (plain resize).
    if crop or not target_x or not target_y:
        scale = max(target_x / source_x, target_y / source_y)
    else:
        scale = min(target_x / source_x, target_y / source_y)
    # Handle one-dimensional targets.
    if not target_x:
        target_x = round(source_x * scale)
    elif not target_y:
        target_y = round(source_y * scale)
    if zoom:
        if not crop:
            target_x = round(source_x * scale)
            target_y = round(source_y * scale)
            crop = True
        scale *= (100 + int(zoom)) / 100.0
    if scale < 1.0 or (scale > 1.0 and upscale):
        # Resize the image to the target size boundary. Round the scaled
        # boundary sizes to avoid floating point errors.
        im = im.resize((int(round(source_x * scale)),
                        int(round(source_y * scale))),
                       resample=Image.ANTIALIAS)
    if crop:
        # Use integer values now.
        source_x, source_y = im.size
        # Difference between new image size and requested size.
        diff_x = int(source_x - min(source_x, target_x))
        diff_y = int(source_y - min(source_y, target_y))
        if crop != 'scale' and (diff_x or diff_y):
            # Parse a "x,y" focal-point string into its percentage groups.
            if isinstance(target, six.string_types):
                target = re.match(r'(\d+)?,(\d+)?$', target)
                if target:
                    target = target.groups()
            if target:
                # Missing components default to 50 (center).
                focal_point = [int(n) if (n or n == 0) else 50 for n in target]
            else:
                focal_point = 50, 50
            # Crop around the focal point
            halftarget_x, halftarget_y = int(target_x / 2), int(target_y / 2)
            focal_point_x = int(source_x * focal_point[0] / 100)
            focal_point_y = int(source_y * focal_point[1] / 100)
            box = [
                max(0, min(source_x - target_x, focal_point_x - halftarget_x)),
                max(0, min(source_y - target_y, focal_point_y - halftarget_y)),
            ]
            box.append(int(min(source_x, box[0] + target_x)))
            box.append(int(min(source_y, box[1] + target_y)))
            # See if an edge cropping argument was provided.
            edge_crop = (isinstance(crop, six.string_types) and
                         re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
            # NOTE(review): under Python 3 a filter object is always truthy,
            # so this condition reduces to bool(edge_crop); harmless here
            # because empty groups leave x_crop/y_crop as None below, but
            # any(edge_crop.groups()) would state the intent -- confirm
            # target Python version.
            if edge_crop and filter(None, edge_crop.groups()):
                x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
                if x_crop:
                    offset = min(int(target_x) * int(x_crop) // 100, diff_x)
                    if x_right:
                        box[0] = diff_x - offset
                        box[2] = source_x - offset
                    else:
                        box[0] = offset
                        box[2] = source_x - (diff_x - offset)
                if y_crop:
                    offset = min(int(target_y) * int(y_crop) // 100, diff_y)
                    if y_bottom:
                        box[1] = diff_y - offset
                        box[3] = source_y - offset
                    else:
                        box[1] = offset
                        box[3] = source_y - (diff_y - offset)
            # See if the image should be "smart cropped".
            elif crop == 'smart':
                left = top = 0
                right, bottom = source_x, source_y
                # Iteratively shave the lowest-entropy slices off each axis.
                while diff_x:
                    slice = min(diff_x, max(diff_x // 5, 10))
                    start = im.crop((left, 0, left + slice, source_y))
                    end = im.crop((right - slice, 0, right, source_y))
                    add, remove = _compare_entropy(start, end, slice, diff_x)
                    left += add
                    right -= remove
                    diff_x = diff_x - add - remove
                while diff_y:
                    slice = min(diff_y, max(diff_y // 5, 10))
                    start = im.crop((0, top, source_x, top + slice))
                    end = im.crop((0, bottom - slice, source_x, bottom))
                    add, remove = _compare_entropy(start, end, slice, diff_y)
                    top += add
                    bottom -= remove
                    diff_y = diff_y - add - remove
                box = (left, top, right, bottom)
            # Finally, crop the image!
            im = im.crop(box)
    return im
def write_file(file, b):
    """Write ``b`` to file ``file``.

    :arg file: path-like or file-like object.
    :arg bytes b: The content.
    """
    # Prefer the richest interface available: pathlib-style write_bytes,
    # then a plain file-like write, otherwise treat it as a path.
    write_bytes = getattr(file, "write_bytes", None)
    if write_bytes is not None:
        write_bytes(b)
        return
    write = getattr(file, "write", None)
    if write is not None:
        write(b)
        return
    with open(file, "wb") as f:
        f.write(b)
def send(self, load, tries=None, timeout=None, raw=False):  # pylint: disable=unused-argument
    '''Emulate the channel send method; the tries and timeout are not used.'''
    if 'cmd' not in load:
        log.error('Malformed request, no cmd: %s', load)
        return {}
    cmd = load['cmd'].lstrip('_')
    # Stubbed commands take precedence over the real fileserver methods.
    try:
        return self.cmd_stub[cmd]
    except KeyError:
        pass
    if not hasattr(self.fs, cmd):
        log.error('Malformed request, invalid cmd: %s', load)
        return {}
    return getattr(self.fs, cmd)(load)
def _add_new_route(dcidr, router_ip, vpc_info, con, route_table_id):
    """Add a new route to the route table.

    :param dcidr: destination CIDR block for the route
    :param router_ip: IP address of the routing instance inside the VPC
    :param vpc_info: dict with VPC lookup data ('rt_subnet_lookup',
        'cluster_node_subnets', ...)
    :param con: boto VPC connection used to create the route
    :param route_table_id: ID of the route table to modify
    """
    try:
        instance, eni = find_instance_and_eni_by_ip(vpc_info, router_ip)
        # Only set the route if the RT is associated with any of the subnets
        # used for the cluster.
        rt_subnets = set(vpc_info['rt_subnet_lookup'].get(route_table_id, []))
        cluster_node_subnets = set(vpc_info['cluster_node_subnets'])
        if not rt_subnets or not rt_subnets.intersection(cluster_node_subnets):
            logging.debug("--- skipping adding route in RT '%s' "
                          "%s -> %s (%s, %s) since RT's subnets (%s) are not "
                          "part of the cluster (%s)." %
                          (route_table_id, dcidr, router_ip,
                           instance.id, eni.id,
                           ", ".join(rt_subnets) if rt_subnets else "none",
                           ", ".join(cluster_node_subnets)))
            return
        logging.info("--- adding route in RT '%s' "
                     "%s -> %s (%s, %s)" %
                     (route_table_id, dcidr, router_ip, instance.id, eni.id))
        con.create_route(route_table_id=route_table_id,
                         destination_cidr_block=dcidr,
                         instance_id=instance.id,
                         interface_id=eni.id)
        CURRENT_STATE.routes[dcidr] = (router_ip, str(instance.id), str(eni.id))
        _rt_state_update(route_table_id, dcidr, router_ip, instance.id, eni.id,
                         msg="Added route")
    except Exception as e:
        # BUGFIX: was `e.message`, which only exists on Python 2 exceptions;
        # on Python 3 it raised AttributeError inside this handler, masking
        # the real error. str(e) works on both versions.
        logging.error("*** failed to add route in RT '%s' "
                      "%s -> %s (%s)" %
                      (route_table_id, dcidr, router_ip, str(e)))
        _rt_state_update(route_table_id, dcidr,
                         msg="[ERROR add route: %s]" % str(e))
def __update_keywords(uid, inkeywords):
    '''Update the post identified by `uid` with the given keywords.'''
    # Build and run the UPDATE in a single chained expression.
    TabPost.update(keywords=inkeywords).where(TabPost.uid == uid).execute()
def p_generate_items(self, p):
    # PLY (yacc) rule: the docstring below IS the grammar production that
    # ply.yacc parses -- do not edit it as documentation.
    'generate_items : generate_items generate_item'
    # Append the newly parsed generate_item to the tuple built so far.
    p[0] = p[1] + (p[2],)
    p.set_lineno(0, p.lineno(1))
def tuple(self):
    """Return the values of all default fields as a tuple."""
    # Collect each default attribute's value, then pack into a tuple
    # without calling the builtin by name (this method shadows it).
    values = [getattr(self, name) for name in self.__class__.defaults]
    return (*values,)
def serve_assets(path):
    """Serve Nikola assets.

    This is meant to be used ONLY by the internal dev server.
    Please configure your web server to handle requests to this URL::

        /assets/ => output/assets
    """
    assets_root = os.path.join(app.config['NIKOLA_ROOT'],
                               _site.config["OUTPUT_FOLDER"],
                               'assets')
    return send_from_directory(assets_root, path)
def ram_dp_rf(clka, clkb, wea, web, addra, addrb, dia, dib, doa, dob):
    '''RAM: Dual-Port, Read-First.

    MyHDL model of a dual-port RAM where each port, on its own clock edge,
    optionally writes and always reads; the "read-first" name comes from
    `.next` semantics: the output gets the pre-write word on a same-cycle
    write.
    '''
    # Backing store: one intbv word per address, word width from the data
    # bus, depth from the address bus.
    memL = [Signal(intbv(0)[len(dia):]) for _ in range(2 ** len(addra))]

    @always(clka.posedge)
    def writea():
        if wea:
            memL[int(addra)].next = dia
        # Scheduled with .next, so doa sees the old word on a write cycle.
        doa.next = memL[int(addra)]

    @always(clkb.posedge)
    def writeb():
        if web:
            memL[int(addrb)].next = dib
        dob.next = memL[int(addrb)]

    return writea, writeb
def _compute_faulting_style_term ( self , C , rake ) :
"""Compute and return fifth and sixth terms in equations ( 2a )
and ( 2b ) , pages 20.""" | Fn = float ( rake > - 135.0 and rake < - 45.0 )
Fr = float ( rake > 45.0 and rake < 135.0 )
return C [ 'a8' ] * Fn + C [ 'a9' ] * Fr |
def keys(cls, fqdn, sort_by=None):
    """Display keys information about a domain.

    :param fqdn: fully qualified domain name to look up
    :param sort_by: optional sort key passed through to the API URL
    :return: decoded JSON response listing the domain's keys
    """
    domain_meta = cls.get_fqdn_info(fqdn)
    keys_url = domain_meta['domain_keys_href']
    return cls.json_get(cls.get_sort_url(keys_url, sort_by))
def load(ctx, variant_source, family_file, family_type, root):
    """Load a variant source into the database.

    If no database was found run puzzle init first.

    1. VCF: if a vcf file is used it can be loaded with a ped file
    2. GEMINI: ped information will be retrieved from the gemini db

    :param ctx: click context (aborts the command on errors)
    :param variant_source: path to the VCF / GEMINI source file
    :param family_file: ped-style family definition lines (optional)
    :param family_type: format of `family_file`
    :param root: puzzle root directory; falls back to ctx.obj['root']
        and then ~/.puzzle
    """
    root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
    if os.path.isfile(root):
        logger.error("'root' can't be a file")
        ctx.abort()
    logger.info("Root directory is: {}".format(root))
    db_path = os.path.join(root, 'puzzle_db.sqlite3')
    logger.info("db path is: {}".format(db_path))
    if not os.path.exists(db_path):
        logger.warn("database not initialized, run 'puzzle init'")
        ctx.abort()
    if not os.path.isfile(variant_source):
        logger.error("Variant source has to be a file")
        ctx.abort()
    mode = get_file_type(variant_source)
    if mode == 'unknown':
        logger.error("Unknown file type")
        ctx.abort()
    # Test if gemini is installed
    elif mode == 'gemini':
        logger.debug("Initialzing GEMINI plugin")
        if not GEMINI:
            logger.error("Need to have gemini installed to use gemini plugin")
            ctx.abort()
    logger.debug('Set puzzle backend to {0}'.format(mode))
    variant_type = get_variant_type(variant_source)
    logger.debug('Set variant type to {0}'.format(variant_type))
    cases = get_cases(variant_source=variant_source,
                      case_lines=family_file,
                      case_type=family_type,
                      variant_type=variant_type,
                      variant_mode=mode)
    if len(cases) == 0:
        logger.warning("No cases found")
        ctx.abort()
    logger.info("Initializing sqlite plugin")
    store = SqlStore(db_path)
    for case_obj in cases:
        # Skip cases that are already in the database.
        if store.case(case_obj.case_id) is not None:
            logger.warn("{} already exists in the database".format(case_obj.case_id))
            continue
        # extract case information
        logger.debug("adding case: {} to puzzle db".format(case_obj.case_id))
        store.add_case(case_obj, vtype=variant_type, mode=mode)
def build_stoplist(self, texts, basis='zou', size=100, sort_words=True,
                   inc_values=False, lower=True, remove_punctuation=True,
                   remove_numbers=True, include=[], exclude=[]):
    """Build a stopword list from a document collection.

    :param texts: list of strings used as document collection for
        extracting stopwords (a single string is wrapped in a list)
    :param basis: Define the basis for extracting stopwords from the
        corpus. Available methods are:
        - 'frequency', word counts
        - 'tfidf', tf-idf scores
        - 'mean', mean probabilities
        - 'variance', variance probabilities
        - 'entropy', entropy
        - 'zou', composite measure as defined in:
          Zou, F., Wang, F. L., Deng, X., Han, S., and Wang, L. S. 2006.
          "Automatic Construction of Chinese Stop Word List." In Proceedings
          of the 5th WSEAS International Conference on Applied Computer
          Science, 1010-1015.
    :param size: Set the size of the output list
    :param sort_words: Sort output list alphabetically? (Otherwise return
        is descending by basis value)
    :param inc_values: Include basis value; e.g. word counts for
        'frequency', mean probabilities for 'mean'; for 'zou', the basis
        value is the word's rank after the Borda sort
    :param lower: Lowercase corpus or no?
    :param remove_punctuation: Remove punctuation from corpus or no?
    :param remove_numbers: Remove numbers from corpus or no?
    :param include: List of words in addition to stopwords that are
        extracted from the document collection to be added to the final
        list; the 'value' in the returned tuple is set to None
    :param exclude: List of words in addition to stopwords that are
        extracted from the document collection to be removed from the
        final list
    :return: a list of stopwords extracted from the corpus
    :rtype: list
    """
    # NOTE(review): mutable default arguments `include=[]` / `exclude=[]`
    # are shared across calls; safe only as long as they are never mutated
    # here (they are not) -- consider `None` sentinels.
    # Check 'texts' type for string
    if isinstance(texts, str):
        texts = [texts]
    # TODO: Move all of this preprocessing code outside 'build_stoplist'
    if lower:
        texts = [text.lower() for text in texts]
    if remove_punctuation:
        texts = self._remove_punctuation(texts, self.punctuation)
    if remove_numbers:
        # Replace digits with spaces so word boundaries are preserved.
        translator = str.maketrans({key: " " for key in '0123456789'})
        texts = [text.translate(translator) for text in texts]
    # Get DTM and basic descriptive info
    dtm, vocab = self._make_dtm_vocab(texts)
    tfidf, _ = self._make_tfidf_vocab(texts)
    M = len(vocab)  # NOTE(review): unused; kept for byte-compatibility
    N = len(texts)
    # Calculate probabilities
    raw_lengths = self._get_raw_lengths(texts)
    l = self._get_length_array(raw_lengths)
    P = self._get_probabilities(dtm, l)
    if basis == 'frequency':
        # Calculate plain frequencies
        freq = self.np.ravel(dtm.sum(axis=0))
        freq_list = self._combine_vocabulary(vocab, freq)[:size]
        stops = freq_list
    elif basis == 'tfidf':
        # Calculate tfidf
        tfidf = self.np.ravel(tfidf.sum(axis=0))
        tfidf_list = self._combine_vocabulary(vocab, tfidf)[:size]
        stops = tfidf_list
    elif basis == 'mean':
        # Calculate mean probabilities
        MP = self._get_mean_probabilities(P, N)
        mp_list = self._combine_vocabulary(vocab, MP)[:size]
        stops = mp_list
    elif basis == 'variance':
        bP = dtm / sum(raw_lengths)
        VP = self._get_variance_probabilities(bP, P, N)
        vp_list = self._combine_vocabulary(vocab, VP)[:size]
        stops = vp_list
    elif basis == 'entropy':
        ent = self._get_entropies(P)
        ent_list = self._combine_vocabulary(vocab, ent)[:size]
        # NOTE(review): only this basis wraps the result in a set, which
        # discards the ranking order the other bases keep -- confirm
        # whether this asymmetry is intentional.
        stops = set(ent_list)
    elif basis == 'zou':
        # Composite basis: Borda-merge the mean, variance and entropy
        # rankings, then re-pair each word with its final rank.
        MP = self._get_mean_probabilities(P, N)
        mp_list = self._combine_vocabulary(vocab, MP)
        mp_list = [item[0] for item in mp_list]
        bP = dtm / sum(raw_lengths)
        VP = self._get_variance_probabilities(bP, P, N)
        vp_list = self._combine_vocabulary(vocab, VP)
        vp_list = [item[0] for item in vp_list]
        ent = self._get_entropies(P)
        ent_list = self._combine_vocabulary(vocab, ent)
        ent_list = [item[0] for item in ent_list]
        lists = [mp_list, vp_list, ent_list]
        stops = self._borda_sort(lists)[:size]
        stops = [(stop, rank) for rank, stop in enumerate(stops)]
    else:
        raise ValueError("Basis '{}' not supported.".format(basis))
    if exclude:
        stops = [item for item in stops if item[0] not in exclude]
    if include:
        # Included extras carry no basis value.
        stops.extend((item, None) for item in include if item not in stops)
    if sort_words:
        stops = sorted(stops)
    if inc_values:
        return stops
    else:
        return [item[0] for item in stops]
def part_lister(mpupload, part_number_marker=None):
    """Yield every part of a multipart upload, paging until the listing
    is no longer truncated.

    :param mpupload: multipart upload object exposing ``get_all_parts``,
        ``next_part_number_marker`` and ``is_truncated``
    :param part_number_marker: optional marker to resume listing from
    """
    while True:
        # Fetch the next page of parts, starting after the current marker.
        for item in mpupload.get_all_parts(None, part_number_marker):
            yield item
        part_number_marker = mpupload.next_part_number_marker
        # A non-truncated response means this was the last page.
        if not mpupload.is_truncated:
            break
def detect_volumes(self, vstype=None, method=None, force=False):
    """Iterator for detecting volumes within this volume system.

    :param str vstype: The volume system type to use. If None, uses :attr:`vstype`
    :param str method: The detection method to use. If None, uses :attr:`detection`
    :param bool force: Specify if you want to force running the detection if has_detected is True.
    """
    # Guard: detection normally runs only once per volume system.
    if self.has_detected and not force:
        logger.warning("Detection already ran.")
        return
    # Fall back to instance-level defaults when arguments are omitted.
    if vstype is None:
        vstype = self.vstype
    if method is None:
        method = self.volume_detector
    if method == 'auto':
        # Pick the best available detector for this environment.
        method = VolumeSystem._determine_auto_detection_method()
    if method in ALL_VOLUME_SYSTEM_DETECTORS:
        # Delegate to the chosen detector, streaming detected volumes out.
        for v in ALL_VOLUME_SYSTEM_DETECTORS[method].detect(self, vstype):
            yield v
    else:
        logger.error("No viable detection method found")
        raise ArgumentError("No viable detection method found")
    # Mark detection as done so a later call without force=True is a no-op.
    self.has_detected = True
def state_size(self) -> Sequence[Shape]:
    '''Returns the MDP state size.

    Delegates to the compiler's RDDL model for the declared state
    fluent sizes.
    '''
    return self._sizes(self._compiler.rddl.state_size)
def show_help(command_name: str = None, raw_args: str = '') -> Response:
    """Prints the basic command help to the console.

    :param command_name: name of a specific command to describe; when
        omitted or unknown, the general command listing is shown instead
    :param raw_args: raw argument string used to build the command parser
    :return: a Response describing what was shown (or the failure)
    """
    response = Response()
    cmds = fetch()
    if command_name and command_name in cmds:
        # Build the command's argument parser so its own formatted help
        # text can be returned.
        parser, result = parse.get_parser(cmds[command_name], parse.explode_line(raw_args), dict())
        if parser is not None:
            out = parser.format_help()
            return response.notify(kind='INFO', code='COMMAND_DESCRIPTION').kernel(commands=out).console(out, whitespace=1).response
    # Fall through: either no command was named, it was unknown, or no
    # parser could be built — show the general module help and fail.
    environ.log_header('Available Commands')
    response.consume(print_module_help())
    return response.fail(code='NO_SUCH_COMMAND', message='Failed to show command help for "{}"'.format(command_name)).console("""
    For more information on the various commands, enter help on the
    specific command:
    help [COMMAND]
    """, whitespace_bottom=1).response
def wrap_error(self, data, renderer_context, keys_are_fields, issue_is_title):
    """Convert error native data to the JSON API Error format.

    JSON API has a different format for errors, but Django REST Framework
    doesn't have a separate rendering path for errors. This results in
    some guesswork to determine if data is an error, what kind, and how
    to handle it.

    As of August 2014, there is not a consensus about the error format in
    JSON API. The format documentation defines an "errors" collection, and
    some possible fields for that collection, but without examples for
    common cases. If and when consensus is reached, this format will
    probably change.

    :param data: mapping of field name (or error kind) to issue(s)
    :param renderer_context: DRF renderer context; supplies the HTTP status
    :param keys_are_fields: when True, emit a "path" entry per field
    :param issue_is_title: when True, put each issue under "title",
        otherwise under "detail"
    :return: dict with a single "errors" list in JSON API shape
    """
    response = renderer_context.get("response", None)
    # Stringified status; becomes "None" when no response object exists.
    status_code = str(response and response.status_code)
    errors = []
    for field, issues in data.items():
        # Normalise a bare string into a one-element list of issues.
        if isinstance(issues, six.string_types):
            issues = [issues]
        for issue in issues:
            error = self.dict_class()
            error["status"] = status_code
            if issue_is_title:
                error["title"] = issue
            else:
                error["detail"] = issue
            if keys_are_fields:
                # Non-field errors point at the whole object ('/-').
                if field in ('non_field_errors', NON_FIELD_ERRORS):
                    error["path"] = '/-'
                else:
                    error["path"] = '/' + field
            errors.append(error)
    wrapper = self.dict_class()
    wrapper["errors"] = errors
    return wrapper
def _add_spacer_to_menu(self):
    """Create a spacer to the menu to separate action groups."""
    # A QAction flagged as a separator renders as a divider line in menus.
    separator = QAction(self.iface.mainWindow())
    separator.setSeparator(True)
    self.iface.addPluginToMenu(self.tr('InaSAFE'), separator)
def get_weather_name(self, ip):
    """Return the weather_name of the record for *ip*.

    Falls through to the (falsy) record itself when no record exists,
    matching the `rec and rec.weather_name` idiom.
    """
    record = self.get_all(ip)
    if not record:
        return record
    return record.weather_name
def apply_filters(instance, html, field_name):
    """Run all filters for a given HTML snippet.

    Returns the results of the pre-filter and post-filter as tuple.
    This function can be called from the :meth:`~django.db.models.Model.full_clean` method in the model.
    That function is called when the form values are assigned.
    For example:

    .. code-block:: python

        def full_clean(self, *args, **kwargs):
            super(TextItem, self).full_clean(*args, **kwargs)
            self.html, self.html_final = apply_filters(self, self.html, field_name='html')

    :type instance: fluent_contents.models.ContentItem
    :raise ValidationError: when one of the filters detects a problem.
    """
    try:
        html = apply_pre_filters(instance, html)
        # Perform post processing. This does not effect the original 'html'
        html_final = apply_post_filters(instance, html)
    except ValidationError as e:
        if hasattr(e, 'error_list'):
            # The filters can raise a "dump" ValidationError with a single error.
            # However, during post_clean it's expected that the fields are named,
            # so re-raise the error keyed to the given field name.
            raise ValidationError({field_name: e.error_list})
        raise
    return html, html_final
def initialize_request(self, request, *args, **kwargs):
    """Returns the initial request object.

    Wraps the raw request in a DRF ``Request`` configured with this
    view's parsers, authenticators and content negotiator.
    """
    parser_context = self.get_parser_context(request)
    return Request(request, parsers=self.get_parsers(), authenticators=self.get_authenticators(), negotiator=self.get_content_negotiator(), parser_context=parser_context)
def attributes(self, full=0):
    """Return a dictionary describing every global attribute attached
    to the SD interface.

    Args::

      full    true to get complete info about each attribute
              false to report only each attribute value

    Returns::

      Empty dictionary if no global attribute is defined; otherwise a
      dictionary keyed by attribute name.  With `full` false, the value
      is the attribute value.  With `full` true, the value is a tuple:
      (attribute value, attribute index number, attribute type,
      attribute length).

    C library equivalent : no equivalent
    """
    # info() reports (number of datasets, number of global attributes).
    nsds, natts = self.info()
    described = {}
    for idx in range(natts):
        attribute = self.attr(idx)
        name, attr_type, n_values = attribute.info()
        if full:
            described[name] = (attribute.get(), attribute.index(), attr_type, n_values)
        else:
            described[name] = attribute.get()
    return described
def i_ll(self):
    """Second moment of inertia around the length axis.

    :return: parallel-axis (A * d^2) contribution of all pads plus the
        pads' own second moments.
    """
    # Pad offsets from the centreline along the width axis.
    offsets = np.array([self.pad_position_w(i) for i in range(self.n_pads_w)]) - self.width / 2
    # Parallel-axis term, repeated for each row of pads along the length.
    area_term = sum(self.pad_area * offsets ** 2) * self.n_pads_l
    # Own second moment of every individual pad.
    own_term = self.pad_i_ll * self.n_pads
    return area_term + own_term
def rmdir(self, directory, missing_okay=False):
    """Forcefully remove the specified directory and all its children.

    :param directory: path on the MicroPython board to delete recursively
    :param missing_okay: when True, silently ignore a missing directory
    :raises RuntimeError: when the directory does not exist and
        ``missing_okay`` is False
    """
    # Build a script to walk an entire directory structure and delete every
    # file and subfolder. This is tricky because MicroPython has no os.walk
    # or similar function to walk folders, so this code does it manually
    # with recursion and changing directories. For each directory it lists
    # the files and deletes everything it can, i.e. all the files. Then
    # it lists the files again and assumes they are directories (since they
    # couldn't be deleted in the first pass) and recursively clears those
    # subdirectories. Finally when finished clearing all the children the
    # parent directory is deleted.
    command = """
        try:
            import os
        except ImportError:
            import uos as os
        def rmdir(directory):
            os.chdir(directory)
            for f in os.listdir():
                try:
                    os.remove(f)
                except OSError:
                    pass
            for f in os.listdir():
                rmdir(f)
            os.chdir('..')
            os.rmdir(directory)
        rmdir('{0}')
    """.format(directory)
    self._pyboard.enter_raw_repl()
    try:
        out = self._pyboard.exec_(textwrap.dedent(command))
    except PyboardError as ex:
        message = ex.args[2].decode("utf-8")
        # Check if this is an OSError #2, i.e. directory doesn't exist
        # and rethrow it as something more descriptive.
        if message.find("OSError: [Errno 2] ENOENT") != -1:
            if not missing_okay:
                raise RuntimeError("No such directory: {0}".format(directory))
        else:
            raise ex
    self._pyboard.exit_raw_repl()
def reset_offsets_if_needed(self, partitions):
    """Lookup and set offsets for any partitions which are awaiting an
    explicit reset.

    Arguments:
        partitions (set of TopicPartitions): the partitions to reset
    """
    # TODO: If there are several offsets to reset, we could submit offset requests in parallel
    for partition in partitions:
        # Only assigned partitions are checked for a pending reset.
        needs_reset = (self._subscriptions.is_assigned(partition)
                       and self._subscriptions.is_offset_reset_needed(partition))
        if needs_reset:
            self._reset_offset(partition)
def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
    """Return the ideal column width for the output from :func:`see.see`, taking
    the terminal width into account to avoid wrapping.

    :param default_width: width used when the terminal size is unknown
    :param max_width: hard upper bound on the returned width
    :return: the column width as an int
    """
    width = term_width()
    if width:  # pragma: no cover (no terminal info in Travis CI)
        # Never exceed the configured maximum, even on very wide terminals.
        return min(width, max_width)
    else:
        return default_width
def parse_docstring(docstring):
    '''Given a docstring, parse it into a description and epilog part.'''
    if docstring is None:
        return '', ''
    pieces = _DOCSTRING_SPLIT.split(docstring)
    # One piece: nothing to split off, the whole text is the description.
    if len(pieces) == 1:
        return docstring, ''
    if len(pieces) == 2:
        return pieces[0], pieces[1]
    # More than one split point is ambiguous.
    raise TooManySplitsError()
def __similarity(s1, s2, ngrams_fn, n=3):
    """The fraction of n-grams matching between two sequences.

    Computes the Sorensen-Dice coefficient over the two n-gram sets:
    2 * |intersection| / (|ngrams1| + |ngrams2|).

    Args:
        s1: a string
        s2: another string
        ngrams_fn: callable producing the n-grams of a string (takes ``n``
            as a keyword argument)
        n: an int for the n in n-gram

    Returns:
        float: the fraction of n-grams matching

    NOTE(review): raises ZeroDivisionError when both strings yield no
    n-grams (e.g. both shorter than n) — confirm callers guard this.
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
def addend_ids(self):
    """tuple of int ids of elements contributing to this subtotal.

    Any element id not present in the dimension or present but
    representing missing data is excluded.
    """
    valid_ids = self.valid_elements.element_ids
    candidates = self._subtotal_dict.get("args", [])
    # Preserve the original argument order while filtering invalid ids.
    return tuple(candidate for candidate in candidates if candidate in valid_ids)
def jboss_domain_server_log_dir(broker):
    """Command: JBoss domain server log directory.

    Scans the ``ps auxww`` output for JBoss domain-mode server processes
    and extracts the value of the ``-Djboss.server.log.dir`` JVM option.

    :param broker: insights broker providing the ``ps auxww`` spec content
    :return: deduplicated list of absolute log directory paths
    """
    ps = broker[DefaultSpecs.ps_auxww].content
    results = []
    # Compile once; bind .findall for repeated use in the loop below.
    findall = re.compile(r"\-Djboss\.server\.log\.dir=(\S+)").findall
    # JBoss domain server progress command content should contain jboss.server.log.dir
    for p in ps:
        # '-D[Server:' marks a domain-mode server process line.
        if '-D[Server:' in p:
            found = findall(p)
            if found:
                # Only get the path which is absolute
                results.extend(f for f in found if f[0] == '/')
    # Deduplicate; NOTE(review): set() loses ordering — confirm callers
    # don't rely on order.
    return list(set(results))
def next(self):
    """Return one of record in this batch in out-of-order.

    :raises: `StopIteration` when no more record is in this batch
    """
    position = self._records_iter
    # Exhausted: signal iteration end the classic way.
    if position >= len(self._records):
        raise StopIteration
    self._records_iter = position + 1
    return self._records[position]
def cluster_2_json(self):
    """transform this local object to Ariane server JSON object.

    :return: the JSON object — a plain dict carrying the cluster id,
        name and the ids of its containers
    """
    LOGGER.debug("Cluster.cluster_2_json")
    json_obj = {'clusterID': self.id, 'clusterName': self.name, 'clusterContainersID': self.containers_id}
    return json_obj
def _equalizeHistogram(img):
    '''histogram equalisation not bounded to int() or an image depth of 8 bit
    works also with negative numbers
    '''
    # to float if int:
    intType = None
    if 'f' not in img.dtype.str:
        # Map each integer dtype to the smallest float dtype that can
        # represent its full value range losslessly.
        TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16, np.dtype('uint16'): np.float32, np.dtype('uint32'): np.float64, np.dtype('uint64'): np.float64}
        intType = img.dtype
        img = img.astype(TO_FLOAT_TYPES[intType], copy=False)
    # get image depth: bin count matches the original integer depth
    DEPTH_TO_NBINS = {np.dtype('float16'): 256,  # uint8
                      np.dtype('float32'): 32768,  # uint16
                      np.dtype('float64'): 2147483648}  # uint32
    nBins = DEPTH_TO_NBINS[img.dtype]
    # scale to -1 to 1 due to skikit-image restrictions
    mn, mx = np.amin(img), np.amax(img)
    if abs(mn) > abs(mx):
        mx = mn
    # NOTE(review): divides by the extremum — a constant zero image would
    # raise a division warning/error here; confirm inputs are non-flat.
    img /= mx
    img = exposure.equalize_hist(img, nbins=nBins)
    img *= mx
    if intType:
        # Restore the original integer dtype.
        img = img.astype(intType)
    return img
def check_valid_rx_can_msg(result):
    """Checks if function :meth:`UcanServer.read_can_msg` returns a valid CAN message.

    :param ReturnCode result: Error code of the function.
    :return: True if a valid CAN messages was received, otherwise False.
    :rtype: bool
    """
    # Success, or any code above the WARNING threshold, counts as valid.
    # NOTE(review): comparing result.value against ReturnCode members
    # assumes ReturnCode behaves like an IntEnum — confirm.
    return (result.value == ReturnCode.SUCCESSFUL) or (result.value > ReturnCode.WARNING)
def top_priority_effect_per_transcript_id(self):
    """Highest priority effect for each unique transcript ID.

    :return: OrderedDict mapping transcript_id to the single
        top-priority effect among that transcript's variant effects
    """
    return OrderedDict((transcript_id, top_priority_effect(variant_effects)) for (transcript_id, variant_effects) in self.groupby_transcript_id().items())
def get_runs():
    """Send a dictionary of runs associated with the selected project.

    Usage description:
    This function is usually called to get and display the list of runs associated with a selected project available
    in the database.

    :return: JSON, {<int_keys>: <run_name>}; falls back to {"0": "__EMPTY"}
        on any lookup error
    """
    # NOTE(review): assert statements are stripped under `python -O`; the
    # `if` below already guards the method, so this is belt-and-braces only.
    assert request.method == "POST", "POST request expected received {}".format(request.method)
    if request.method == "POST":
        try:
            selected_project = request.form["selected_project"]
            runs = utils.get_runs(selected_project)
            return jsonify(runs)
        except Exception as e:
            # Log and return a sentinel payload instead of a server error.
            logging.error(e)
            return jsonify({"0": "__EMPTY"})
def load(self, format=None, *, kwargs={}):
    '''deserialize object from the file.

    auto detect format by file extension name if `format` is None.
    for example, `.json` will detect as `json`.

    * raise `FormatNotFoundError` on unknown format.
    * raise `SerializeError` on any serialize exceptions.
    '''
    # Delegates to the module-level `load` helper with this path object.
    # NOTE(review): the mutable default `kwargs={}` is shared across calls;
    # safe only if the helper never mutates it — confirm.
    return load(self, format=format, kwargs=kwargs)
def wait_for_conns(self, timeout=60, start_delay=0, interval=5, **kwargs):
    '''delays until all connections are working

    args:
        timeout: number of seconds to try to connecting. Error out when
                timeout is reached
        start_delay: number of seconds to wait before checking status
        interval: number of seconds to wait between checks

    :raises RuntimeError: when some connections are still failing at timeout
    :return: True when all connections came up
    '''
    log.setLevel(kwargs.get('log_level', self.log_level))
    timestamp = time.time()
    # Backdate the "last check" so the first real check fires only after
    # start_delay seconds have elapsed.
    last_check = time.time() + start_delay - interval
    last_delay_notification = time.time() - interval
    timeout += 1
    failing = True
    up_conns = {}
    # loop until the server is up or the timeout is reached
    while ((time.time() - timestamp) < timeout) and failing:
        # if delaying, the start of the check, print waiting to start
        # (re-notify at most every 5 seconds)
        if start_delay > 0 and time.time() - timestamp < start_delay and (time.time() - last_delay_notification) > 5:
            print("Delaying server status check until %ss. Current time: %ss" % (start_delay, int(time.time() - timestamp)))
            last_delay_notification = time.time()
        # check status at the specified 'interval' until the server is up
        first_check = True
        while ((time.time() - last_check) > interval) and failing:
            msg = ["\tChecked status of servers at %ss" % int((time.time() - timestamp)), "\t** CONNECTION STATUS:"]
            last_check = time.time()
            failing = self.failing
            # Connections that just came up on this pass.
            new_up = (self.active.keys() - failing.keys()) - up_conns.keys()
            msg += ["\t\t UP - %s: %s" % (key, self.conns[key]) for key in new_up]
            up_conns.update({key: self.conns[key] for key in new_up})
            msg.append("\t*** '%s' connection(s) up" % len(up_conns))
            msg += ["\t\t FAILING - %s: %s" % (key, self.conns[key]) for key in failing]
            log.info("** CONNECTION STATUS:\n%s", "\n".join(msg))
            if not failing:
                log.info("**** Servers up at %ss" % int((time.time() - timestamp)))
                break
    if failing:
        raise RuntimeError("Unable to establish connection(s): ", failing)
    # Notify every connection that its delayed check has passed.
    for conn in up_conns.values():
        conn.delay_check_pass()
    return not failing
def key_handle_to_int(this):
    """Turn "123" into 123 and "KSM1" into 827151179
    (0x314d534b, 'K' = 0x4b, S = '0x53', M = 0x4d).

    YHSM is little endian, so this makes the bytes KSM1 appear
    in the most human readable form in packet traces.

    :param this: key handle as a decimal string, hex string ("0x..."),
        a 4-character ASCII tag, or 4 already-packed bytes
    :returns: the key handle as an int
    :raises pyhsm.exception.YHSM_Error: when the input cannot be parsed
    """
    try:
        num = int(this)
        return num
    except ValueError:
        if this[:2] == "0x":
            # Hexadecimal representation, e.g. "0x314d534b".
            return int(this, 16)
        if len(this) == 4:
            # Four-character tag such as "KSM1": interpret the raw bytes as
            # a little-endian unsigned 32-bit integer. struct.unpack requires
            # a bytes-like object on Python 3, so encode str input first
            # (fixes TypeError on str input under Python 3).
            raw = this.encode("latin-1") if isinstance(this, str) else this
            num = struct.unpack('<I', raw)[0]
            return num
    raise pyhsm.exception.YHSM_Error("Could not parse key_handle '%s'" % (this))
def _process_pub_dbxref(self, limit):
    """Xrefs for publications (ie FBrf = PMID).

    Links each FlyBase publication to its external identifiers
    (PMID, ISBN, DOI) as same-individual assertions in the graph.

    :param limit: maximum number of rows to process (None for all)
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    raw = '/'.join((self.rawdir, 'pub_dbxref'))
    LOG.info("processing pub_dbxref")
    line_counter = 0
    with open(raw, 'r') as f:
        filereader = csv.reader(f, delimiter='\t', quotechar='\"')
        f.readline()
        # read the header row; skip
        for line in filereader:
            (pub_dbxref_id, pub_id, dbxref_id, is_current) = line
            # 4964843 222 395730 t
            pub_key = pub_id
            pub_id = self.idhash['publication'][pub_key]
            # In test mode only the whitelisted publication keys are kept.
            if self.test_mode and int(pub_key) not in self.test_keys['pub']:
                continue
            # get any dbxrefs for pubs, including pmids and dois
            dbxref_key = dbxref_id
            if str(dbxref_key) in self.dbxrefs:
                dbxrefs = self.dbxrefs[str(dbxref_key)]
                # pub_dbs = [75, 51, 76, 95, 126]
                pmid_ids = [50, 77, 275, 286, 347]
                # flybase_ids = [4]  # TODO unused
                isbn = [75, 51]
                for d in dbxrefs:
                    dbxref_id = None
                    if int(d) in pmid_ids:
                        # Normalise to a CURIE with the PMID prefix.
                        if re.match(r'^PMID', dbxrefs[d]):
                            dbxref_id = dbxrefs[d].strip()
                        else:
                            dbxref_id = 'PMID:' + dbxrefs[d].strip()
                        model.makeLeader(dbxref_id)
                    elif int(d) in isbn:
                        dbxref_id = 'ISBN:' + dbxrefs[d].strip()
                    elif int(d) == 161:
                        dbxref_id = 'DOI:' + dbxrefs[d].strip()
                    # elif int(d) == 4:
                    #     dbxref_id = 'FlyBase:' + dbxrefs[d].strip()
                    if dbxref_id is not None:
                        reference = Reference(graph, dbxref_id, self.globaltt['publication'])
                        reference.addRefToGraph()
                        model.addSameIndividual(pub_id, dbxref_id)
                        line_counter += 1
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
def selectOptimalChunk(self, peer):
    """select an optimal chunk to send to a peer.

    Uses rarest-first selection (a la BitTorrent) among the chunks this
    node has and the peer wants, with a random tiebreaker.

    @return: int(chunkNumber), str(chunkData) if there is data to be sent,
    otherwise None, None
    """
    # Local import: hashlib replaces the 'sha' module that was removed in
    # Python 3 (the original also used the deprecated 'sets' module and
    # py2-only dict iterators).
    import hashlib
    # stuff I have
    have = set(self.mask.positions(1))
    # stuff that this peer wants
    want = set(self.peers[peer].mask.positions(0))
    exchangeable = have.intersection(want)
    finalSet = dict.fromkeys(exchangeable, 0)
    # taking a page from bittorrent, rarest-first
    for chunkNumber in exchangeable:
        for otherPeer in self.peers.values():
            # Count how many peers are missing this chunk (its rarity).
            finalSet[chunkNumber] += not otherPeer.mask[chunkNumber]
    # Random middle element breaks ties between equally rare chunks.
    rarityList = [(rarity, random.random(), chunkNumber) for (chunkNumber, rarity) in finalSet.items()]
    if not rarityList:
        return None, None
    rarityList.sort()
    # sorted in ascending order of rarity
    chunkNumber = rarityList[-1][-1]
    # sanity check
    assert self.mask[chunkNumber], "I wanted to send a chunk I didn't have"
    self.file.seek(chunkNumber * CHUNK_SIZE)
    chunkData = self.file.read(CHUNK_SIZE)
    self.sha1sums[chunkNumber] = hashlib.sha1(chunkData).digest()
    return chunkNumber, chunkData
def from_lasio(cls, l, remap=None, funcs=None):
    """Make a Location object from a lasio object. Assumes we're starting
    with a lasio object, l.

    Args:
        l (lasio).
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.

    Returns:
        Location. An instance of this class.
    """
    params = {}
    # Copy the caller's dict before adding the 'location' override — the
    # original wrote into the dict passed in, mutating the caller's state.
    funcs = dict(funcs) if funcs else {}
    funcs['location'] = str
    for field, (sect, code) in las_fields['location'].items():
        params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs)
    return cls(params)
def ifelse(arg, true_expr, false_expr):
    """Shorthand for implementing ternary expressions.

        bool_expr.ifelse(0, 1)

    e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END

    :param arg: boolean expression selecting between the branches
    :param true_expr: expression used when `arg` is true
    :param false_expr: expression used when `arg` is false
    :return: a searched-case expression
    """
    # Result will be the result of promotion of true/false exprs. These
    # might be conflicting types; same type resolution as case expressions
    # must be used.
    case = ops.SearchedCaseBuilder()
    return case.when(arg, true_expr).else_(false_expr).end()
def get_last_weeks(number_of_weeks):
    """Get the last weeks.

    Returns a list of (year, week) tuples, starting with the current ISO
    week and walking backwards.
    """
    now = datetime.now()
    iso_year = now.isocalendar()[0]
    iso_week = now.isocalendar()[1]
    weeks = []
    for offset in range(number_of_weeks):
        # get_week_dates normalises week numbers that underflow the year.
        week_start = get_week_dates(iso_year, iso_week - offset, as_timestamp=True)[0]
        n_year, n_week = get_year_week(week_start)
        weeks.append((n_year, n_week))
    return weeks
def get_channel_access_token(self, channel):
    """Return the token and sig for the given channel.

    :param channel: the channel or channel name to get the access token for
    :type channel: :class:`channel` | :class:`str`
    :returns: The token and sig for the given channel
    :rtype: (:class:`unicode`, :class:`unicode`)
    :raises: None
    """
    # Accept either a Channel model instance or a bare channel name.
    if isinstance(channel, models.Channel):
        channel = channel.name
    r = self.oldapi_request('GET', 'channels/%s/access_token' % channel).json()
    return r['token'], r['sig']
def fmap_info(metadata, img, config, layout):
    """Generate a paragraph describing field map acquisition information.

    Parameters
    ----------
    metadata : :obj:`dict`
        Data from the json file associated with the field map, in dictionary
        form.
    img : :obj:`nibabel.Nifti1Image`
        The nifti image of the field map.
    config : :obj:`dict`
        A dictionary with relevant information regarding sequences, sequence
        variants, phase encoding directions, and task names.
    layout :
        BIDS layout object used to resolve the scans listed in the field
        map's "IntendedFor" entry.

    Returns
    -------
    desc : :obj:`str`
        A description of the field map's acquisition information.
    """
    dir_ = config['dir'][metadata['PhaseEncodingDirection']]
    n_slices, vs_str, ms_str, fov_str = get_sizestr(img)
    seqs, variants = get_seqstr(config, metadata)
    if 'EchoTime' in metadata.keys():
        # Convert seconds to milliseconds for the human-readable text.
        te = num_to_str(metadata['EchoTime'] * 1000)
    else:
        te = 'UNKNOWN'
    if 'IntendedFor' in metadata.keys():
        # Group the targeted scans by scan type, collecting run numbers.
        scans = metadata['IntendedFor']
        run_dict = {}
        for scan in scans:
            fn = basename(scan)
            iff_file = [f for f in layout.get(extensions='nii.gz') if fn in f.path][0]
            run_num = int(iff_file.run)
            ty = iff_file.entities['suffix'].upper()
            if ty == 'BOLD':
                # BOLD scans are described together with their task name.
                iff_meta = layout.get_metadata(iff_file.path)
                task = iff_meta.get('TaskName', iff_file.entities['task'])
                ty_str = '{0} {1} scan'.format(task, ty)
            else:
                ty_str = '{0} scan'.format(ty)
            if ty_str not in run_dict.keys():
                run_dict[ty_str] = []
            run_dict[ty_str].append(run_num)
        # Spell run numbers as ordinal words ("first", "second", ...).
        for scan in run_dict.keys():
            run_dict[scan] = [num2words(r, ordinal=True) for r in sorted(run_dict[scan])]
        out_list = []
        for scan in run_dict.keys():
            if len(run_dict[scan]) > 1:
                s = 's'
            else:
                s = ''
            run_str = list_to_str(run_dict[scan])
            string = '{rs} run{s} of the {sc}'.format(rs=run_str, s=s, sc=scan)
            out_list.append(string)
        for_str = ' for the {0}'.format(list_to_str(out_list))
    else:
        for_str = ''
    desc = '''
            A {variants} {seqs} field map (phase encoding:
            {dir_}; {n_slices} slices; repetition time, TR={tr}ms;
            echo time, TE={te}ms; flip angle, FA={fa}<deg>;
            field of view, FOV={fov}mm; matrix size={ms};
            voxel size={vs}mm) was acquired{for_str}.
            '''.format(variants=variants, seqs=seqs, dir_=dir_, for_str=for_str, n_slices=n_slices, tr=num_to_str(metadata['RepetitionTime'] * 1000), te=te, fa=metadata.get('FlipAngle', 'UNKNOWN'), vs=vs_str, fov=fov_str, ms=ms_str)
    # Collapse the triple-quoted template onto one line and squeeze any
    # remaining runs of whitespace down to single spaces.
    desc = desc.replace('\n', ' ').lstrip()
    while '  ' in desc:
        desc = desc.replace('  ', ' ')
    return desc
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    value = registry_key.GetValueByName('AppCompatCache')
    if not value:
        return
    value_data = value.data
    value_data_size = len(value.data)
    # The signature determines which on-disk cache format is being parsed.
    format_type = self._CheckSignature(value_data)
    if not format_type:
        parser_mediator.ProduceExtractionWarning('Unsupported signature in AppCompatCache key: {0:s}'.format(registry_key.path))
        return
    header_object = self._ParseHeader(format_type, value_data)
    # On Windows Vista and 2008 when the cache is empty it will
    # only consist of the header.
    if value_data_size <= header_object.header_size:
        return
    cached_entry_offset = header_object.header_size
    self._cached_entry_data_type_map = self._GetCachedEntryDataTypeMap(format_type, value_data, cached_entry_offset)
    if not self._cached_entry_data_type_map:
        raise errors.ParseError('Unable to determine cached entry data type.')
    # Select the per-format entry parser.
    parse_cached_entry_function = None
    if format_type == self._FORMAT_TYPE_XP:
        parse_cached_entry_function = self._ParseCachedEntryXP
    elif format_type == self._FORMAT_TYPE_2003:
        parse_cached_entry_function = self._ParseCachedEntry2003
    elif format_type == self._FORMAT_TYPE_VISTA:
        parse_cached_entry_function = self._ParseCachedEntryVista
    elif format_type == self._FORMAT_TYPE_7:
        parse_cached_entry_function = self._ParseCachedEntry7
    elif format_type == self._FORMAT_TYPE_8:
        parse_cached_entry_function = self._ParseCachedEntry8
    elif format_type == self._FORMAT_TYPE_10:
        parse_cached_entry_function = self._ParseCachedEntry10
    cached_entry_index = 0
    while cached_entry_offset < value_data_size:
        cached_entry_object = parse_cached_entry_function(value_data, cached_entry_offset)
        event_data = AppCompatCacheEventData()
        event_data.entry_index = cached_entry_index + 1
        event_data.key_path = registry_key.path
        event_data.offset = cached_entry_offset
        event_data.path = cached_entry_object.path
        if cached_entry_object.last_modification_time is not None:
            # A zero timestamp means "not set" rather than the epoch.
            if not cached_entry_object.last_modification_time:
                date_time = dfdatetime_semantic_time.SemanticTime('Not set')
            else:
                date_time = dfdatetime_filetime.Filetime(timestamp=cached_entry_object.last_modification_time)
            # TODO: refactor to file modification event.
            event = time_events.DateTimeValuesEvent(date_time, 'File Last Modification Time')
            parser_mediator.ProduceEventWithEventData(event, event_data)
        if cached_entry_object.last_update_time is not None:
            if not cached_entry_object.last_update_time:
                date_time = dfdatetime_semantic_time.SemanticTime('Not set')
            else:
                date_time = dfdatetime_filetime.Filetime(timestamp=cached_entry_object.last_update_time)
            # TODO: refactor to process run event.
            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        cached_entry_offset += cached_entry_object.cached_entry_size
        cached_entry_index += 1
        # Stop after the advertised number of entries, when known.
        if (header_object.number_of_cached_entries != 0 and cached_entry_index >= header_object.number_of_cached_entries):
            break
def focusInEvent(self, e):
    """Qt Override.

    Re-select the current row whenever the table regains keyboard focus.
    """
    super(ShortcutsTable, self).focusInEvent(e)
    self.selectRow(self.currentIndex().row())
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        currently empty for open-ephys
    """
    subj_id = self.filename.stem
    # use directory name as subject name
    start_time = _read_date(self.settings_xml)
    s_freq, channels = _read_openephys(self.openephys_file)
    # only use channels that are actually in the folder
    chan_name = []
    self.channels = []
    gain = []
    for chan in channels:
        channel_filename = (self.filename / chan['filename'])
        if channel_filename.exists():
            chan_name.append(chan['name'])
            self.channels.append(channel_filename)
            # _check_header also validates the per-file sampling frequency.
            gain.append(_check_header(channel_filename, s_freq))
        else:
            lg.warning(f'could not find {chan["filename"]} in {self.filename}')
    self.gain = array(gain)
    n_blocks, n_samples = _read_n_samples(self.channels[0])
    # All blocks share the same fixed length.
    self.blocks = ones(n_blocks, dtype='int') * BLK_LENGTH
    orig = {}
    return subj_id, start_time, s_freq, chan_name, n_samples, orig
def search_order(self, limit=100, offset=0, common_name_pattern=None, status=None, contact_handle=None):
    """Search all SSL certificate orders.

    :param limit: maximum number of orders to return
    :param offset: pagination offset
    :param common_name_pattern: optional filter on the certificate common name
    :param status: optional status filter (serialized as a simple array)
    :param contact_handle: optional filter on the contact handle
    :return: the matching orders as SSLOrder models
    """
    # OE() emits optional elements only when the value is not None.
    response = self.request(E.searchOrderSslCertRequest(E.limit(limit), E.offset(offset), OE('commonNamePattern', common_name_pattern), OE('status', status, transform=_simple_array), OE('contactHandle', contact_handle), ))
    return response.as_models(SSLOrder)
def validate_timeout_or_zero(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds for the case where None is an error
    and 0 is valid. Setting the timeout to nothing in the URI string is a
    config error.
    """
    if value is None:
        raise ConfigurationError("%s cannot be None" % (option,))
    # Zero (numeric or the string "0") disables the timeout entirely.
    if value in (0, "0"):
        return 0
    # Otherwise validate as a positive float and convert ms -> seconds.
    return validate_positive_float(option, value) / 1000.0
def get_supported_methods(self, url):
    """Get a list of supported methods for a url and optional host.

    :param url: URL string (including host)
    :return: frozenset of supported methods
    """
    route = self.routes_all.get(url)
    # A missing route, or a route without methods, yields an empty set.
    methods = getattr(route, "methods", None)
    return methods if methods else frozenset()
async def set_led_mode(self, led_id, mode, timeout=OTGW_DEFAULT_TIMEOUT):
    """Configure the functions of the six LEDs (A-F) that can
    optionally be connected to pins RB3/RB4/RB6/RB7 and the GPIO
    pins of the PIC. The following functions are currently
    available:

    R Receiving an Opentherm message from the thermostat or boiler
    X Transmitting an Opentherm message to the thermostat or boiler
    T Transmitting or receiving a message on the master interface
    B Transmitting or receiving a message on the slave interface
    O Remote setpoint override is active
    F Flame is on
    H Central heating is on
    W Hot water is on
    C Comfort mode (Domestic Hot Water Enable) is on
    E Transmission error has been detected
    M Boiler requires maintenance
    P Raised power mode active on thermostat interface.

    Return the new mode for the specified led, or None on failure.
    This method is a coroutine
    """
    # Validate both the LED identifier and the requested function code;
    # anything else silently returns None (falls off the end).
    if led_id in "ABCDEF" and mode in "RXTBOFHWCEMP":
        # Look up the per-LED command constant (e.g. OTGW_CMD_LED_A)
        # from this module's globals.
        cmd = globals().get("OTGW_CMD_LED_{}".format(led_id))
        status = {}
        ret = await self._wait_for_cmd(cmd, mode, timeout)
        if ret is None:
            return
        # Mirror the confirmed mode into the cached status dict under
        # the matching status variable (e.g. OTGW_LED_A).
        var = globals().get("OTGW_LED_{}".format(led_id))
        status[var] = ret
        self._update_status(status)
        return ret
def mimetype(self):
    """The mimetype (content type without charset etc.)"""
    content_type = self.headers.get("content-type")
    if not content_type:
        # No Content-Type header: implicitly no mimetype.
        return None
    # Drop any parameters such as "; charset=utf-8".
    return content_type.split(";")[0].strip()
def _disconnect(self):
    """Disconnect from the transport."""
    proto = self.protocol
    if not (proto and proto.transport):
        # Nothing to close; still drop the protocol reference so the
        # object does not hold on to a dead protocol.
        self.protocol = None
        return
    _LOGGER.info('Disconnecting from gateway')
    proto.transport.close()
    self.protocol = None
def _deserialize(self, value, attr, data):
    """Deserialize string value."""
    # Let the parent field class do the actual deserialization, then
    # trim surrounding whitespace from the resulting string.
    deserialized = super(TrimmedString, self)._deserialize(value, attr, data)
    return deserialized.strip()
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'type' ) and self . type is not None :
_dict [ 'type' ] = self . type
if hasattr ( self , 'data' ) and self . data is not None :
_dict [ 'data' ] = self . data . _to_dict ( )
return _dict |
def _get_consent_id ( self , requester , user_id , filtered_attr ) :
"""Get a hashed id based on requester , user id and filtered attributes
: type requester : str
: type user _ id : str
: type filtered _ attr : dict [ str , str ]
: param requester : The calling requester
: param user _ id : The authorized user id
: param filtered _ attr : a list containing all attributes to be sent
: return : an id""" | filtered_attr_key_list = sorted ( filtered_attr . keys ( ) )
hash_str = ""
for key in filtered_attr_key_list :
_hash_value = "" . join ( sorted ( filtered_attr [ key ] ) )
hash_str += key + _hash_value
id_string = "%s%s%s" % ( requester , user_id , hash_str )
return urlsafe_b64encode ( hashlib . sha512 ( id_string . encode ( "utf-8" ) ) . hexdigest ( ) . encode ( "utf-8" ) ) . decode ( "utf-8" ) |
def create(description='<Created by Python>', connection=None):
    """Creates a new changelist.

    :param connection: Connection to use to create the changelist
    :type connection: :class:`.Connection`
    :param description: Description for new changelist
    :type description: str
    :returns: :class:`.Changelist`
    """
    conn = connection or Connection()
    # Indent continuation lines so a multi-line description survives
    # the p4 change-form format.
    indented = description.replace('\n', '\n\t')
    form = NEW_FORMAT.format(client=str(conn.client), description=indented)
    output = conn.run(['change', '-i'], stdin=form, marshal_output=False)
    # p4 replies like "Change 12345 created."; the number is token 1.
    return Changelist(int(output.split()[1]), conn)
def prior_transform(self, unit_coords, priors, prior_args=None):
    """An example of one way to use the `Prior` objects below to go from unit
    cube to parameter space, for nested sampling. This takes and returns a
    list instead of an array, to accomodate possible vector parameters. Thus
    one will need something like ``theta_array = np.concatenate(*theta)``

    :param unit_coords:
        Coordinates on the unit prior hyper-cube. Iterable.
    :param priors:
        A list of `Prior` objects, iterable of same length as `unit_coords`.
    :param prior_args: (optional)
        A list of dictionaries of prior function keyword arguments.
    :returns theta:
        A list of parameter values corresponding to the given coordinates on
        the prior unit hypercube.
    """
    # FIX: the original used a mutable default argument (prior_args=[]),
    # which is a classic Python pitfall; use None as the sentinel instead.
    if prior_args is None:
        prior_args = []
    theta = []
    for i, (u, p) in enumerate(zip(unit_coords, priors)):
        # Per-prior keyword arguments are optional; a missing entry
        # simply means "no extra arguments" for that prior.
        try:
            kwargs = prior_args[i]
        except IndexError:
            kwargs = {}
        theta.append(p.unit_transform(u, **kwargs))
    return theta
def build_groups(self, tokens):
    """Build dict of groups from list of tokens"""
    groups = {}
    for tok in tokens:
        if tok.group_end:
            # Token opens a group that is closed by a separate marker.
            groups[tok.group_start] = (tok, MatchType.start)
            groups[tok.group_end] = (tok, MatchType.end)
        else:
            # Self-contained token: start and end in one.
            groups[tok.group_start] = (tok, MatchType.single)
    return groups
def create_volume(self, volume_name: str, driver_spec: str = None):
    """Create new docker volumes.

    Only the manager nodes can create a volume.

    Args:
        volume_name (string): Name for the new docker volume
        driver_spec (string): Driver for the docker volume; defaults to
            the 'local' driver when not given.

    Raises:
        RuntimeError: if this node is not a swarm manager.
    """
    # Raise an exception if we are not a manager.
    # BUG FIX: the original message incorrectly talked about deleting
    # services; this function creates volumes.
    if not self._manager:
        raise RuntimeError('Volumes can only be created '
                           'on swarm manager nodes')
    driver = driver_spec if driver_spec else 'local'
    self._client.volumes.create(name=volume_name, driver=driver)
def clear(self):
    """Erase the contents of the object"""
    # Reset every stored field back to its pristine (unset) value.
    for field in ('country_code', 'national_number', 'extension',
                  'italian_leading_zero', 'number_of_leading_zeros',
                  'raw_input', 'preferred_domestic_carrier_code'):
        setattr(self, field, None)
    self.country_code_source = CountryCodeSource.UNSPECIFIED
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
    """Pretty prints object name

    @obj: the object whose name you want to pretty print
    @maxlen: #int maximum length of an object name to print
    @color: your choice of :mod:colors or |None|
    -> #str pretty object name

    from vital.debug import Look
    print(Look.pretty_objname(dict))
    # -> 'dict\x1b[1;36m<builtins>\x1b[1;m'
    """
    # Name of the module/class that owns |obj| ("" when unknown).
    parent_name = lambda_sub("", get_parent_name(obj) or "")
    objname = get_obj_name(obj)
    if color:
        # close=False leaves the color escape open; the trailing RESET
        # appended below terminates it.
        objname += colorize("<{}>".format(parent_name), color, close=False)
    else:
        objname += "<{}>".format(parent_name)
    # Truncate over-long names while keeping a closing marker.
    objname = objname if len(objname) < maxlen else objname[:(maxlen - 1)] + "…>"
    if color:
        objname += colors.RESET
    return objname
def encode(self):
    '''Compress the associated encodable payload,
    prepend the header then encode with base64 if requested

    Returns:
        the b64 encoded wire encoding of the histogram (as a string)
        or the compressed payload (as a string, if b64 wrapping is disabled)
    '''
    # only compress the first non zero buckets
    # if histogram is empty we do not encode any counter
    if self.histogram.total_count:
        relevant_length = self.histogram.get_counts_array_index(self.histogram.max_value) + 1
    else:
        relevant_length = 0
    cpayload = self.payload.compress(relevant_length)
    if self.b64_wrap:
        # Record the compressed size in the external header, then
        # serialize the raw header bytes followed by the payload.
        self.header.length = len(cpayload)
        header_str = ctypes.string_at(addressof(self.header), ext_header_size)
        return base64.b64encode(header_str + cpayload)
    return cpayload
def install_program(self):
    """install supervisor program config file"""
    # Render the program template with this part's options, then write
    # it into the deployment's conf.d directory as <program>.conf.
    rendered = templ_program.render(**self.options)
    conf_options = {
        'deployment': self.deployment_name,
        'directory': os.path.join(self.options['etc-directory'], 'conf.d'),
        'text': rendered,
    }
    config = Configuration(self.buildout, self.program + '.conf', conf_options)
    return [config.install()]
def group_by_month(self):
    """Return a dictionary of this collection's values grouped by each month.

    Key values are between 1-12; months outside the analysis period map
    to empty lists.
    """
    hourly_data_by_month = OrderedDict()
    # Pre-populate all 12 months so absent months map to empty lists.
    # FIX: range replaces the Python-2-only xrange (same iteration).
    for month in range(1, 13):
        hourly_data_by_month[month] = []
    a_per = self.header.analysis_period
    a_per_months = a_per.months_int
    # Number of values belonging to the first (possibly partial) month.
    indx = 24 * a_per.timestep * abs(
        a_per.st_day - 1 - a_per._num_of_days_each_month[a_per_months[0] - 1])
    hourly_data_by_month[a_per_months[0]] = self._values[0:indx + 1]
    if len(a_per_months) > 1:
        for mon in a_per_months[1:]:
            interval = a_per._num_of_days_each_month[mon - 1] * 24 * a_per.timestep
            try:
                hourly_data_by_month[mon] = self._values[indx:indx + interval + 1]
            except IndexError:
                # last items: the period ends mid-month, so take whatever
                # values remain.  NOTE(review): plain list slicing never
                # raises IndexError - presumably _values is a custom
                # sequence type; confirm before removing this handler.
                hourly_data_by_month[mon] = self._values[indx:]
            indx += interval
    return hourly_data_by_month
def get_assessment_notification_session(self, assessment_receiver):
    """Gets the notification session for notifications pertaining to assessment changes.

    arg:    assessment_receiver
            (osid.assessment.AssessmentReceiver): the assessment
            receiver interface
    return: (osid.assessment.AssessmentNotificationSession) - an
            ``AssessmentNotificationSession``
    raise:  NullArgument - ``assessment_receiver`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_notification()``
            is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_notification()`` is ``true``.*
    """
    if not self.supports_assessment_notification():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    # Assessment notifications are delivered via the generic item
    # notification session in this implementation.
    return sessions.ItemNotificationSession(runtime=self._runtime, receiver=assessment_receiver)
def plot(config, image, file):
    """Plot a single CIFAR image."""
    # Drop singleton dimensions (e.g. a leading batch axis of size 1)
    # before saving.
    squeezed = np.squeeze(image)
    print(file, squeezed.shape)
    imsave(file, squeezed)
def current_target(self):
    """Return current target."""
    step = self.state['current_step']
    # Scan the current step's actions from newest to oldest and return
    # the most recent 'target' value; implicitly None when absent.
    for name, value in reversed(self.state[step]['actions']):
        if name == 'target':
            return value
    return None
def validate(collection, onerror: Callable[[str, List], None] = None):
    """Validate BioC data structure."""
    # Build a validator wired to the optional error callback, then run it.
    validator = BioCValidator(onerror)
    validator.validate(collection)
def make_filename_hash(key):
    """Convert the given key (a simple Python object) to a unique-ish hash
    suitable for a filename."""
    key_repr = repr(key).replace(BASE_DIR, '').encode('utf8')
    # This is really stupid but necessary for making the repr()s be the same
    # on Python 2 and 3 and thus allowing the test suite to run on both.
    # TODO better solutions include: not using a repr, not embedding hashes
    # in the expected test results
    if sys.platform == 'win32':
        # Make sure the hash is the same on win and unix platforms:
        # normalize path separators and Python-2 unicode reprs.
        key_repr = key_repr.replace(b'\\\\', b'/')
        key_repr = re.sub(b"\\bu'", b"'", key_repr)
    digest = hashlib.md5(key_repr).digest()
    # '__' replaces the '+/' alphabet so the result is filename-safe;
    # '=' padding is stripped for brevity.
    return base64.b64encode(digest, b'__').decode('ascii').rstrip('=')
def handle_detached(sender, device):
    """Handles detached events from USBDevice.start_detection()."""
    vendor, product, sernum, ifcount, description = device
    # Close and remove the device from our list, if we know about it.
    if sernum in __devices:
        __devices[sernum].close()
        del __devices[sernum]
    print('detached', sernum)
def require_remote_ref_path(func):
    """A decorator raising a ValueError if we are not a valid remote, based on the path.

    FIX: the original docstring said TypeError, but the code raises
    ValueError; the docstring now matches the behavior.
    """
    from functools import wraps

    # functools.wraps preserves __name__ (as before) plus docstring,
    # module and __wrapped__, which the manual copy missed.
    @wraps(func)
    def wrapper(self, *args):
        if not self.is_remote():
            raise ValueError("ref path does not point to a remote reference: %s" % self.path)
        return func(self, *args)
    # END wrapper
    return wrapper
def _oval_string ( self , p1 , p2 , p3 , p4 ) :
"""Return / AP string defining an oval within a 4 - polygon provided as points""" | def bezier ( p , q , r ) :
f = "%f %f %f %f %f %f c\n"
return f % ( p . x , p . y , q . x , q . y , r . x , r . y )
kappa = 0.55228474983
# magic number
ml = p1 + ( p4 - p1 ) * 0.5
# middle points . . .
mo = p1 + ( p2 - p1 ) * 0.5
# for each . . .
mr = p2 + ( p3 - p2 ) * 0.5
# polygon . . .
mu = p4 + ( p3 - p4 ) * 0.5
# side
ol1 = ml + ( p1 - ml ) * kappa
# the 8 bezier
ol2 = mo + ( p1 - mo ) * kappa
# helper points
or1 = mo + ( p2 - mo ) * kappa
or2 = mr + ( p2 - mr ) * kappa
ur1 = mr + ( p3 - mr ) * kappa
ur2 = mu + ( p3 - mu ) * kappa
ul1 = mu + ( p4 - mu ) * kappa
ul2 = ml + ( p4 - ml ) * kappa
# now draw , starting from middle point of left side
ap = "%f %f m\n" % ( ml . x , ml . y )
ap += bezier ( ol1 , ol2 , mo )
ap += bezier ( or1 , or2 , mr )
ap += bezier ( ur1 , ur2 , mu )
ap += bezier ( ul1 , ul2 , ml )
return ap |
def get_all_indirect_statements(self):
    """Get all indirect increases/decreases BEL statements.

    This method stores the results of the query in self.all_indirect_stmts
    as a list of strings. The SPARQL query used to find indirect BEL
    statements searches for all statements whose predicate is either
    Increases or Decreases."""
    # SPARQL: select every Statement whose relationship is the
    # (indirect) Increases or Decreases predicate.
    q_stmts = prefixes + """
        SELECT ?stmt
        WHERE {
            ?stmt a belvoc:Statement .
            {
                { ?stmt belvoc:hasRelationship belvoc:Increases . }
                UNION
                { ?stmt belvoc:hasRelationship belvoc:Decreases . }
            }
        }
        """
    res_stmts = self.g.query(q_stmts)
    # Keep only the statement URIs, converted to readable strings.
    self.all_indirect_stmts = [strip_statement(stmt[0]) for stmt in res_stmts]
def read_sj_out_tab(filename):
    """Read an SJ.out.tab file as produced by the RNA-STAR aligner into a
    pandas Dataframe.

    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the SJ.out.tab file you want to read in

    Returns
    -------
    sj : pandas.DataFrame
        Dataframe of splice junctions
    """
    # STAR encodes the intron motif as an integer 0-6; a dict-based map
    # replaces the original if/elif chain.  Unknown codes become missing
    # values, matching the chain's implicit None.
    motif_codes = {
        0: 'non-canonical',
        1: 'GT/AG',
        2: 'CT/AC',
        3: 'GC/AG',
        4: 'CT/GC',
        5: 'AT/AC',
        6: 'GT/AT',
    }
    sj = pd.read_table(filename, header=None, names=COLUMN_NAMES, low_memory=False)
    sj.intron_motif = sj.intron_motif.map(motif_codes)
    sj.annotated = sj.annotated.map(bool)
    # See https://groups.google.com/d/msg/rna-star/B0Y4oH8ZSOY/NO4OJbbUU4cJ for
    # definition of strand in SJout files.
    # FIX: the original called sj.strand.astype('object') without
    # assigning the result - a no-op that has been removed; the apply
    # below replaces the column wholesale anyway.
    sj.strand = sj.strand.apply(lambda x: ['unk', '+', '-'][x])
    sj = sj.sort_values(by=['chrom', 'start', 'end'])
    return sj
def annotations_func(func):
    """Works like annotations, but is only applicable to functions,
    methods and properties."""
    # NOTE(review): the empty-dict assignment below is immediately
    # overwritten by the unconditional assignment that follows;
    # presumably _get_type_hints needs __annotations__ to exist while it
    # runs - confirm before simplifying.
    if not has_type_hints(func):  # What about defaults ?
        func.__annotations__ = {}
    func.__annotations__ = _get_type_hints(func, infer_defaults=False)
    return func
def openOrders(self) -> List[Order]:
    """List of all open orders."""
    # Collect the order of every trade whose status is not terminal.
    orders = []
    for trade in self.wrapper.trades.values():
        if trade.orderStatus.status not in OrderStatus.DoneStates:
            orders.append(trade.order)
    return orders
def close_client_stream(client_stream, unix_path):
    """Closes provided client stream"""
    try:
        client_stream.shutdown(socket.SHUT_RDWR)
        if unix_path:
            # Unix-domain socket: log by path.
            logger.debug('%s: Connection closed', unix_path)
        else:
            # TCP socket: log by remote host and port.
            host, port = client_stream.getpeername()[:2]
            logger.debug('%s:%s: Connection closed', host, port)
    except (socket.error, OSError) as exception:
        # Best-effort shutdown: log and fall through to close().
        logger.debug('Connection closing error: %s', exception)
    client_stream.close()
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
    """Open the specified URL, using HTTP basic authentication to provide
    the specified credentials to the server as part of the request.
    Returns the response."""
    # NOTE(review): verify=False disables TLS certificate validation.
    credentials = HTTPBasicAuth(user, pwd)
    return requests.get(url, auth=credentials, verify=False)
def p_contextualize_items(self, t):
    """contextualize_items : contextualize_items contextualize_item
                           | contextualize_item
                           | empty"""
    # NOTE: the docstring above IS the PLY grammar production - do not
    # edit it except to change the grammar itself.
    # Accumulate contextualize_item entries into a list.
    if len(t) == 3:
        # contextualize_items contextualize_item: extend the existing list.
        t[0] = t[1]
        t[0].append(t[2])
    elif t[1]:
        # Single contextualize_item.
        t[0] = [t[1]]
    else:
        # 'empty' production: start with an empty list.
        t[0] = []
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.