signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def remove_node(node_id=str, force=bool):
    '''
    Remove a node from a swarm; the target needs to be a swarm manager.

    node_id
        The node id, as returned by ``swarm.node_ls``.

    force
        Forcefully remove the node/minion from the service.
        Accepts a real boolean or the CLI string 'True'/'False'.

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false
    '''
    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    try:
        # The old code only honoured the string 'True', so passing a real
        # boolean True silently removed the node with force=False.
        # Accept both forms.
        force_flag = force is True or force == 'True'
        return client.remove_node(node_id, force=force_flag)
    except TypeError:
        # node_id missing/of the wrong type; report instead of raising.
        return {'Error': 'Is the node_id and/or force=True/False missing?'}
|
def logoff(self):
    """Send a logoff request and report whether it was acknowledged.

    Marks the connection as uninitialized on a successful (b'\\x00')
    response.

    :rtype: bool
    """
    self.send(C1218LogoffRequest())
    response = self.recv()
    if response != b'\x00':
        return False
    self._initialized = False
    return True
|
def change_site(self, new_name, new_location=None, new_er_data=None,
                new_pmag_data=None, replace_data=False):
    """Update a site's name, location, er_data, and pmag_data.

    By default new data is merged into the pre-existing data,
    overwriting existing values.  When ``replace_data`` is True the new
    data dictionaries simply take the place of the existing ones.
    """
    self.name = new_name
    if new_location:
        self.location = new_location
    self.update_data(new_er_data, new_pmag_data, replace_data)
|
def gpu_iuwt_recomposition(in1, scale_adjust, store_on_gpu, smoothed_array):
    """Recompose wavelet coefficients into a single array on the GPU.

    GPU implementation of the isotropic undecimated wavelet transform
    recomposition (a trous algorithm).

    INPUTS:
    in1             (no default): Array containing wavelet coefficients.
    scale_adjust    (no default): Number of omitted array pages.
    store_on_gpu    (no default): Whether the result stays on the GPU.
    smoothed_array  (no default): Optional pre-smoothed starting array.

    OUTPUTS:
    recomposition   Array containing the reconstructed array.
    """
    # Filter-bank for use in the a trous algorithm.
    wavelet_filter = (1. / 16) * np.array([1, 4, 6, 4, 1], dtype=np.float32)
    wavelet_filter = gpuarray.to_gpu_async(wavelet_filter)
    # Determines scale with adjustment and creates a zero array on the GPU
    # to store the output, unless smoothed_array is given.
    max_scale = in1.shape[0] + scale_adjust
    if smoothed_array is None:
        recomposition = gpuarray.zeros([in1.shape[1], in1.shape[2]], np.float32)
    else:
        recomposition = gpuarray.to_gpu(smoothed_array.astype(np.float32))
    # Moves the input to the GPU if it is not already there.  The old bare
    # ``except:`` also swallowed KeyboardInterrupt/SystemExit; narrowed to
    # Exception so only the "already a GPU array" failure is absorbed.
    try:
        gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32))
    except Exception:
        gpu_in1 = in1
    # Creates a working array on the GPU.
    gpu_tmp = gpuarray.empty_like(recomposition)
    # Creates and fills an array with the appropriate scale value.
    gpu_scale = gpuarray.zeros([1], np.int32)
    gpu_scale += max_scale - 1
    # Fetches the a trous kernels.
    gpu_a_trous_row_kernel, gpu_a_trous_col_kernel = gpu_a_trous()
    grid_rows = int(in1.shape[1] // 32)
    grid_cols = int(in1.shape[2] // 32)
    # The following loops call the a trous algorithm code to recompose the
    # input.  The first loop assumes that there are non-zero wavelet
    # coefficients at scales above scale_adjust, while the second loop
    # completes the recomposition on the scales less than scale_adjust.
    for i in range(max_scale - 1, scale_adjust - 1, -1):
        gpu_a_trous_row_kernel(recomposition, gpu_tmp, wavelet_filter, gpu_scale,
                               block=(32, 32, 1), grid=(grid_cols, grid_rows))
        gpu_a_trous_col_kernel(gpu_tmp, recomposition, wavelet_filter, gpu_scale,
                               block=(32, 32, 1), grid=(grid_cols, grid_rows))
        recomposition = recomposition[:, :] + gpu_in1[i - scale_adjust, :, :]
        gpu_scale -= 1
    if scale_adjust > 0:
        for i in range(scale_adjust - 1, -1, -1):
            gpu_a_trous_row_kernel(recomposition, gpu_tmp, wavelet_filter, gpu_scale,
                                   block=(32, 32, 1), grid=(grid_cols, grid_rows))
            gpu_a_trous_col_kernel(gpu_tmp, recomposition, wavelet_filter, gpu_scale,
                                   block=(32, 32, 1), grid=(grid_cols, grid_rows))
            gpu_scale -= 1
    # Return values depend on mode.
    if store_on_gpu:
        return recomposition
    else:
        return recomposition.get()
|
def get_agent_sock_path(env=None, sp=subprocess):
    """Locate the GPG agent's UNIX socket path.

    Runs ``gpgconf --list-dirs`` and parses its ``key:value`` output.
    """
    args = [util.which('gpgconf'), '--list-dirs']
    raw = check_output(args=args, env=env, sp=sp)
    dirs = {key: value
            for key, value in (line.split(b':', 1)
                               for line in raw.strip().split(b'\n'))}
    log.debug('%s: %s', args, dirs)
    return dirs[b'agent-socket']
|
def purge_archives(self):
    """Delete older archived items.

    The class attribute NUM_KEEP_ARCHIVED controls how many archived
    items are kept; everything beyond that (ordered newest first) is
    removed.
    """
    version_cls = self.get_version_class()
    stale = version_cls.normal.filter(
        object_id=self.object_id,
        state=self.ARCHIVED,
    ).order_by('-last_save')[self.NUM_KEEP_ARCHIVED:]
    for archived in stale:
        archived._delete_reverses()
        version_cls.normal.filter(vid=archived.vid).delete()
|
def save(self):
    """Store the current settings in the ConfigManager.

    Called by the parent settings dialog when the user clicks the Save
    button.
    """
    # Lazy %s formatting: the settings string is only built when the
    # debug level is actually enabled (the old '+' concatenation paid
    # the cost unconditionally).
    logger.debug("User requested to save settings. New settings: %s",
                 self._settings_str())
    cm.ConfigManager.SETTINGS[cm.PROMPT_TO_SAVE] = not self.autosave_checkbox.isChecked()
    cm.ConfigManager.SETTINGS[cm.SHOW_TRAY_ICON] = self.show_tray_checkbox.isChecked()
    # cm.ConfigManager.SETTINGS[cm.MENU_TAKES_FOCUS] = self.allow_kb_nav_checkbox.isChecked()
    cm.ConfigManager.SETTINGS[cm.SORT_BY_USAGE_COUNT] = self.sort_by_usage_checkbox.isChecked()
    cm.ConfigManager.SETTINGS[cm.UNDO_USING_BACKSPACE] = self.enable_undo_checkbox.isChecked()
    cm.ConfigManager.SETTINGS[cm.NOTIFICATION_ICON] = self.system_tray_icon_theme_combobox.currentData(Qt.UserRole)
    # TODO: After saving the notification icon, apply it to the currently running instance.
    self._save_autostart_settings()
|
def buffer(self, geometries, inSR, distances, units, outSR=None,
           bufferSR=None, unionResults=True, geodesic=True):
    """Buffer geometries via the geometry service resource.

    The result of this operation is buffered polygons at the specified
    distances for the input geometry array.  Options are available to
    union buffers and to use geodesic distance.

    Inputs:
        geometries - array of geometries (structured as JSON geometry
            objects returned by the ArcGIS REST API).
        inSR - spatial reference of the input geometries WKID.
        outSR - spatial reference for the returned geometries.
        bufferSR - WKID or a spatial reference JSON object in which the
            geometries are buffered.
        distances - distances that each input geometry is buffered.
        units - units for calculating each buffer distance.
        unionResults - if true, all geometries buffered at a given
            distance are unioned into a single (possibly multipart)
            polygon placed in the output array.
        geodesic - set to true to buffer using geodesic distance.

    Returns None when the first geometry is of an unsupported type.
    """
    url = self._url + "/buffer"
    params = {
        "f": "json",
        "inSR": inSR,
        "geodesic": geodesic,
        "unionResults": unionResults,
    }
    # Map the geometry class to its esri type tag instead of three
    # copy-pasted branches; order matters only for exotic subclasses.
    esri_types = (
        (Polygon, "esriGeometryPolygon"),
        (Point, "esriGeometryPoint"),
        (Polyline, "esriGeometryPolyline"),
    )
    if isinstance(geometries, list) and len(geometries) > 0:
        first = geometries[0]
        for geom_cls, type_tag in esri_types:
            if isinstance(first, geom_cls):
                params['geometries'] = {
                    "geometryType": type_tag,
                    "geometries": self.__geomToStringArray(geometries, "list"),
                }
                break
        else:
            # Unsupported geometry type.
            return None
    if isinstance(distances, list):
        params['distances'] = ",".join(str(d) for d in distances)
    else:
        params['distances'] = str(distances)
    params['units'] = units
    if bufferSR is not None:
        params['bufferSR'] = bufferSR
    if outSR is not None:
        params['outSR'] = outSR
    return self._get(url, param_dict=params, proxy_port=self._proxy_port,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url)
|
def format_item(format_spec, item, defaults=None):
    """Format an item according to the given output format.

    The format can be given as either an interpolation string, or a
    Tempita template (which has to start with ``{{``).

    @param format_spec: The output format.
    @param item: The object, which is automatically wrapped for interpolation.
    @param defaults: Optional default values.
    """
    template_engine = getattr(format_spec, "__engine__", None)
    # TODO: Make differences between engines transparent
    if template_engine == "tempita" or (not template_engine and format_spec.startswith("{{")):
        # Tempita template: set item, or field names for column titles
        namespace = dict(headers=not bool(item))
        if item:
            namespace["d"] = item
        else:
            # Header row: expose each known field as its upper-cased name.
            namespace["d"] = Bunch()
            for name in engine.FieldDefinition.FIELDS:
                namespace["d"][name] = name.upper()
            # Justify headers to width of a formatted value, using the
            # module-level fmt_* helpers.  The lambda default binds each
            # method at definition time (avoids late-binding closures).
            # NOTE(review): assumes every fmt_* helper accepts 0 -- confirm.
            namespace.update((name[4:], lambda x, m=method: str(x).rjust(len(str(m(0))))) for name, method in globals().items() if name.startswith("fmt_"))
        return expand_template(format_spec, namespace)
    else:
        # Interpolation
        format_spec = getattr(format_spec, "fmt", format_spec)
        if item is None:
            # For headers, ensure we only have string formats: rewrite any
            # numeric conversion (d/i/o/u/x/X/e/E/f/F/g/G) to 's'.
            format_spec = re.sub(r"(\([_.a-zA-Z0-9]+\)[-#+0 ]?[0-9]*?)[.0-9]*[diouxXeEfFgG]", lambda m: m.group(1) + 's', format_spec)
        return format_spec % OutputMapping(item, defaults)
|
def traverseItems(self, mode=TraverseMode.DepthFirst, parent=None):
    """Generate an iterator over the items of this tree, traversing in
    either a depth-first or breadth-first fashion.

    :param mode | <XTreeWidget.TraverseMode>
    :param parent | item whose children to walk, or None for top level
    :return <generator>
    """
    try:
        if parent:
            count, fetch = parent.childCount(), parent.child
        else:
            count, fetch = self.topLevelItemCount(), self.topLevelItem
    except RuntimeError:
        # Raised when iterating on a deleted tree widget.
        return
    breadth_queue = []
    for index in range(count):
        try:
            current = fetch(index)
        except RuntimeError:
            # Widget deleted mid-iteration.
            return
        yield current
        if mode == XTreeWidget.TraverseMode.DepthFirst:
            yield from self.traverseItems(mode, current)
        else:
            breadth_queue.append(current)
    for current in breadth_queue:
        yield from self.traverseItems(mode, current)
|
def musixmatch(song):
    """Return the lyrics found on musixmatch for the given song, or an
    empty string if nothing was found."""
    escape = re.sub("'-¡¿", '', URLESCAPE)
    translate = {escape: '', ' ': '-'}

    def to_slug(text):
        # Drop apostrophes glued to word boundaries, dash the rest,
        # normalize the remaining characters and collapse dash runs.
        text = re.sub(r"( '|' )", '', text)
        text = re.sub(r"'", '-', text)
        text = normalize(text, translate)
        return re.sub(r'\-{2,}', '-', text)

    artist = to_slug(song.artist.title())
    title = to_slug(song.title)
    url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title)
    soup = get_url(url)
    contents = soup.find_all('p', class_='mxm-lyrics__content')
    text = ''
    for paragraph in contents:
        text += paragraph.get_text().strip()
        if paragraph != contents[-1]:
            text += '\n\n'
    return text.strip()
|
def _compress(self, data, operation):
    """Compress ``data`` with the given Brotli encoder operation.

    This private method exists because almost all of the code uses the
    exact same cffi setup.  It wouldn't have to, but it doesn't hurt at
    all.
    """
    # The 'algorithm' for working out how big to make this buffer is from
    # the Brotli source code, brotlimodule.cc.
    original_output_size = int(math.ceil(len(data) + (len(data) >> 2) + 10240))
    available_out = ffi.new("size_t *")
    available_out[0] = original_output_size
    output_buffer = ffi.new("uint8_t []", available_out[0])
    ptr_to_output_buffer = ffi.new("uint8_t **", output_buffer)
    input_size = ffi.new("size_t *", len(data))
    input_buffer = ffi.new("uint8_t []", data)
    ptr_to_input_buffer = ffi.new("uint8_t **", input_buffer)
    rc = lib.BrotliEncoderCompressStream(self._encoder, operation, input_size, ptr_to_input_buffer, available_out, ptr_to_output_buffer, ffi.NULL)
    if rc != lib.BROTLI_TRUE:  # pragma: no cover
        raise Error("Error encountered compressing data.")
    # The output buffer is sized so one call always consumes all input.
    assert not input_size[0]
    # available_out was decremented by the number of bytes written.
    size_of_output = original_output_size - available_out[0]
    # Copy out only the bytes actually produced.
    return ffi.buffer(output_buffer, size_of_output)[:]
|
def split_sources(srcs):
    """Split the given sources into their component sub-sources.

    :param srcs: sources
    :returns: a pair (split sources, dict of src.id -> split time)
    """
    from openquake.hazardlib.source import splittable
    sources = []
    split_time = {}  # src.id -> time
    for src in srcs:
        t0 = time.time()
        # Only the maximum magnitude matters here (minimum was unused).
        _, mag_b = src.get_min_max_mag()
        min_mag = src.min_mag
        if mag_b < min_mag:  # discard the source completely
            continue
        has_serial = hasattr(src, 'serial')
        if has_serial:
            # Expand the scalar serial into one serial per rupture.
            src.serial = numpy.arange(src.serial, src.serial + src.num_ruptures)
        if not splittable(src):
            sources.append(src)
            split_time[src.id] = time.time() - t0
            continue
        if min_mag:
            # Split and drop sub-sources entirely below the minimum
            # magnitude or with no remaining ruptures.
            splits = []
            for s in src:
                s.min_mag = min_mag
                _, mag_b = s.get_min_max_mag()
                if mag_b < min_mag:
                    continue
                s.num_ruptures = s.count_ruptures()
                if s.num_ruptures:
                    splits.append(s)
        else:
            splits = list(src)
        split_time[src.id] = time.time() - t0
        sources.extend(splits)
        has_samples = hasattr(src, 'samples')
        if len(splits) > 1:
            # Rename the splits and distribute serials/samples.
            start = 0
            for i, split in enumerate(splits):
                split.source_id = '%s:%s' % (src.source_id, i)
                split.src_group_id = src.src_group_id
                split.id = src.id
                if has_serial:
                    nr = split.num_ruptures
                    split.serial = src.serial[start:start + nr]
                    start += nr
                if has_samples:
                    split.samples = src.samples
        elif splits:  # single source
            splits[0].id = src.id
            if has_serial:
                splits[0].serial = src.serial
            if has_samples:
                splits[0].samples = src.samples
    return sources, split_time
|
def has_equal_value(state, ordered=False, ndigits=None, incorrect_msg=None):
    """Verify that the student and solution query results match up.

    This function must always be used after 'zooming' in on certain
    columns or records (check_column, check_row or check_result).  It
    goes over all columns that are still left in the solution query
    result, and compares each with the corresponding column in the
    student query result.

    Args:
        ordered: if False (the default), all rows are sorted before the
            comparison (according to the first column, with following
            columns as tie breakers).  If True, the row order of student
            and solution query results has to match.
        ndigits: if specified, number of decimals to use when comparing
            column values.
        incorrect_msg: if specified, overrides the automatically
            generated feedback message when a column in the student
            query result does not match the solution's.

    :Example:
        Suppose we are testing the following SELECT statements

        * solution: ``SELECT artist_id as id, name FROM artists ORDER BY name``
        * student: ``SELECT artist_id, name FROM artists``

        ::

            # passes, as order is not important by default
            Ex().check_column('name').has_equal_value()
            # fails, as order is deemed important
            Ex().check_column('name').has_equal_value(ordered=True)
            # check_column fails, as id is not in the student query result
            Ex().check_column('id').has_equal_value()
            # check_all_columns fails, as id not in the student query result
            Ex().check_all_columns().has_equal_value()
    """
    if not hasattr(state, "parent"):
        raise ValueError("You can only use has_equal_value() on the state resulting from check_column, check_row or check_result.")
    if incorrect_msg is None:
        incorrect_msg = "Column `{{col}}` seems to be incorrect.{{' Make sure you arranged the rows correctly.' if ordered else ''}}"
    # First of all, check if the numbers of rows correspond.
    has_nrows(state)
    if not ordered:
        stu_res, sol_res = sort_rows(state)
    else:
        stu_res = state.student_result
        sol_res = state.solution_result
    for sol_col_name, sol_col_vals in sol_res.items():
        stu_col_vals = stu_res[sol_col_name]
        if ndigits is not None:
            # Best-effort rounding: non-numeric columns are compared
            # as-is.  The old bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit; Exception is narrow enough.
            try:
                sol_col_vals = round_seq(sol_col_vals, ndigits)
                stu_col_vals = round_seq(stu_col_vals, ndigits)
            except Exception:
                pass
        if sol_col_vals != stu_col_vals:
            _msg = state.build_message(incorrect_msg, fmt_kwargs={"col": sol_col_name, "ordered": ordered})
            state.do_test(_msg)
    return state
|
def get_fieldsets(self, request, obj=None):
    """Add fieldsets of placeholders to the list of already existing
    fieldsets."""
    # Some ugly business to remove freeze_date (and possibly status)
    # from the field list when the user lacks the permission.
    general_module = {
        'fields': list(self.general_fields),
        'classes': ('module-general',),
    }
    default_fieldsets = list(self.fieldsets)
    if not request.user.has_perm('pages.can_freeze'):
        general_module['fields'].remove('freeze_date')
    if not request.user.has_perm('pages.can_publish'):
        general_module['fields'].remove('status')
    default_fieldsets[0][1] = general_module
    template = get_template_from_request(request, obj)
    placeholder_fieldsets = [
        placeholder.name
        for placeholder in get_placeholders(template)
        if placeholder.name not in self.mandatory_placeholders
    ]
    # Meta fields.
    metadata_fieldsets = [field['name'] for field in self.metadata_fields]
    additional_fieldsets = [
        (_('Metadata'), {
            'fields': metadata_fieldsets,
            'classes': ('module-content', 'grp-collapse grp-closed'),
        }),
        (_('Content'), {
            'fields': placeholder_fieldsets,
            'classes': ('module-content',),
        }),
    ]
    return default_fieldsets + additional_fieldsets
|
def create_zip_dir(zipfile_path, *file_list):
    """Create a zipfile at ``zipfile_path`` containing the given files.

    ``file_list`` can be given as separate arguments, as a single
    list/tuple, or as a comma-separated string.  Each file is stored
    under its base name.  Errors are logged via ``debug.log`` rather
    than raised.
    """
    try:
        # Unfold a single list/tuple argument.
        if isinstance(file_list, (list, tuple)):
            if len(file_list) == 1 and isinstance(file_list[0], (list, tuple)):
                file_list = file_list[0]
        # Converting a string to an iterable list.
        if isinstance(file_list, str):
            file_list = [file_list]
        if file_list:
            with ZipFile(zipfile_path, 'w') as zf:
                for cur_file in file_list:
                    # Store under the base name via arcname instead of the
                    # old os.chdir() dance: chdir mutated the process-wide
                    # cwd and broke when several relative paths from
                    # different directories were given (each chdir was
                    # relative to the previous one).
                    zf.write(cur_file, os.path.basename(cur_file))
        else:
            debug.log('Error: No Files in list!', zipfile_path + ' was not created!')
    except Exception as e:
        debug.log('Error: Could not create zip dir! argtype: ' + str(type(file_list)),
                  "FileList: " + str(file_list), "Errormessage: " + str(e))
|
def _handle_skip_feature(self, test_dict):
    """Handle the skip feature for a test.

    - skip: skip current test unconditionally
    - skipIf: skip current test if condition is true
    - skipUnless: skip current test unless condition is true

    Args:
        test_dict (dict): test info

    Raises:
        SkipTest: when the test should be skipped
    """
    # TODO: move skip to initialize
    reason = None
    if "skip" in test_dict:
        reason = test_dict["skip"]
    elif "skipIf" in test_dict:
        condition = test_dict["skipIf"]
        if self.session_context.eval_content(condition):
            reason = "{} evaluate to True".format(condition)
    elif "skipUnless" in test_dict:
        condition = test_dict["skipUnless"]
        if not self.session_context.eval_content(condition):
            reason = "{} evaluate to False".format(condition)
    if reason:
        raise SkipTest(reason)
|
def append(self, func, *args, **kwargs):
    """Add a task to the chain.

    Takes the same parameters as async_task().
    """
    self.chain.append((func, args, kwargs))
    if self.started:
        # A previous run exists: drop its results and reset the flag.
        delete_group(self.group)
        self.started = False
    return self.length()
|
def del_doc(self, doc):
    """Delete a document from the index and the label guesser.

    Lazily creates the index writer and the label-guesser updater on
    first use.

    NOTE(review): ``doc.docid`` is accessed below *before* the
    ``isinstance(doc, str)`` branch, so a plain string argument would
    raise AttributeError first -- confirm whether str input is actually
    supported by callers.
    """
    if not self.index_writer:
        self.index_writer = self.index.writer()
    if not self.label_guesser_updater:
        self.label_guesser_updater = self.label_guesser.get_updater()
    logger.info("Removing doc from the index: %s" % doc)
    if doc.docid in self._docs_by_id:
        self._docs_by_id.pop(doc.docid)
    if isinstance(doc, str):
        # Annoying case: we can't know which labels were on it,
        # so we can't roll back the label guesser training...
        self._delete_doc_from_index(self.index_writer, doc)
        return
    self._delete_doc_from_index(self.index_writer, doc.docid)
    self.label_guesser_updater.del_doc(doc)
|
def decode_run_length(run_length_list):
    """Decode a list encoded with run-length encoding.

    Here run-length encoding means: ``[N, E]`` represents N occurrences
    of element E.  An element not in that pair format appears once.

    Examples:
        decode_run_length([[2, 1], 2, 3, [2, 4], 5, 1])
        -> [1, 1, 2, 3, 4, 4, 5, 1]
        decode_run_length(['p', 'y', 't', 'h', 'o', 'n'])
        -> ['p', 'y', 't', 'h', 'o', 'n']

    Args:
        run_length_list: A list possibly containing singleton and
            run-length encoded pair elements.

    Returns:
        A decoded list.
    """
    decoded = []
    for entry in run_length_list:
        expansion = [entry[1]] * entry[0] if type(entry) is list else [entry]
        decoded.extend(expansion)
    return decoded
|
def __directory_list_descriptor(self, configs):
    """Build a directory list for an API.

    Args:
        configs: List of dicts containing the service configurations to
            list.

    Returns:
        A dictionary that can be serialized to JSON in discovery list
        format.

    Raises:
        ApiConfigurationError: If there's something wrong with the API
            configuration, such as a multiclass API decorated with
            different API descriptors (see the docstring for api()), or
            a repeated method signature.
    """
    descriptor = {
        'kind': 'discovery#directoryList',
        'discoveryVersion': 'v1',
    }
    items = [item for item in
             (self.__item_descriptor(config) for config in configs)
             if item]
    if items:
        descriptor['items'] = items
    return descriptor
|
def get(self, section, option, *args):
    """Get an option value from a section.

    Secure options are read from the keyring (or from unsaved pending
    values) instead of the parser itself; the sentinel '!!False!!' is
    decoded back into the boolean False.
    """
    if self.is_secure_option(section, option) and self.keyring_available:
        key = "%s%s" % (section, option)
        pending = self._unsaved.get(key, [''])
        if pending[0] == 'set':
            value = pending[1]
        else:
            value = keyring.get_password(self.keyring_name, key)
    else:
        value = ConfigParser.get(self, section, option, *args)
    return False if value == '!!False!!' else value
|
def raw_order_book(self, pair, prec=None, **kwargs):
    """Subscribe to the raw order book channel for the given pair.

    :param pair: str, pair to request data for.
    :param prec: precision level; defaults to 'R0' (raw book).
    :param kwargs: forwarded to the subscription call.
    :return:
    """
    if prec is None:
        prec = 'R0'
    self._subscribe('book', pair=pair, prec=prec, **kwargs)
|
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
    r"""Calculate the Wind Chill Temperature Index (WCTI).

    Calculates WCTI from the current temperature and wind speed using the
    formula outlined by the FCM [FCMR192003]_.

    Specifically, these formulas assume that wind speed is measured at 10m.
    If, instead, the speeds are measured at face level, the winds need to be
    multiplied by a factor of 1.5 (this can be done by specifying
    `face_level_winds` as `True`).

    Parameters
    ----------
    temperature : `pint.Quantity`
        The air temperature
    speed : `pint.Quantity`
        The wind speed at 10m.  If instead the winds are at face level,
        `face_level_winds` should be set to `True` and the 1.5
        multiplicative correction will be applied automatically.
    face_level_winds : bool, optional
        A flag indicating whether the wind speeds were measured at facial
        level instead of 10m, thus requiring a correction.  Defaults to
        `False`.
    mask_undefined : bool, optional
        A flag indicating whether a masked array should be returned with
        values masked where wind chill is undefined.  These are values
        where the temperature > 50F or wind speed <= 3 miles per hour.
        Defaults to `True`.

    Returns
    -------
    `pint.Quantity`
        The corresponding Wind Chill Temperature Index value(s)

    See Also
    --------
    heat_index
    """
    # Correct for lower height measurement of winds if necessary
    if face_level_winds:
        # No in-place op so that we copy rather than mutate the input
        # noinspection PyAugmentAssignment
        speed = speed * 1.5
    temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
    speed_factor = speed.to('km/hr').magnitude ** 0.16
    # FCM wind chill formula, evaluated in degC then converted back to the
    # caller's temperature units.
    wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude - 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
    # See if we need to mask any undefined values
    if mask_undefined:
        mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
        if mask.any():
            wcti = masked_array(wcti, mask=mask)
    return wcti
|
def ext_pillar(minion_id, pillar,  # pylint: disable=W0613
               use_grain=False, minion_ids=None, tag_match_key=None, tag_match_value='asis', tag_list_key=None, tag_list_sep=';'):
    '''
    Look up the EC2 tags of the instance matching this minion and return
    them as pillar data (``ec2_tags`` plus split ``ec2_tags_list``).

    The minion is matched either by instance-id (when the minion id is in
    AWS instance-id format, or via the instance-id grain when
    ``use_grain`` is enabled) or by an EC2 tag named ``tag_match_key``
    whose value is the minion id ('asis') or its unqualified hostname
    ('uqdn').
    '''
    valid_tag_match_value = ['uqdn', 'asis']
    # Untrusted instance id reported by the minion: meta-data:instance-id
    grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
    if not grain_instance_id:
        # Fallback grain: dynamic:instance-identity:document:instanceId
        grain_instance_id = __grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
    if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
        log.error('External pillar %s, instance-id \'%s\' is not valid for ' '\'%s\'', __name__, grain_instance_id, minion_id)
        # invalid instance id found, remove it from use.
        grain_instance_id = None
    # Check AWS Tag restrictions, i.e. letters, spaces, and numbers and + - = . _ : / @
    if tag_match_key and re.match(r'[\w=.:/@-]+$', tag_match_key) is None:
        log.error('External pillar %s, tag_match_key \'%s\' is not valid ', __name__, tag_match_key if isinstance(tag_match_key, six.text_type) else 'non-string')
        return {}
    if tag_match_key and tag_match_value not in valid_tag_match_value:
        log.error('External pillar %s, tag_value \'%s\' is not valid must be one ' 'of %s', __name__, tag_match_value, ' '.join(valid_tag_match_value))
        return {}
    # NOTE(review): base_msg is a (format, args...) tuple passed as a single
    # argument to log.debug()/log.error() below, so it is rendered as a
    # tuple repr instead of being interpolated -- confirm this is intended.
    if not tag_match_key:
        base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' ' 'against instance-id', __name__, minion_id)
    else:
        base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' ' 'against instance-id or \'%s\' against \'%s\'', __name__, minion_id, tag_match_key, tag_match_value)
    log.debug(base_msg)
    find_filter = None
    find_id = None
    # Minion id already in AWS instance-id format: use it directly.
    if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
        find_filter = None
        find_id = minion_id
    elif tag_match_key:
        if tag_match_value == 'uqdn':
            find_filter = {'tag:{0}'.format(tag_match_key): minion_id.split('.', 1)[0]}
        else:
            find_filter = {'tag:{0}'.format(tag_match_key): minion_id}
        if grain_instance_id:
            # we have an untrusted grain_instance_id, use it to narrow the
            # search even more. Combination will be unique even if uqdn is set.
            find_filter.update({'instance-id': grain_instance_id})
        # Add this if running state is not dependent on EC2Config
        # find_filter.update('instance-state-name': 'running')
    # no minion-id is instance-id and no suitable filter, try use_grain if enabled
    if not find_filter and not find_id and use_grain:
        if not grain_instance_id:
            log.debug('Minion-id is not in AWS instance-id formation, and there ' 'is no instance-id grain for minion %s', minion_id)
            return {}
        if minion_ids is not None and minion_id not in minion_ids:
            log.debug('Minion-id is not in AWS instance ID format, and minion_ids ' 'is set in the ec2_pillar configuration, but minion %s is ' 'not in the list of allowed minions %s', minion_id, minion_ids)
            return {}
        find_id = grain_instance_id
    if not (find_filter or find_id):
        # NOTE(review): 'noughthing' is a typo ('nothing') in the log text.
        log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against ' 'instance-id or \'%s\' against \'%s\' noughthing to match against', __name__, minion_id, tag_match_key, tag_match_value)
        return {}
    # Short metadata probe: only works when the master itself runs on EC2.
    myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
    if not myself:
        log.info("%s: salt master not an EC2 instance, skipping", __name__)
        return {}
    # Get the Master's instance info, primarily the region
    (_, region) = _get_instance_info()
    # If the Minion's region is available, use it instead
    if use_grain:
        region = __grains__.get('ec2', {}).get('region', region)
    try:
        conn = boto.ec2.connect_to_region(region)
    except boto.exception.AWSConnectionError as exc:
        log.error('%s: invalid AWS credentials, %s', __name__, exc)
        return {}
    if conn is None:
        log.error('%s: Could not connect to region %s', __name__, region)
        return {}
    try:
        if find_id:
            instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
        else:
            # filters and max_results can not be used together.
            instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
    except boto.exception.EC2ResponseError as exc:
        log.error('%s failed with \'%s\'', base_msg, exc)
        return {}
    if not instance_data:
        log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
        return {}
    # Find an active instance, i.e. ignore terminated and stopped instances
    active_inst = []
    for inst in range(0, len(instance_data)):
        if instance_data[inst].state not in ['terminated', 'stopped']:
            active_inst.append(inst)
    valid_inst = len(active_inst)
    if not valid_inst:
        log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
        return {}
    if valid_inst > 1:
        # Ambiguous match: refuse rather than pick an arbitrary instance.
        log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
        return {}
    instance = instance_data[active_inst[0]]
    if instance.tags:
        ec2_tags = instance.tags
        ec2_tags_list = {}
        log.debug('External pillar %s, for minion id \'%s\', tags: %s', __name__, minion_id, instance.tags)
        if tag_list_key and isinstance(tag_list_key, list):
            for item in tag_list_key:
                if item in ec2_tags:
                    ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
                    # make sure it's only in ec2_tags_list
                    del ec2_tags[item]
                else:
                    ec2_tags_list[item] = []
        # always return a result
        return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
    return {}
|
def follow_link(self, link=None, *args, **kwargs):
    """Follow a link.

    If ``link`` is a bs4.element.Tag (i.e. from a previous call to
    :func:`links` or :func:`find_link`), follow it directly. If ``link``
    has no *href* attribute or is None, it is treated as a url_regex and
    resolved via :func:`find_link`; extra arguments are forwarded there.
    Raises :class:`LinkNotFoundError` when no link matches.

    :return: Forwarded from :func:`open_relative`.
    """
    target = self._find_link_internal(link, args, kwargs)
    referer = self.get_url()
    extra_headers = None
    if referer:
        # Pass the current page as the Referer so the server sees a
        # normal browsing transition.
        extra_headers = {'Referer': referer}
    return self.open_relative(target['href'], headers=extra_headers)
|
def SetSerializersProfiler(self, serializers_profiler):
    """Sets the serializers profiler.

    The profiler is stored on this object and, when a storage file is
    already attached, forwarded to it as well.

    Args:
      serializers_profiler (SerializersProfiler): serializers profiler.
    """
    self._serializers_profiler = serializers_profiler
    storage_file = self._storage_file
    if storage_file:
        storage_file.SetSerializersProfiler(serializers_profiler)
|
def _nextNonSpaceColumn ( block , column ) :
"""Returns the column with a non - whitespace characters
starting at the given cursor position and searching forwards ."""
|
textAfter = block . text ( ) [ column : ]
if textAfter . strip ( ) :
spaceLen = len ( textAfter ) - len ( textAfter . lstrip ( ) )
return column + spaceLen
else :
return - 1
|
def get_network(families=[socket.AF_INET]):
    """Return a mapping of NIC name -> list of "address/netmask" strings for
    interfaces that have at least one address in the requested families.

    >>> from collections import namedtuple
    >>> import mock
    >>> snic = namedtuple('snic', ['family', 'address', 'netmask', 'broadcast', 'ptp'])
    >>> MOCK = {
    ...     "awdl0": [snic(family=30, address="fe80::3854:80ff:fe54:7bf8%awdl0", netmask="ffff:ffff:ffff:ffff::", broadcast=None, ptp=None)],
    ...     "en0": [snic(family=2, address="192.168.10.200", netmask="255.255.255.0", broadcast="192.168.10.255", ptp=None),
    ...             snic(family=30, address="fe80::6e40:8ff:feac:4f94%en0", netmask="ffff:ffff:ffff:ffff::", broadcast=None, ptp=None)],
    ...     "bridge0": [snic(family=18, address="6e:40:08:ca:60:00", netmask=None, broadcast=None, ptp=None)],
    ...     "lo0": [snic(family=2, address="127.0.0.1", netmask="255.0.0.0", broadcast=None, ptp=None),
    ...             snic(family=30, address="fe80::1%lo0", netmask="ffff:ffff:ffff:ffff::", broadcast=None, ptp=None)]}
    >>> with mock.patch("psutil.net_if_addrs", side_effect=lambda: MOCK):
    ...     data_inet = get_network([socket.AF_INET])
    ...     sorted(data_inet.keys())
    ['en0', 'lo0']
    >>> with mock.patch("psutil.net_if_addrs", side_effect=lambda: MOCK):
    ...     sorted(data_inet.values())
    [[u'127.0.0.1/255.0.0.0'], [u'192.168.10.200/255.255.255.0']]
    >>> with mock.patch("psutil.net_if_addrs", side_effect=lambda: MOCK):
    ...     data_inet6 = get_network([socket.AF_INET6])
    ...     sorted(flatten(data_inet6.values()))
    ['fe80::1%lo0/ffff:ffff:ffff:ffff::', 'fe80::3854:80ff:fe54:7bf8%awdl0/ffff:ffff:ffff:ffff::', 'fe80::6e40:8ff:feac:4f94%en0/ffff:ffff:ffff:ffff::']
    """
    # NOTE(review): the mutable default ``[socket.AF_INET]`` is never
    # mutated below, so the shared-default pitfall does not bite here.
    nic = psutil.net_if_addrs()
    ips = defaultdict(list)
    for card, addresses in nic.items():
        for address in addresses:
            # Keep only addresses whose family the caller asked for.
            if address.family in families:
                ips[card].append("{0.address}/{0.netmask}".format(address))
    # Convert back to a plain dict so absent cards don't auto-create entries.
    return dict(ips)
|
def sample(self, n):
    """Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
    incur cost.

    Args:
      n: number of sampled counts. Note that the number of counts returned is approximated.
    Returns:
      A dataframe containing sampled data.
    Raises:
      ValueError if n is larger than number of rows.
    """
    # Row count of the source; this itself runs a (billed) BigQuery query.
    total = bq.Query('select count(*) from %s' % self._get_source()).execute().result()[0].values()[0]
    if n > total:
        raise ValueError('sample larger than population')
    # Translate the requested row count into a sampling percentage; the
    # returned row count is therefore approximate, not exact.
    sampling = bq.Sampling.random(percent=n * 100.0 / float(total))
    if self._query is not None:
        source = self._query
    else:
        source = 'SELECT * FROM `%s`' % self._table
    sample = bq.Query(source).execute(sampling=sampling).result()
    df = sample.to_dataframe()
    return df
|
def select_tag(self, uid):
    """Select a tag for further usage.

    uid -- list or tuple with the tag ID bytes (the first five bytes
    are sent with the SELECT command).

    Returns the error state: False on a successful select (expected
    0x18-bit response), True otherwise.
    """
    frame = [self.act_select, 0x70]
    frame.extend(uid[i] for i in range(5))
    crc = self.calculate_crc(frame)
    frame.extend([crc[0], crc[1]])
    (error, back_data, back_length) = self.card_write(self.mode_transrec, frame)
    # A valid SAK answer is 0x18 bits (3 bytes) with no transport error.
    if not error and back_length == 0x18:
        return False
    return True
|
def _inserts(self):
    """Return the set of strings formed by inserting one letter from
    ``ALPHABET`` at every split position of the word.

    NOTE(review): ``self.slices`` presumably holds (prefix, suffix) pairs
    and ``concat`` joins its arguments in order — confirm against their
    definitions elsewhere in this module.
    """
    return {concat(a, c, b) for a, b in self.slices for c in ALPHABET}
|
def send(self, message, callback, timeout=0):
    """Add a single message to the internal pending queue to be processed
    by the Connection without waiting for it to be sent.

    :param message: The message to send.
    :type message: ~uamqp.message.Message
    :param callback: The callback to be run once a disposition is received
     in receipt of the message. The callback must take three arguments, the message,
     the send result and the optional delivery condition (exception).
    :type callback:
     callable[~uamqp.message.Message, ~uamqp.constants.MessageSendResult, ~uamqp.errors.MessageException]
    :param timeout: An expiry time for the message added to the queue. If the
     message is not sent within this timeout it will be discarded with an error
     state. If set to 0, the message will not expire. The default is 0.
    """
    # pylint: disable=protected-access
    try:
        # Idiom: when no error has been recorded, self._error is None and
        # ``raise None`` raises TypeError, which is deliberately swallowed.
        # Any previously stored exception is logged and re-raised instead.
        raise self._error
    except TypeError:
        pass
    except Exception as e:
        _logger.warning("%r", e)
        raise
    c_message = message.get_message()
    message._on_message_sent = callback
    try:
        # Serialize access to the underlying connection while queueing.
        self._session._connection.lock(timeout=-1)
        return self._sender.send(c_message, timeout, message)
    finally:
        self._session._connection.release()
|
def dump2sqlite(records, output_file):
    """Dump test results to an SQLite database file.

    Creates a ``testcases`` table (one TEXT column per result key plus a
    ``sqltime`` TIMESTAMP) and, when a testrun id is present, a one-row
    ``testrun`` table.

    Args:
        records: object with a ``results`` list of mappings and an
            optional ``testrun`` identifier.
        output_file: path of the SQLite file to create (``~`` expanded).
    """
    results_keys = list(records.results[0].keys())
    pad_data = []
    # Make sure every required column exists; rows missing it are padded
    # with an empty string.
    for key in REQUIRED_KEYS:
        if key not in results_keys:
            results_keys.append(key)
            pad_data.append("")
    conn = sqlite3.connect(os.path.expanduser(output_file), detect_types=sqlite3.PARSE_DECLTYPES)
    try:
        # in each row there needs to be data for every column
        # last column is current time
        pad_data.append(datetime.datetime.utcnow())
        to_db = [list(row.values()) + pad_data for row in records.results]
        cur = conn.cursor()
        cur.execute("CREATE TABLE testcases ({},sqltime TIMESTAMP)".format(
            ",".join("{} TEXT".format(key) for key in results_keys)))
        cur.executemany(
            "INSERT INTO testcases VALUES ({},?)".format(",".join(["?"] * len(results_keys))),
            to_db)
        if records.testrun:
            cur.execute("CREATE TABLE testrun (testrun TEXT)")
            cur.execute("INSERT INTO testrun VALUES (?)", (records.testrun,))
        conn.commit()
    finally:
        # Fix: previously the connection leaked if any statement raised.
        conn.close()
    logger.info("Data written to '%s'", output_file)
|
def set_dhw_on(self, until=None):
    """Switch domestic hot water on, permanently or until ``until``.

    ``until`` is a datetime; when None the override is permanent.
    """
    if until is None:
        mode, until_time = "PermanentOverride", None
    else:
        mode, until_time = "TemporaryOverride", until.strftime('%Y-%m-%dT%H:%M:%SZ')
    self._set_dhw({"Mode": mode, "State": "On", "UntilTime": until_time})
|
def isDocumentCollection(cls, name):
    """Return True when ``name`` is the name of a document collection,
    False otherwise (including when no such collection is registered).
    """
    try:
        collection_class = cls.getCollectionClass(name)
    except KeyError:
        # Unknown name: not a registered collection at all.
        return False
    return issubclass(collection_class, Collection)
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding of 1 (preserves spatial size at stride 1)."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=True)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
|
def sparse_decom2(inmatrix, inmask=(None, None), sparseness=(0.01, 0.01), nvecs=3, its=20, cthresh=(0, 0), statdir=None, perms=0, uselong=0, z=0, smooth=0, robust=0, mycoption=0, initialization_list=[], initialization_list2=[], ell1=10, prior_weight=0, verbose=False, rejector=0, max_based=False, version=1):
    """Decomposes two matrices into paired sparse eigenvectors to
    maximize canonical correlation - aka Sparse CCA.

    Note: we do not scale the matrices internally. We leave
    scaling choices to the user.

    ANTsR function: `sparseDecom2`

    Arguments
    ---------
    inmatrix : 2-tuple of ndarrays
        input as inmatrix=(mat1, mat2). n by p input matrix and n by q input
        matrix, spatial variable lies along columns.
    inmask : 2-tuple of ANTsImage types (optional - one or both)
        optional pair of image masks
    sparseness : tuple
        a pair of float values e.g. (0.01, 0.1) enforces an unsigned 99 percent
        and 90 percent sparse solution for each respective view
    nvecs : integer
        number of eigenvector pairs
    its : integer
        number of iterations, 10 or 20 usually sufficient
    cthresh : 2-tuple
        cluster threshold pair
    statdir : string (optional)
        temporary directory if you want to look at full output
    perms : integer
        number of permutations. setting permutations greater than 0 will
        estimate significance per vector empirically. For small datasets,
        these may be conservative. p-values depend on how one scales the
        input matrices.
    uselong : boolean
        enforce solutions of both views to be the same - requires matrices
        to be the same size
    z : float
        subject space (low-dimensional space) sparseness value
    smooth : float
        smooth the data (only available when mask is used)
    robust : boolean
        rank transform input matrices
    mycoption : integer
        enforce 1 - spatial orthogonality, 2 - low-dimensional orthogonality
        or 0 - both
    initialization_list : list
        initialization for first view
    initialization_list2 : list
        initialization for 2nd view
    ell1 : float
        gradient descent parameter, if negative then l0 otherwise use l1
    prior_weight : scalar
        Scalar value weight on prior between 0 (prior is weak) and 1 (prior is
        strong). Only engaged if initialization is used
    verbose : boolean
        activates verbose output to screen
    rejector : scalar
        rejects small correlation solutions
    max_based : boolean
        whether to choose max-based thresholding

    Returns
    -------
    dict w/ following key/value pairs:
        `projections` : ndarray
            X projections
        `projections2` : ndarray
            Y projections
        `eig1` : ndarray
            X components
        `eig2` : ndarray
            Y components
        `summary` : pd.DataFrame
            first column is canonical correlations, second column is p-values
            (these are `None` unless perms > 0)

    Example
    -------
    >>> import numpy as np
    >>> import ants
    >>> mat = np.random.randn(20, 100)
    >>> mat2 = np.random.randn(20, 90)
    >>> mydecom = ants.sparse_decom2(inmatrix=(mat, mat2),
                                     sparseness=(0.1, 0.3), nvecs=3,
                                     its=3, perms=0)
    """
    # NOTE(review): ``statdir``, ``uselong``, ``rejector`` and the mutable
    # default ``initialization_list(2)=[]`` are accepted but never used or
    # mutated within this body.
    if inmatrix[0].shape[0] != inmatrix[1].shape[0]:
        raise ValueError('Matrices must have same number of rows (samples)')
    idim = 3
    # Resolve the first-view mask: ANTsImage, raw ndarray, or a dummy
    # single-voxel image when no mask is supplied (hasmaskx == -1).
    if isinstance(inmask[0], iio.ANTsImage):
        maskx = inmask[0].clone('float')
        idim = inmask[0].dimension
        hasmaskx = 1
    elif isinstance(inmask[0], np.ndarray):
        maskx = core.from_numpy(inmask[0], pixeltype='float')
        idim = inmask[0].ndim
        hasmaskx = 1
    else:
        maskx = core.make_image([1] * idim, pixeltype='float')
        hasmaskx = -1
    # Same resolution for the second-view mask.
    if isinstance(inmask[1], iio.ANTsImage):
        masky = inmask[1].clone('float')
        idim = inmask[1].dimension
        hasmasky = 1
    elif isinstance(inmask[1], np.ndarray):
        masky = core.from_numpy(inmask[1], pixeltype='float')
        idim = inmask[1].ndim
        hasmasky = 1
    else:
        masky = core.make_image([1] * idim, pixeltype='float')
        hasmasky = -1
    inmask = [maskx, masky]
    if robust > 0:
        raise NotImplementedError('robust > 0 not currently implemented')
    else:
        input_matrices = inmatrix
    # Pick the compiled backend by mask dimensionality and API version.
    # NOTE(review): for any ``version`` other than 1 or 2, ``sccancpp_fn``
    # is never bound and the call below raises UnboundLocalError — confirm
    # intended valid values.
    if idim == 2:
        if version == 1:
            sccancpp_fn = utils.get_lib_fn('sccanCpp2D')
        elif version == 2:
            sccancpp_fn = utils.get_lib_fn('sccanCpp2DV2')
            # V2 backend takes plain nested lists, not ndarrays.
            input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
    elif idim == 3:
        if version == 1:
            sccancpp_fn = utils.get_lib_fn('sccanCpp3D')
        elif version == 2:
            sccancpp_fn = utils.get_lib_fn('sccanCpp3DV2')
            input_matrices = (input_matrices[0].tolist(), input_matrices[1].tolist())
    outval = sccancpp_fn(input_matrices[0], input_matrices[1], inmask[0].pointer, inmask[1].pointer, hasmaskx, hasmasky, sparseness[0], sparseness[1], nvecs, its, cthresh[0], cthresh[1], z, smooth, initialization_list, initialization_list2, ell1, verbose, prior_weight, mycoption, max_based)
    # Project each view onto its sparse components and correlate the pairs.
    p1 = np.dot(input_matrices[0], outval['eig1'].T)
    p2 = np.dot(input_matrices[1], outval['eig2'].T)
    outcorrs = np.array([pearsonr(p1[:, i], p2[:, i])[0] for i in range(p1.shape[1])])
    if prior_weight < 1e-10:
        # No prior: reorder components by descending |correlation|.
        myord = np.argsort(np.abs(outcorrs))[::-1]
        outcorrs = outcorrs[myord]
        p1 = p1[:, myord]
        p2 = p2[:, myord]
        outval['eig1'] = outval['eig1'][myord, :]
        outval['eig2'] = outval['eig2'][myord, :]
    cca_summary = np.vstack((outcorrs, [None] * len(outcorrs))).T
    if perms > 0:
        cca_summary[:, 1] = 0
        # NOTE(review): when version == 2 the matrices were converted to
        # lists above; ``.shape`` and fancy indexing below would then fail.
        # Presumably permutation testing is only used with version == 1 —
        # confirm.
        nsubs = input_matrices[0].shape[0]
        for permer in range(perms):
            # Independently permute the rows of each view to break pairing.
            m1 = input_matrices[0][np.random.permutation(nsubs), :]
            m2 = input_matrices[1][np.random.permutation(nsubs), :]
            outvalperm = sccancpp_fn(m1, m2, inmask[0].pointer, inmask[1].pointer, hasmaskx, hasmasky, sparseness[0], sparseness[1], nvecs, its, cthresh[0], cthresh[1], z, smooth, initialization_list, initialization_list2, ell1, verbose, prior_weight, mycoption, max_based)
            p1perm = np.dot(m1, outvalperm['eig1'].T)
            p2perm = np.dot(m2, outvalperm['eig2'].T)
            outcorrsperm = np.array([pearsonr(p1perm[:, i], p2perm[:, i])[0] for i in range(p1perm.shape[1])])
            if prior_weight < 1e-10:
                myord = np.argsort(np.abs(outcorrsperm))[::-1]
                outcorrsperm = outcorrsperm[myord]
            # Count permutations whose correlation beats the observed one.
            counter = np.abs(cca_summary[:, 0]) < np.abs(outcorrsperm)
            counter = counter.astype('int')
            cca_summary[:, 1] = cca_summary[:, 1] + counter
        # Empirical p-value: fraction of beating permutations.
        cca_summary[:, 1] = cca_summary[:, 1] / float(perms)
    return {'projections': p1, 'projections2': p2, 'eig1': outval['eig1'].T, 'eig2': outval['eig2'].T, 'summary': pd.DataFrame(cca_summary, columns=['corrs', 'pvalues'])}
|
def allow_exception(self, exc_class):
    """Allow raising this class of exceptions from commands.

    When a command fails on the server side, by default the exception is
    stringified and reraised client-side as an ExternalError. Registering
    an exception class here makes the client reraise an instance of that
    same class instead. The class must be constructible from a single
    string argument and should expose a ``msg`` property.

    Args:
        exc_class (class): exception class allowed to pass from server
            to client.
    """
    # Server sends only the class *name*, so key the registry by it.
    self._allowed_exceptions[exc_class.__name__] = exc_class
|
def rotation_substring_check(main_string, check_string):
    """Checks if check_string or any of its rotations is a substring of main_string.

    Returns True if it is, False otherwise.

    All rotations of check_string are precomputed once (they are exactly
    the length-m windows of check_string doubled), so each window of
    main_string costs a single set lookup: O(n*m) instead of the naive
    O(n*m*m) of comparing every window against every rotation.

    Args:
        main_string (str): The main string in which to check for substrings.
        check_string (str): The string to check for rotation substrings.

    Returns:
        bool: True if a rotation of check_string is a substring of
        main_string, False otherwise.

    Examples:
        >>> rotation_substring_check("abcd", "abd")
        False
        >>> rotation_substring_check("hello", "ell")
        True
        >>> rotation_substring_check("whassup", "psus")
        False
        >>> rotation_substring_check("abab", "baa")
        True
        >>> rotation_substring_check("efef", "eeff")
        False
        >>> rotation_substring_check("himenss", "simen")
        True
    """
    check_length = len(check_string)
    if check_length == 0:
        # The empty string is trivially a substring of anything
        # (matches the original implementation's behavior).
        return True
    doubled = check_string + check_string
    # Every rotation of check_string is a length-m slice of the doubled string.
    rotations = {doubled[i:i + check_length] for i in range(check_length)}
    return any(main_string[i:i + check_length] in rotations
               for i in range(len(main_string) - check_length + 1))
|
def _next_sample_index ( self ) :
"""StochasticMux chooses its next sample stream randomly"""
|
return self . rng . choice ( self . n_active , p = ( self . stream_weights_ / self . weight_norm_ ) )
|
async def datacenters(self):
    """Queries for WAN coordinates of Consul servers.

    Returns:
        Mapping: WAN network coordinates for all Consul servers,
        keyed by datacenter name, e.g.::

            "dc1": {
                "Datacenter": "dc1",
                "Coordinates": [
                    {"Node": "agent-one",
                     "Coord": {"Adjustment": 0, "Error": 1.5,
                               "Height": 0, "Vec": [0,0,0,0,0,0,0,0]}}
                ]
            }

    This endpoint serves data out of the server's local Serf data about
    the WAN, so its results may vary as requests are handled by different
    servers in the cluster. It supports neither blocking queries nor any
    consistency modes.
    """
    response = await self._api.get("/v1/coordinate/datacenters")
    by_dc = {}
    for entry in response.body:
        by_dc[entry["Datacenter"]] = entry
    return by_dc
|
def rex(expr):
    """Build a key predicate from a regular expression, for use with
    transform functions. The predicate is truthy only for string keys
    matching the pattern from their start.
    """
    pattern = re.compile(expr)

    def _matches(key):
        return isinstance(key, six.string_types) and pattern.match(key)

    return _matches
|
def rename(self, node):
    """Translate a rename node into a latex qtree node.

    :param node: a treebrd node
    :return: a qtree subtree rooted at the node
    """
    child = self.translate(node.child)
    if node.attributes:
        attributes = '({})'.format(', '.join(node.attributes.names))
    else:
        attributes = ''
    return '[.${op}_{{{name}{attributes}}}$ {child} ]'.format(
        op=latex_operator[node.operator],
        name=node.name,
        attributes=attributes,
        child=child)
|
def create_notification(self):
    """Instead of the typical create_widget we use `create_notification`
    because after it's closed it needs created again.
    """
    d = self.declaration
    builder = self.builder = Notification.Builder(self.get_context(), d.channel_id)
    # NOTE(review): redundant re-read of self.declaration (already in d).
    d = self.declaration
    # Apply any custom settings
    if d.settings:
        builder.update(**d.settings)
    # Forward each declared item to its corresponding set_<name> handler.
    for k, v in self.get_declared_items():
        handler = getattr(self, 'set_{}'.format(k))
        handler(v)
    builder.setSmallIcon(d.icon or '@mipmap/ic_launcher')
    # app = self.get_context()
    # intent = Intent()
    # intent.setClass(app, )
    # builder.setContentIntent(PendingIntent.getActivity(app, 0, intent, 0))
    #: Set custom content if present
    # Only the first child widget is used as the custom content view.
    for view in self.child_widgets():
        builder.setCustomContentView(view)
        break
|
def is_citeable(publication_info):
    """Check some fields in order to define if the article is citeable.

    An article is citeable when at least one entry carries full journal
    information (title and volume) and at least one entry carries a page
    start or an article id — not necessarily the same entry.

    :param publication_info: publication_info field already populated
    :type publication_info: list
    """
    has_pub_info = any(
        'journal_title' in item and 'journal_volume' in item
        for item in publication_info)
    has_page_or_artid = any(
        'page_start' in item or 'artid' in item
        for item in publication_info)
    return has_pub_info and has_page_or_artid
|
def parse_meta(meta_str):
    """Parse the metadata for a single ds9 region string.

    Parameters
    ----------
    meta_str : `str`
        Meta string: everything after the close-paren of the region
        coordinate specification. Metadata is given as key=value pairs
        separated by whitespace, though values themselves may contain
        whitespace.

    Returns
    -------
    meta : `~collections.OrderedDict`
        Dictionary containing the meta data.
    """
    stripped = meta_str.strip()
    pairs = [(groups[0], groups[2]) for groups in regex_meta.findall(stripped)]
    # Whatever trails the last key=value pair is kept as a comment.
    extra_text = regex_meta.split(stripped)[-1]
    meta = OrderedDict()
    for key, raw_val in pairs:
        # The regex can capture trailing whitespace or quotes -- drop them.
        val = raw_val.strip().strip("'").strip('"')
        if key == 'text':
            val = val.lstrip("{").rstrip("}")
        if key == 'tag':
            # 'tag' may legitimately repeat; collect all values in a list.
            meta.setdefault(key, []).append(val)
        elif key in meta:
            raise ValueError("Duplicate key {0} found".format(key))
        else:
            meta[key] = val
    if extra_text:
        meta['comment'] = extra_text
    return meta
|
def has_more_pages(self):
    """:return: ``True`` if there are more pages available on the server."""
    # A server-supplied has_next flag, when present, is authoritative.
    if self.has_next is not None:
        return self.has_next
    # Otherwise derive the answer from page number and total page count.
    total_pages = self.get_total_pages()
    if self.page_number is None or total_pages is None:
        # Not enough information to decide either way.
        return None
    return self.page_number + 1 < total_pages
|
def render_stop_display(step: 'projects.ProjectStep', message: str):
    """Renders a stop action to the Cauldron display.

    Appends a "step-stop" HTML fragment to the step's report, showing the
    given message (or a default one) and, when it can be determined, the
    stack frame from which the stop was requested.
    """
    stack = render_stack.get_formatted_stack_frame(project=step.project, error_stack=False)
    try:
        # Find this module's own frame in the stack and report the frame
        # just above it, i.e. the caller that triggered the stop.
        names = [frame['filename'] for frame in stack]
        index = names.index(os.path.realpath(__file__))
        frame = stack[index - 1]
    except Exception:
        # Best effort: fall back to an empty frame when this file cannot
        # be located in the formatted stack.
        frame = {}
    stop_message = ('{}'.format(message) if message else 'This step was explicitly stopped prior to its completion')
    dom = templating.render_template('step-stop.html', message=stop_message, frame=frame)
    step.report.append_body(dom)
|
def delete_namespaced_daemon_set(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_daemon_set  # noqa: E501

    delete a DaemonSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: auto-generated OpenAPI client wrapper; it delegates to the
    # *_with_http_info variant and unwraps the data on the sync path.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
|
def start(self):
    """Start all the registered services.

    A new container is created for each service using the container class
    provided in the __init__ method. All containers are started
    concurrently; this call blocks until every startup routine completes.
    """
    names = ', '.join(self.service_names)
    _log.info('starting services: %s', names)
    # SpawningProxy fans the start() call out to all containers at once.
    SpawningProxy(self.containers).start()
    _log.debug('services started: %s', names)
|
def makedirs(self):
    """Create all parent folders of ``self.path`` if they do not exist."""
    parent = os.path.dirname(os.path.normpath(self.path))
    if not parent:
        # Path has no parent component; nothing to create.
        return
    try:
        os.makedirs(parent)
    except OSError:
        # Best effort: the directory may already exist (or be uncreatable).
        pass
|
def Chisholm(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1, rough_correction=False):
    r'''Calculates two-phase pressure drop with the Chisholm (1973) correlation
    from [1]_, also in [2]_ and [3]_.

    .. math::
        \frac{\Delta P_{tp}}{\Delta P_{lo}} = \phi_{ch}^2

    .. math::
        \phi_{ch}^2 = 1 + (\Gamma^2 - 1)\left\{B x^{(2-n)/2} (1-x)^{(2-n)/2}
        + x^{2-n}\right\}

    .. math::
        \Gamma^2 = \frac{\left(\frac{\Delta P}{L}\right)_{go}}
        {\left(\frac{\Delta P}{L}\right)_{lo}}

    For Gamma < 9.5:

    .. math::
        B = \frac{55}{G_{tp}^{0.5}} \text{ for } G_{tp} > 1900

    .. math::
        B = \frac{2400}{G_{tp}} \text{ for } 500 < G_{tp} < 1900

    .. math::
        B = 4.8 \text{ for } G_{tp} < 500

    For 9.5 < Gamma < 28:

    .. math::
        B = \frac{520}{\Gamma G_{tp}^{0.5}} \text{ for } G_{tp} < 600

    .. math::
        B = \frac{21}{\Gamma} \text{ for } G_{tp} > 600

    For Gamma > 28:

    .. math::
        B = \frac{15000}{\Gamma^2 G_{tp}^{0.5}}

    If `rough_correction` is True, the following correction to B is applied:

    .. math::
        \frac{B_{rough}}{B_{smooth}} = \left[0.5\left\{1 +
        \left(\frac{\mu_g}{\mu_l}\right)^2 + 10^{-600\epsilon/D}\right\}
        \right]^{\frac{0.25-n}{0.25}}

    .. math::
        n = \frac{\log\frac{f_{d,lo}}{f_{d,go}}}{\log\frac{Re_{go}}{Re_{lo}}}

    Parameters
    ----------
    m : float
        Mass flow rate of fluid, [kg/s]
    x : float
        Quality of fluid, [-]
    rhol : float
        Liquid density, [kg/m^3]
    rhog : float
        Gas density, [kg/m^3]
    mul : float
        Viscosity of liquid, [Pa*s]
    mug : float
        Viscosity of gas, [Pa*s]
    D : float
        Diameter of pipe, [m]
    roughness : float, optional
        Roughness of pipe for use in calculating friction factor, [m]
    L : float, optional
        Length of pipe, [m]
    rough_correction : bool, optional
        Whether or not to use the roughness correction proposed in the 1968
        version of the correlation

    Returns
    -------
    dP : float
        Pressure drop of the two-phase flow, [Pa]

    Notes
    -----
    Applicable for 0 < x < 1. n = 0.25, the exponent in the Blasius equation.
    Originally developed for smooth pipes, a roughness correction is included
    as well from Chisholm's 1968 work [4]_. Neither [2]_ nor [3]_ have any
    mention of the correction however.

    Examples
    --------
    >>> Chisholm(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
    ... mug=14E-6, D=0.05, roughness=0, L=1)
    1084.1489922923738

    References
    ----------
    .. [1] Chisholm, D. "Pressure Gradients due to Friction during the Flow of
       Evaporating Two-Phase Mixtures in Smooth Tubes and Channels."
       International Journal of Heat and Mass Transfer 16, no. 2 (February
       1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
    .. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
       Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
       State University, 2013. https://shareok.org/handle/11244/11109.
    .. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
       (2004). http://www.wlv.com/heat-transfer-databook/
    .. [4] Chisholm, D. "Research Note: Influence of Pipe Surface Roughness on
       Friction Pressure Gradient during Two-Phase Flow." Journal of Mechanical
       Engineering Science 20, no. 6 (December 1, 1978): 353-354.
       doi:10.1243/JMES_JOUR_1978_020_061_02.
    '''
    # Total (two-phase) mass flux through the pipe cross-section.
    G_tp = m / (pi / 4 * D ** 2)
    n = 0.25
    # Blasius friction factor exponent
    # Liquid-only properties, for calculation of dP_lo
    v_lo = m / rhol / (pi / 4 * D ** 2)
    Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
    fd_lo = friction_factor(Re=Re_lo, eD=roughness / D)
    dP_lo = fd_lo * L / D * (0.5 * rhol * v_lo ** 2)
    # Gas-only properties, for calculation of dP_go
    v_go = m / rhog / (pi / 4 * D ** 2)
    Re_go = Reynolds(V=v_go, rho=rhog, mu=mug, D=D)
    fd_go = friction_factor(Re=Re_go, eD=roughness / D)
    dP_go = fd_go * L / D * (0.5 * rhog * v_go ** 2)
    Gamma = (dP_go / dP_lo) ** 0.5
    # Select the B coefficient from the Gamma / mass-flux regime map above.
    if Gamma <= 9.5:
        if G_tp <= 500:
            B = 4.8
        elif G_tp < 1900:
            B = 2400. / G_tp
        else:
            B = 55 * G_tp ** -0.5
    elif Gamma <= 28:
        if G_tp <= 600:
            B = 520. * G_tp ** -0.5 / Gamma
        else:
            B = 21. / Gamma
    else:
        B = 15000. * G_tp ** -0.5 / Gamma ** 2
    if rough_correction:
        # 1968 roughness correction: recompute n from the actual friction
        # factors and scale B accordingly.
        n = log(fd_lo / fd_go) / log(Re_go / Re_lo)
        B_ratio = (0.5 * (1 + (mug / mul) ** 2 + 10 ** (-600 * roughness / D))) ** ((0.25 - n) / 0.25)
        B = B * B_ratio
    phi2_ch = 1 + (Gamma ** 2 - 1) * (B * x ** ((2 - n) / 2.) * (1 - x) ** ((2 - n) / 2.) + x ** (2 - n))
    return phi2_ch * dP_lo
|
def unique_hash(filepath: str, blocksize: int = 80) -> str:
    """Generate a short hash to identify a file quickly.

    Only the first ``blocksize`` bytes of the file are hashed (default 80),
    so this is fast but is NOT a full-content checksum: two files that
    share their first ``blocksize`` bytes hash identically.

    Args:
        filepath: Path of the file to hash.
        blocksize: Number of bytes to read from the start of the file.

    Returns:
        Hex digest of the SHA-1 hash of the first ``blocksize`` bytes.
    """
    digest = sha1()
    with open(filepath, "rb") as f:
        digest.update(f.read(blocksize))
    return digest.hexdigest()
|
def vasp_version_from_outcar(filename='OUTCAR'):
    """Read the first line of a VASP OUTCAR file, which carries the VASP
    source version string.

    Args:
        filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.

    Returns:
        (Str): The stripped first line of the OUTCAR file.
    """
    with open(filename) as outcar:
        return outcar.readline().strip()
|
def get_hla(sample, cromwell_dir, hla_glob):
    """Retrieve HLA calls and input fastq paths for a sample.

    Locates the sample's ``align/<sample>/hla`` directory under the
    cromwell run directory (via the supplied glob) and returns the
    OptiType input fastq and calls CSV paths inside it.
    """
    pattern = os.path.join(cromwell_dir, hla_glob, "align", sample, "hla")
    # Use the first matching run directory.
    hla_dir = glob.glob(pattern)[0]
    fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq")
    calls = os.path.join(hla_dir, "%s-optitype.csv" % sample)
    return fastq, calls
|
def _optimal_size ( self , capacity , error ) :
"""Calculates minimum number of bits in filter array and
number of hash functions given a number of enteries ( maximum )
and the desired error rate ( falese positives ) .
Example :
m , k = self . _ optimal _ size ( 3000 , 0.01 ) # m = 28756 , k = 7"""
|
m = math . ceil ( ( capacity * math . log ( error ) ) / math . log ( 1.0 / ( math . pow ( 2.0 , math . log ( 2.0 ) ) ) ) )
k = math . ceil ( math . log ( 2.0 ) * m / capacity )
return int ( m ) , int ( k )
|
def get(self, key, default=NoDefault):
    """Retrieve a value from its key.

    Retrieval steps are:
    1) Normalize the key
    2) For each option group:
       a) Retrieve the value at that key
       b) If no value exists, continue
       c) If the value is an instance of 'Default', continue
       d) Otherwise, return the value
    3) If no option had a non-default value for the key, return the
       first Default() option for the key (or :arg:`default`).
    """
    key = normalize_key(key)
    # Collected fallbacks, seeded with the caller-supplied default if any.
    fallbacks = [] if default is NoDefault else [default]
    for group in self.options:
        try:
            value = group[key]
        except KeyError:
            continue
        if isinstance(value, Default):
            fallbacks.append(value.value)
            continue
        return value
    return fallbacks[0] if fallbacks else NoDefault
|
def execute():
    """Run one provisioning pass over all configured DynamoDB tables and GSIs.

    For each table (and each GSI whose name matches a configured regexp key),
    delegate to ``table.ensure_provisioning`` / ``gsi.ensure_provisioning``
    and persist the consecutive scale-down check counters in the module-level
    ``CHECK_STATUS`` dict.  Unless the ``run_once`` global option is set,
    sleeps ``check_interval`` seconds before returning.
    """
    # Budget of tolerated unexpected BotoServerErrors before re-raising.
    boto_server_error_retries = 3
    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        # Previously recorded consecutive-check counters; default to 0 for
        # tables seen for the first time.
        try:
            table_num_consec_read_checks = CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0
        try:
            table_num_consec_write_checks = CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0
        try:
            # The return var shows how many times the scale-down criteria
            # has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = table.ensure_provisioning(table_name, table_key, table_num_consec_read_checks, table_num_consec_write_checks)
            CHECK_STATUS['tables'][table_name] = {'reads': table_num_consec_read_checks, 'writes': table_num_consec_write_checks}
            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']
                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()
                except AttributeError:
                    # Continue if there are not GSIs configured
                    continue
                for gsi_key in gsi_keys:
                    try:
                        # Config keys are regular expressions matched against
                        # the live index name.
                        if re.match(gsi_key, gsi_name):
                            logger.debug('Table {0} GSI {1} matches ' 'GSI config key {2}'.format(table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))
                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(gsi_key))
                        sys.exit(1)
            for gsi_name, gsi_key in sorted(gsi_names):
                unique_gsi_name = ':'.join([table_name, gsi_name])
                try:
                    gsi_num_consec_read_checks = CHECK_STATUS['gsis'][unique_gsi_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0
                try:
                    gsi_num_consec_write_checks = CHECK_STATUS['gsis'][unique_gsi_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0
                gsi_num_consec_read_checks, gsi_num_consec_write_checks = gsi.ensure_provisioning(table_name, table_key, gsi_name, gsi_key, gsi_num_consec_read_checks, gsi_num_consec_write_checks)
                CHECK_STATUS['gsis'][unique_gsi_name] = {'reads': gsi_num_consec_read_checks, 'writes': gsi_num_consec_write_checks}
        except JSONResponseError as error:
            # A deleted table is expected churn; log and move on.
            exception = error.body['__type'].split('#')[1]
            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(table_name, table_name))
                continue
        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error('Unknown boto error. Status: "{0}". ' 'Reason: "{1}". Message: {2}'.format(error.status, error.reason, error.message))
                logger.error('Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue
            else:
                raise
    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))
|
def run(args):
    """Extract regex matches from every line of the input file and print
    each extracted value on its own line.

    Args:
        args (argparse.Namespace): must provide ``pattern`` (regex string)
            and ``input_file`` (iterable of lines).
    """
    extractor = RegexExtractor(pattern=args.pattern)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for line in args.input_file:
            for extraction in extractor.extract(line):
                print(extraction.value)
|
def read_block_data(self, i2c_addr, register, force=None):
    """Read a block of up to 32-bytes from a given register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Start register
    :type register: int
    :param force: passed through to the address-selection ioctl
        # assumes same semantics as other smbus calls -- TODO confirm
    :type force: Boolean
    :return: List of bytes
    :rtype: list
    """
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA)
    ioctl(self.fd, I2C_SMBUS, msg)
    # SMBus block reads prefix the payload with a length byte; slice the
    # actual data out of the buffer accordingly.
    length = msg.data.contents.block[0]
    return msg.data.contents.block[1:length + 1]
|
def remove_from_organization(self, delete_account=False):
    """Remove a user from the organization's list of visible users.
    Optionally also delete the account.

    Deleting the account can only be done if the organization owns the
    account's domain.

    :param delete_account: Whether to delete the account after removing
        from the organization (default false)
    :return: None, because you cannot follow this command with another.
    """
    should_delete = bool(delete_account)
    self.append(removeFromOrg={"deleteAccount": should_delete})
    return None
|
def widont_html(value):
    """Add an HTML non-breaking space between the final two words at the end
    of (and in sentences just outside of) block level tags to avoid
    "widowed" words.

    Examples:
        >>> print(widont_html('<h2>Here is a simple example</h2><p>Single</p>'))  # doctest: +SKIP
        <h2>Here is a simple&nbsp;example</h2><p>Single</p>
    """
    def join_last_words(match):
        # Groups are (text-before, last-word, trailing-markup); re-join
        # the first two with a non-breaking space.
        return force_text('%s&nbsp;%s%s' % match.groups())
    return re_widont_html.sub(join_last_words, force_text(value))
|
def normalize_total(adata, target_sum=None, fraction=1, key_added=None, layers=None, layer_norm=None, inplace=True):
    """{norm_descr}

    {params_bulk}

    {norm_return}

    {examples}
    """
    # `fraction` limits which genes contribute to a cell's size factor.
    if fraction < 0 or fraction > 1:
        raise ValueError('Choose fraction between 0 and 1.')
    X = adata.X
    gene_subset = None
    if not inplace:
        # not recarray because need to support sparse
        dat = {}
    if fraction < 1:
        logg.msg('normalizing by count per cell for \
genes that make up less than fraction * total count per cell', r=True)
        X = adata.X
        counts_per_cell = X.sum(1)
        counts_per_cell = np.ravel(counts_per_cell)
        # A gene is excluded when, in ANY cell, it alone accounts for at
        # least `fraction` of that cell's total counts.
        gene_subset = (X > counts_per_cell[:, None] * fraction).sum(0)
        gene_subset = (np.ravel(gene_subset) == 0)
    else:
        logg.msg('normalizing by total count per cell', r=True)
    X = X if gene_subset is None else adata[:, gene_subset].X
    counts_per_cell = X.sum(1)
    # get rid of adata view
    counts_per_cell = np.ravel(counts_per_cell).copy()
    del X
    del gene_subset
    if key_added is not None:
        # Preserve the pre-normalization totals per cell in adata.obs.
        adata.obs[key_added] = counts_per_cell
    cell_subset = counts_per_cell > 0
    if not np.all(cell_subset):
        logg.warn('Some cells have total count of genes equal to zero')
    # `after` is the per-layer normalization target selected by `layer_norm`.
    if layer_norm == 'after':
        after = target_sum
    elif layer_norm == 'X':
        after = np.median(counts_per_cell[cell_subset])
    elif layer_norm is None:
        after = None
    else:
        raise ValueError('layer_norm should be "after", "X" or None')
    del cell_subset
    if inplace:
        # Divide in place when the container supports `/=`; otherwise
        # replace adata.X with a normalized copy.
        if hasattr(adata.X, '__itruediv__'):
            _normalize_data(adata.X, counts_per_cell, target_sum)
        else:
            adata.X = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)
    else:
        dat['X'] = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)
    layers = adata.layers.keys() if layers == 'all' else layers
    if layers is not None:
        for layer in layers:
            L = adata.layers[layer]
            counts = np.ravel(L.sum(1))
            if inplace:
                if hasattr(L, '__itruediv__'):
                    _normalize_data(L, counts, after)
                else:
                    adata.layers[layer] = _normalize_data(L, counts, after, copy=True)
            else:
                dat[layer] = _normalize_data(L, counts, after, copy=True)
    logg.msg('    finished', t=True, end=': ')
    logg.msg('normalized adata.X')
    if key_added is not None:
        logg.msg('and added \'{}\', counts per cell before normalization (adata.obs)'.format(key_added))
    return dat if not inplace else None
|
def get_cell_dimensions(self):
    """Calculate centroid, width, length and area of each mesh cell.

    :returns:
        Tuple of four elements, each being 2d numpy array.
        Each array has both dimensions less by one the dimensions
        of the mesh, since they represent cells, not vertices.
        Arrays contain the following cell information:

        #. centroids, 3d vectors in a Cartesian space,
        #. length (size along row of points) in km,
        #. width (size along column of points) in km,
        #. area in square km.
    """
    # Each quadrilateral cell is split by the diagonal into two triangles:
    # "top-left" (top + left edges) and "bottom-right" (bottom + right).
    points, along_azimuth, updip, diag = self.triangulate()
    top = along_azimuth[:-1]
    left = updip[:, :-1]
    tl_area = geo_utils.triangle_area(top, left, diag)
    top_length = numpy.sqrt(numpy.sum(top * top, axis=-1))
    left_length = numpy.sqrt(numpy.sum(left * left, axis=-1))
    bottom = along_azimuth[1:]
    right = updip[:, 1:]
    br_area = geo_utils.triangle_area(bottom, right, diag)
    bottom_length = numpy.sqrt(numpy.sum(bottom * bottom, axis=-1))
    right_length = numpy.sqrt(numpy.sum(right * right, axis=-1))
    # Cell area is the sum of its two triangle areas.
    cell_area = tl_area + br_area
    # Triangle centroids: mean of each triangle's three vertices.
    tl_center = (points[:-1, :-1] + points[:-1, 1:] + points[1:, :-1]) / 3
    br_center = (points[:-1, 1:] + points[1:, :-1] + points[1:, 1:]) / 3
    # Cell centroid / length / width are area-weighted averages over the
    # two triangles (opposite edges weighted by their triangle's area).
    cell_center = ((tl_center * tl_area.reshape(tl_area.shape + (1, )) + br_center * br_area.reshape(br_area.shape + (1, ))) / cell_area.reshape(cell_area.shape + (1, )))
    cell_length = ((top_length * tl_area + bottom_length * br_area) / cell_area)
    cell_width = ((left_length * tl_area + right_length * br_area) / cell_area)
    return cell_center, cell_length, cell_width, cell_area
|
def _sub_ms_char ( self , match ) :
"""Changes a MS smart quote character to an XML or HTML
entity , or an ASCII character ."""
|
orig = match . group ( 1 )
if self . smart_quotes_to == 'ascii' :
sub = self . MS_CHARS_TO_ASCII . get ( orig ) . encode ( )
else :
sub = self . MS_CHARS . get ( orig )
if type ( sub ) == tuple :
if self . smart_quotes_to == 'xml' :
sub = '&#x' . encode ( ) + sub [ 1 ] . encode ( ) + ';' . encode ( )
else :
sub = '&' . encode ( ) + sub [ 0 ] . encode ( ) + ';' . encode ( )
else :
sub = sub . encode ( )
return sub
|
def error_codes(self):
    """ThreatConnect error codes."""
    if self._error_codes is not None:
        return self._error_codes
    # Lazy import avoids a circular dependency at module import time.
    from .tcex_error_codes import TcExErrorCodes
    self._error_codes = TcExErrorCodes()
    return self._error_codes
|
def IntegerField(default=NOTHING, required=True, repr=True, cmp=True, key=None):
    """Create new int field on a model.

    :param default: any integer value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    # Resolve the effective default and the int validator from the shared
    # field-initialization helpers.
    default = _init_fields.init_default(required, default, None)
    validator = _init_fields.init_validator(required, int)
    # `converter` coerces assigned values to int, letting None through.
    return attrib(default=default, converter=converters.int_if_not_none, validator=validator, repr=repr, cmp=cmp, metadata=dict(key=key))
|
def init_original_response(self):
    """Fetch and cache the original response used as the comparison
    baseline for confirming ``is_cookie_necessary``.

    :return: the cleaned original response object.
    :raises ValueError: if the baseline request fails and yields no response.
    """
    if 'json' in self.request:
        # Serialize a `json` payload into raw `data` so the body bytes can
        # be replayed identically later.
        self.request['data'] = json.dumps(self.request.pop('json')).encode(self.encoding)
    r1 = self.req.request(retry=self.retry, timeout=self.timeout, **self.request)
    resp = r1.x
    # Explicit raise instead of `assert`: asserts are stripped under -O and
    # a failed baseline request must never pass silently.
    if not resp:
        raise ValueError('original_response should not be failed. %s' % self.request)
    self.encoding = self.encoding or resp.encoding
    self.original_response = self.ensure_response(r1)
    return self.original_response
|
def db990(self, value=None):
    """Corresponds to IDD Field `db990`

    Dry-bulb temperature corresponding to 90.0% annual cumulative
    frequency of occurrence (cold conditions)

    Args:
        value (float): value for IDD Field `db990`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and is stored unchecked.
        self._db990 = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `db990`'.format(value))
    self._db990 = converted
|
def touch_tip(self, location=None, radius=1.0, v_offset=-1.0, speed=60.0):
    """Touch the :any:`Pipette` tip to the sides of a well,
    with the intent of removing left-over droplets

    Notes
    -----
    If no `location` is passed, the pipette will touch_tip
    from it's current position.

    Parameters
    ----------
    location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
        The :any:`Placeable` (:any:`Well`) to perform the touch_tip.
        Can also be a tuple with first item :any:`Placeable`,
        second item relative :any:`Vector`
    radius : float
        Radius is a floating point describing the percentage of a well's
        radius. When radius=1.0, :any:`touch_tip()` will move to 100% of
        the wells radius. When radius=0.5, :any:`touch_tip()` will move to
        50% of the wells radius.
        Default: 1.0 (100%)
    speed : float
        The speed for touch tip motion, in mm/s.
        Default: 60.0 mm/s, Max: 80.0 mm/s, Min: 20.0 mm/s
    v_offset : float
        The offset in mm from the top of the well to touch tip.
        Default: -1.0 mm

    Returns
    -------
    This instance of :class:`Pipette`.

    Examples
    --------
    >>> from opentrons import instruments, labware, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> plate = labware.load('96-flat', '8') # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.aspirate(50, plate[0]) # doctest: +SKIP
    >>> p300.dispense(plate[1]).touch_tip() # doctest: +SKIP
    """
    if not self.tip_attached:
        log.warning("Cannot touch tip without a tip attached.")
    # Clamp speed to the supported [20.0, 80.0] mm/s window.
    if speed > 80.0:
        log.warning("Touch tip speeds greater than 80mm/s not allowed")
        speed = 80.0
    if speed < 20.0:
        # Fixed: this warning previously repeated the ">80mm/s" text.
        log.warning("Touch tip speeds less than 20mm/s not allowed")
        speed = 20.0
    if helpers.is_number(location):
        # Deprecated syntax: the vertical offset used to be passed
        # positionally where `location` now is.
        log.warning("Please use the `v_offset` named parameter")
        v_offset = location
        location = None
    # if no location specified, use the previously
    # associated placeable to get Well dimensions
    if location is None:
        location = self.previous_placeable
    do_publish(self.broker, commands.touch_tip, self.touch_tip, 'before', None, None, self, location, radius, v_offset, speed)
    # move to location if we're not already there
    if location != self.previous_placeable:
        self.move_to(location)
    v_offset = (0, 0, v_offset)
    well_edges = [
        location.from_center(x=radius, y=0, z=1),       # right edge
        location.from_center(x=radius * -1, y=0, z=1),  # left edge
        location.from_center(x=0, y=radius, z=1),       # back edge
        location.from_center(x=0, y=radius * -1, z=1)   # front edge
    ]
    # Apply vertical offset to well edges
    well_edges = map(lambda x: x + v_offset, well_edges)
    self.robot.gantry.push_speed()
    self.robot.gantry.set_speed(speed)
    [self.move_to((location, e), strategy='direct') for e in well_edges]
    self.robot.gantry.pop_speed()
    do_publish(self.broker, commands.touch_tip, self.touch_tip, 'after', self, None, self, location, radius, v_offset, speed)
    return self
|
def mogrify(self, query, args=None):
    """Returns the exact string that is sent to the database by calling
    the execute() method. This method follows the extension to the DB
    API 2.0 followed by Psycopg.

    :param query: ``str`` sql statement
    :param args: ``tuple`` or ``list`` of arguments for sql query
    """
    connection = self._get_db()
    if args is None:
        return query
    return query % self._escape_args(args, connection)
|
def palettize(arr, colors, values):
    """From start *values* apply *colors* to *data*.

    Maps each element of ``arr`` to the index of the value bin it falls
    into; indices are clipped to the valid color range.
    """
    # Append an upper bound past the data so digitize covers every element.
    upper_bound = max(np.nanmax(arr), values.max()) + 1
    bins = np.concatenate((values, [upper_bound]))
    indices = np.digitize(arr.ravel(), bins) - 1
    indices = indices.clip(min=0, max=len(values) - 1)
    try:
        # Preserve the mask when the input is a masked array.
        result = np.ma.array(indices.reshape(arr.shape), mask=arr.mask)
    except AttributeError:
        result = indices.reshape(arr.shape)
    return result, tuple(colors)
|
def _grid_in_property ( field_name , docstring , read_only = False , closed_only = False ) :
"""Create a GridIn property ."""
|
def getter ( self ) :
if closed_only and not self . _closed :
raise AttributeError ( "can only get %r on a closed file" % field_name )
# Protect against PHP - 237
if field_name == 'length' :
return self . _file . get ( field_name , 0 )
return self . _file . get ( field_name , None )
def setter ( self , value ) :
if self . _closed :
self . _coll . files . update_one ( { "_id" : self . _file [ "_id" ] } , { "$set" : { field_name : value } } )
self . _file [ field_name ] = value
if read_only :
docstring += "\n\nThis attribute is read-only."
elif closed_only :
docstring = "%s\n\n%s" % ( docstring , "This attribute is read-only and " "can only be read after :meth:`close` " "has been called." )
if not read_only and not closed_only :
return property ( getter , setter , doc = docstring )
return property ( getter , doc = docstring )
|
def depth_first_search(graph, root_node=None):
    """Searches through the tree in a depth-first fashion.

    (Docstring previously said "breadth-first", contradicting both the
    function name and the helper it delegates to.)

    If root_node is None, an arbitrary node will be used as the root.
    If root_node is not None, it will be used as the root for the search tree.

    Returns a list of nodes, in the order that they were reached.
    """
    # Only the visit ordering is needed; the parent/children lookup tables
    # produced by the helper are discarded.
    ordering, _parent_lookup, _children_lookup = depth_first_search_with_parent_data(graph, root_node)
    return ordering
|
def dot_product_self_attention_relative_v2(q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False):
    """Calculate relative position-aware dot-product self-attention.

    Only works for masked self-attention (no looking forward).
    The attention calculation is augmented with learned representations for the
    relative position between each element in q and each element in k and v.

    Args:
        q: a Tensor with shape [batch, heads, length, depth].
        k: a Tensor with shape [batch, heads, length, depth].
        v: a Tensor with shape [batch, heads, length, depth].
        bias: bias Tensor.
        max_relative_position: an integer indicating the maximum relative distance
            to look back - changing this invalidates checkpoints
        dropout_rate: a floating point number.
        image_shapes: optional tuple of integer scalars.
        name: an optional string.
        make_image_summary: Whether to make an attention image summary.
        dropout_broadcast_dims: an optional list of integers less than 4
            specifying in which dimensions to broadcast the dropout decisions.
            saves memory.
        heads_share_relative_embedding: a boolean indicating whether to share
            relative embeddings between attention heads.
        add_relative_to_values: a boolean for whether to add relative component to
            values.

    Returns:
        A Tensor.

    Raises:
        ValueError: if max_relative_position is not > 0.
    """
    if not max_relative_position:
        raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position))
    with tf.variable_scope(name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]):
        # This calculation only works for self attention.
        # q, k and v must therefore have the same shape.
        q.get_shape().assert_is_compatible_with(k.get_shape())
        q.get_shape().assert_is_compatible_with(v.get_shape())
        # Use separate embeddings suitable for keys and values.
        _, num_heads, length, depth_k = common_layers.shape_list(k)
        # [batch, num_heads, query_length, memory_length]
        logits = tf.matmul(q, k, transpose_b=True)
        key_relative_embeddings = get_relative_embeddings_left(max_relative_position, length, depth_k, num_heads, heads_share_relative_embedding, "key_relative_embeddings")
        rel_logits = matmul_with_relative_keys(q, key_relative_embeddings, heads_share_relative_embedding)
        # Re-index relative-position logits into absolute-position alignment
        # before adding them to the content logits.
        rel_logits = _relative_position_to_absolute_position_masked(rel_logits)
        logits += rel_logits
        if bias is not None:
            logits += bias
        weights = tf.nn.softmax(logits, name="attention_weights")
        # Dropping out the attention links for each of the heads.
        weights = common_layers.dropout_with_broadcast_dims(weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
        if common_layers.should_generate_summaries() and make_image_summary:
            attention_image_summary(weights, image_shapes)
        output = tf.matmul(weights, v)
        if add_relative_to_values:
            # [batch, num_heads, query_length, memory_length]
            relative_weights = _absolute_position_to_relative_position_masked(weights)
            depth_v = common_layers.shape_list(v)[3]
            value_relative_embeddings = get_relative_embeddings_left(max_relative_position, length, depth_v, num_heads, heads_share_relative_embedding, "value_relative_embeddings")
            output += matmul_with_relative_values(relative_weights, value_relative_embeddings, heads_share_relative_embedding)
        return output
|
def append(self, item):
    """Appends a `Monomer` to the `Polymer`.

    Notes
    -----
    Does not update labelling.

    Raises
    ------
    TypeError
        If `item` is not a `Monomer`.
    """
    # Guard clause keeps the happy path unindented; also fixes the
    # "an Polymer" grammar slip in the original error message.
    if not isinstance(item, Monomer):
        raise TypeError('Only Monomer objects can be appended to a Polymer.')
    self._monomers.append(item)
|
def serialize_unicode(self, data):
    """Special handling for serializing unicode strings in Py2.
    Encode to UTF-8 if unicode, otherwise handle as a str.

    :param data: Object to be serialized.
    :rtype: str
    """
    try:
        # Enum-like wrappers carry their payload in `.value`; use it when
        # present.
        return data.value
    except AttributeError:
        pass
    try:
        if isinstance(data, unicode):
            # Python 2: encode a unicode object down to UTF-8 bytes.
            return data.encode(encoding='utf-8')
    except NameError:
        # Python 3: `unicode` is undefined, so plain str() suffices.
        return str(data)
    else:
        # Python 2 non-unicode (byte string or other object): str() it.
        return str(data)
|
def client_for(self, config_path, quiet=False, bootstrap_server=False, create_client=False):
    """Get a cached client for a project, otherwise create one.

    Returns None when the path is not cached and ``create_client`` is
    False, or when a freshly created client fails setup (it is then not
    cached, but still returned).
    """
    abs_path = os.path.abspath(config_path)
    if abs_path in self.clients:
        return self.clients[abs_path]
    if not create_client:
        return None
    new_client = self.create_client(config_path)
    # Cache only clients whose setup succeeded.
    if new_client.setup(quiet=quiet, bootstrap_server=bootstrap_server):
        self.clients[abs_path] = new_client
    return new_client
|
def get(self, *args, **kwargs) -> "QuerySet":
    """Fetch exactly one object matching the parameters.

    Builds a filtered queryset flagged for single-object retrieval; the
    limit of 2 lets the executor detect "more than one result".
    """
    queryset = self.filter(*args, **kwargs)
    queryset._limit, queryset._get = 2, True
    return queryset
|
def set_display_name(self, display_name):
    '''Set display name of a system independently of upload.

    :param display_name: new display name for this host
    :return: True on success, False on failure; on the legacy path the
        legacy helper's return value is passed through, and a falsy
        system-lookup result is returned unchanged.
    '''
    if self.config.legacy_upload:
        # Older upload API: delegate to the legacy code path.
        return self._legacy_set_display_name(display_name)
    system = self._fetch_system_by_machine_id()
    if not system:
        # Lookup failed; propagate the falsy result unchanged.
        return system
    inventory_id = system[0]['id']
    req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id
    try:
        net_logger.info("PATCH %s", req_url)
        res = self.session.patch(req_url, json={'display_name': display_name})
    except (requests.ConnectionError, requests.Timeout) as e:
        logger.error(e)
        logger.error('The Insights API could not be reached.')
        return False
    # Non-2xx responses are reported by handle_fail_rcs.
    if (self.handle_fail_rcs(res)):
        logger.error('Could not update display name.')
        return False
    logger.info('Display name updated to ' + display_name + '.')
    return True
|
def global_state(self):
    """Returns global variables for generating function from ``func_code``.

    Includes compiled regular expressions and imports, so it does not
    have to do it every time when validation function is called.
    """
    # Ensure the code (and its regexps) exist before exposing them.
    self._generate_func_code()
    return {
        'REGEX_PATTERNS': self._compile_regexps,
        're': re,
        'JsonSchemaException': JsonSchemaException,
    }
|
def start_search(self):
    """Start the Gateway Search Request and return the address information

    :rtype: (string, int)
    :return: a tuple (string(IP), int(Port)) when found or None when
        timeout occurs
    """
    self._asyncio_loop = asyncio.get_event_loop()
    # Creating Broadcast Receiver
    coroutine_listen = self._asyncio_loop.create_datagram_endpoint(lambda: self.KNXSearchBroadcastReceiverProtocol(self._process_response, self._timeout_handling, self._timeout, self._asyncio_loop), local_addr=(self._broadcast_ip_address, 0))
    self._listener_transport, listener_protocol = self._asyncio_loop.run_until_complete(coroutine_listen)
    # We are ready to fire the broadcast message
    # (the broadcaster is told the listener's ephemeral port so replies
    # come back to the receiver endpoint).
    coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint(lambda: self.KNXSearchBroadcastProtocol(self._asyncio_loop, self._listener_transport.get_extra_info('sockname')[1]), remote_addr=(self._broadcast_address, self._broadcast_port))
    self._broadcaster_transport, broadcast_protocol = self._asyncio_loop.run_until_complete(coroutine_broadcaster)
    # Waiting for all Broadcast receive or timeout
    # NOTE(review): the receiver/timeout callbacks presumably stop the
    # loop and fill in the resolved address fields -- confirm in protocols.
    self._asyncio_loop.run_forever()
    # Got Response or Timeout
    if self._resolved_gateway_ip_address is None and self._resolved_gateway_ip_port is None:
        LOGGER.debug("Gateway not found!")
        return None
    else:
        LOGGER.debug("Gateway found at %s:%s", self._resolved_gateway_ip_address, self._resolved_gateway_ip_port)
        return self._resolved_gateway_ip_address, self._resolved_gateway_ip_port
|
def _clean_result ( self , text ) :
"""Remove double spaces , punctuation and escapes apostrophes ."""
|
text = re . sub ( '\s\s+' , ' ' , text )
text = re . sub ( '\.\.+' , '.' , text )
text = text . replace ( "'" , "\\'" )
return text
|
def OnExport(self, event):
    """File export event handler

    Currently, only CSV export is supported

    :param event: wx event object (standard handler signature; unused)
    """
    code_array = self.main_window.grid.code_array
    tab = self.main_window.grid.current_table
    selection = self.main_window.grid.selection
    # Check if no selection is present
    selection_bbox = selection.get_bbox()
    f2w = get_filetypes2wildcards(["csv", "pdf", "svg"])
    filters = f2w.keys()
    wildcards = f2w.values()
    wildcard = "|".join(wildcards)
    if selection_bbox is None:
        # No selection --> Use smallest filled area for bottom right edge
        maxrow, maxcol, __ = code_array.get_last_filled_cell(tab)
        (top, left), (bottom, right) = (0, 0), (maxrow, maxcol)
    else:
        (top, left), (bottom, right) = selection_bbox
    # Generator of row and column keys in correct order
    # (None bounds mean "open-ended"; clamp them to the grid shape).
    __top = 0 if top is None else top
    __bottom = code_array.shape[0] if bottom is None else bottom + 1
    __left = 0 if left is None else left
    __right = code_array.shape[1] if right is None else right + 1

    def data_gen(top, bottom, left, right):
        # Yields one generator of cell values per row within the bbox.
        for row in xrange(top, bottom):
            yield (code_array[row, col, tab] for col in xrange(left, right))

    data = data_gen(__top, __bottom, __left, __right)
    preview_data = data_gen(__top, __bottom, __left, __right)
    # Get target filepath from user
    # No selection --> Provide svg export of current cell
    # if current cell is a matplotlib figure
    if selection_bbox is None:
        cursor = self.main_window.grid.actions.cursor
        figure = code_array[cursor]
        if Figure is not None and isinstance(figure, Figure):
            wildcard += "|" + _("SVG of current cell") + " (*.svg)|*.svg" + "|" + _("EPS of current cell") + " (*.eps)|*.eps" + "|" + _("PS of current cell") + " (*.ps)|*.ps" + "|" + _("PDF of current cell") + " (*.pdf)|*.pdf" + "|" + _("PNG of current cell") + " (*.png)|*.png"
            filters.append("cell_svg")
            filters.append("cell_eps")
            filters.append("cell_ps")
            filters.append("cell_pdf")
            filters.append("cell_png")
    message = _("Choose filename for export.")
    style = wx.SAVE
    path, filterindex = self.interfaces.get_filepath_findex_from_user(wildcard, message, style)
    if path is None:
        return
    # If an single cell is exported then the selection bbox
    # has to be changed to the current cell
    if filters[filterindex].startswith("cell_"):
        data = figure
    # Export file
    self.main_window.actions.export_file(path, filters[filterindex], data, preview_data)
|
def addAdminResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
    """Add Site Resource

    Add a custom implementation of a served http resource.

    :param pluginSubPath: The resource path where you want to serve this resource.
    :param resource: The resource to serve.
    :return: None
    """
    # Trim surrounding slashes so the child is registered under a clean
    # path segment.
    normalisedPath = pluginSubPath.strip(b'/')
    self.__rootAdminResource.putChild(normalisedPath, resource)
|
def convert_machine_list_time_val(text: str) -> datetime.datetime:
    '''Convert RFC 3659 time-val to datetime objects.

    The value is interpreted as ``YYYYMMDDHHMMSS`` in UTC; anything past
    the first 14 characters (e.g. fractional seconds) is ignored.

    :raises ValueError: if fewer than 14 characters are supplied.
    '''
    # TODO: implement fractional seconds
    text = text[:14]
    if len(text) != 14:
        raise ValueError('Time value not 14 chars')
    year = int(text[0:4])
    month, day, hour, minute, second = (int(text[pos:pos + 2]) for pos in range(4, 14, 2))
    return datetime.datetime(year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc)
|
def checkpoint_filepath(checkpoint, pm):
    """Create filepath for indicated checkpoint.

    :param str | pypiper.Stage checkpoint: Pipeline phase/stage or one's name
    :param pypiper.PipelineManager | pypiper.Pipeline pm: manager of a pipeline
        instance, relevant for output folder path.
    :return str: standardized checkpoint name for file, plus extension
    :raise ValueError: if the checkpoint is given as absolute path that does
        not point within pipeline output folder
    """
    # A string checkpoint may already be a checkpoint-like filename rather
    # than a bare stage name; validate and pass such inputs through.
    if isinstance(checkpoint, str):
        if os.path.isabs(checkpoint):
            if not is_in_file_tree(checkpoint, pm.outfolder):
                raise ValueError(
                    "Absolute checkpoint path '{}' is not in pipeline output "
                    "folder '{}'".format(checkpoint, pm.outfolder))
            return checkpoint
        _, extension = os.path.splitext(checkpoint)
        if extension == CHECKPOINT_EXTENSION:
            return pipeline_filepath(pm, filename=checkpoint)
    # Accept a Pipeline in place of a PipelineManager without importing
    # Pipeline; a bare manager has no 'manager' attribute.
    pm = getattr(pm, 'manager', pm)
    # Embed the pipeline name in the checkpoint filename so that several
    # pipelines sharing one output folder (e.g. per-sample) can define
    # same-named stages (trim_reads, align_reads, ...) without their
    # checkpoint files colliding.
    checkpoint_name = checkpoint_filename(checkpoint, pipeline_name=pm.name)
    return pipeline_filepath(pm, filename=checkpoint_name)
|
def actions_for_project(self, project):
    """Compile & Run the experiment with -O3 enabled."""
    # Keep frame pointers so profilers can unwind the optimized build.
    project.cflags = ["-O3", "-fno-omit-frame-pointer"]
    runtime_ext = run.RuntimeExtension(project, self)
    project.runtime_extension = time.RunWithTime(runtime_ext)
    return self.default_runtime_actions(project)
|
def tree_analysisOutput(self, *args, **kwargs):
    """An optional method for looping over the <outputTree> and
    calling an outputcallback on the analysis results at each
    path.

    Only call this if self.b_persisAnalysisResults is True.

    :param kwargs: 'outputcallback' -- callable invoked with a
        (path, analysis) tuple for every entry in ``self.d_outputTree``;
        remaining kwargs are forwarded to the callback.
    :return: dict with a 'status' flag.
    """
    fn_outputcallback = None
    for k, v in kwargs.items():
        if k == 'outputcallback':
            fn_outputcallback = v
    index = 1
    total = len(self.d_inputTree.keys())
    for path, d_analysis in self.d_outputTree.items():
        self.simpleProgress_show(index, total)
        self.dp.qprint("Processing analysis results in output: %s" % path)
        d_output = fn_outputcallback((path, d_analysis), **kwargs)
        # BUG fix: the progress index was never advanced, so the
        # progress display was stuck at 1 for every path.
        index += 1
    return {'status': True}
|
def insert(self, data):
    """Set the bit at each local-hash position to 1 for *data*."""
    if not data:
        return
    digest = self._compress_by_md5(data)
    # The first two hex chars of the digest route the entry to one of
    # ``block_num`` blocks, selecting which redis key holds its bits.
    block_name = self.key + str(int(digest[0:2], 16) % self.block_num)
    for hasher in self.hash_function:
        self.server.setbit(block_name, hasher.hash(digest), 1)
|
def set_object(cache, template, indexes, data):
    """Set an object in Redis using a pipeline.

    Only the fields present in both *template* and *data* are written.

    Arguments:
        template: dict mapping field names to redis-key template strings
            using named string interpolation. Example:
            'username': 'user:%(id)s:username',
            'email': 'user:%(id)s:email',
            'phone': 'user:%(id)s:phone'
        indexes: dict of values used to fill in the key templates. Example:
            'id': 342
        data: dict of field values to store. Example:
            'username': 'bob',
            'email': 'bob@example.com',
            'phone': '555-555-5555'
    """
    # TODO(mattmillr): Handle expiration times
    with cache as redis_connection:
        pipeline = redis_connection.pipeline()
        shared_fields = set(template) & set(data)
        for field in shared_fields:
            pipeline.set(template[field] % indexes, str(data[field]))
        pipeline.execute()
|
def upload_image(self, image_file, referer_url=None, title=None, desc=None, created_at=None, collection_id=None):
    """Upload an image.

    :param image_file: File-like object of an image file
    :param referer_url: Referer site URL
    :param title: Site title
    :param desc: Comment
    :param created_at: Image's created time in unix time
    :param collection_id: Collection ID
    """
    url = self.upload_url + '/api/upload'
    # Only fields that were actually supplied are sent in the form data.
    optional_fields = {
        'referer_url': referer_url,
        'title': title,
        'desc': desc,
        'created_at': None if created_at is None else str(created_at),
        'collection_id': collection_id,
    }
    data = {key: value for key, value in optional_fields.items() if value is not None}
    files = {'imagedata': image_file}
    response = self._request_url(url, 'post', data=data, files=files, with_access_token=True)
    headers, result = self._parse_and_check(response)
    return Image.from_dict(result)
|
def trim_tree_before ( element , include_element = True , keep_head = True ) :
    """Removes the document tree preceding the given element. If include_element
    is True, the given element is kept in the tree, otherwise it is removed."""

    # Walk up from ``element`` toward the root; at each level, delete every
    # sibling that precedes the current node so nothing earlier in document
    # order survives. ``keep_head`` spares <head> elements from removal.
    el = element
    for parent_el in element . iterancestors ( ) :
        # Drop text that appears before this parent's first child.
        parent_el . text = None
        if el != element or include_element :
            el = el . getprevious ( )
        else :
            # ``element`` itself is being removed (first iteration only):
            # promote its tail text to the parent's leading text, and leave
            # ``el`` pointing at ``element`` so the loop below removes it too.
            parent_el . text = el . tail
        # Remove ``el`` and all of its preceding siblings at this level.
        while el is not None :
            remove_el = el
            el = el . getprevious ( )
            tag = remove_el . tag
            # ``tag`` may be a callable for comments/PIs, so confirm it is a
            # string before comparing against 'head'.
            is_head = isinstance ( tag , string_class ) and tag . lower ( ) == 'head'
            if not keep_head or not is_head :
                parent_el . remove ( remove_el )
        # Continue one level up: the parent's preceding siblings are next.
        el = parent_el
|
def load_data(train_path='./data/regression.train', test_path='./data/regression.test'):
    '''Load or create dataset'''
    print('Load data...')
    df_train = pd.read_csv(train_path, header=None, sep='\t')
    df_test = pd.read_csv(test_path, header=None, sep='\t')
    # Hold out the last 10% of the training rows as an evaluation set.
    split_num = int(0.9 * len(df_train))
    labels = df_train[0].values
    y_test = df_test[0].values
    features = df_train.drop(0, axis=1).values
    X_test = df_test.drop(0, axis=1).values
    y_train, y_eval = labels[:split_num], labels[split_num:]
    X_train, X_eval = features[:split_num, :], features[split_num:, :]
    # create dataset for lightgbm
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_eval, y_eval, reference=lgb_train)
    return lgb_train, lgb_eval, X_test, y_test
|
def mkmanpage(name):
    """Return man page content for the given `cmdln.Cmdln` subclass name.

    :param name: Fully dotted path to the class,
        e.g. 'mypkg.mymod.MyCmdln'.

    Writes the generated man-page sections to stdout.
    """
    import importlib
    mod_name, class_name = name.rsplit('.', 1)
    # BUG fix: ``__import__('a.b')`` returns the *top-level* package 'a',
    # so the class lookup below failed for nested module paths.
    # importlib.import_module returns the leaf module itself.
    mod = importlib.import_module(mod_name)
    inst = getattr(mod, class_name)()
    sections = cmdln.man_sections_from_cmdln(inst)
    sys.stdout.write(''.join(sections))
|
def _is_subsequence_of ( self , sub , sup ) :
"""Parameters
sub : str
sup : str
Returns
bool"""
|
return bool ( re . search ( ".*" . join ( sub ) , sup ) )
|
def encode_index_req(self, bucket, index, startkey, endkey=None, return_terms=None, max_results=None, continuation=None, timeout=None, term_regex=None, streaming=False):
    """Encodes a secondary index request into the protobuf message.

    :param bucket: the bucket whose index to query
    :type bucket: string
    :param index: the index to query
    :type index: string
    :param startkey: the value or beginning of the range
    :type startkey: integer, string
    :param endkey: the end of the range
    :type endkey: integer, string
    :param return_terms: whether to return the index term with the key
    :type return_terms: bool
    :param max_results: the maximum number of results to return (page size)
    :type max_results: integer
    :param continuation: the opaque continuation returned from a
       previous paginated request
    :type continuation: string
    :param timeout: a timeout value in milliseconds, or 'infinity'
    :type timeout: int
    :param term_regex: a regular expression used to filter index terms
    :type term_regex: string
    :param streaming: encode as streaming request
    :type streaming: bool
    :rtype riak.pb.riak_kv_pb2.RpbIndexReq
    """
    req = riak.pb.riak_kv_pb2.RpbIndexReq(
        bucket=str_to_bytes(bucket.name),
        index=str_to_bytes(index))
    self._add_bucket_type(req, bucket.bucket_type)
    if endkey is None:
        # Exact-match query against a single index term.
        req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.eq
        req.key = str_to_bytes(str(startkey))
    else:
        # Range query from startkey to endkey.
        req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.range
        req.range_min = str_to_bytes(str(startkey))
        req.range_max = str_to_bytes(str(endkey))
    if return_terms is not None:
        req.return_terms = return_terms
    if max_results:
        req.max_results = max_results
    if continuation:
        req.continuation = str_to_bytes(continuation)
    if timeout:
        # 'infinity' is encoded as a zero timeout on the wire.
        req.timeout = 0 if timeout == 'infinity' else timeout
    if term_regex:
        req.term_regex = str_to_bytes(term_regex)
    req.stream = streaming
    return Msg(riak.pb.messages.MSG_CODE_INDEX_REQ,
               req.SerializeToString(),
               riak.pb.messages.MSG_CODE_INDEX_RESP)
|
def crval(self):
    """Get the world coordinate of the reference pixel.

    Falls back to the RA-DEG/DEC-DEG keywords when no WCS is available.

    @rtype: float, float
    @raise KeyError: if neither a WCS nor the RA/DEC keywords are present
    """
    try:
        return self.wcs.crval1, self.wcs.crval2
    except Exception as ex:
        logging.debug("Couldn't get CRVAL from WCS: {}".format(ex))
        logging.debug("Trying RA/DEC values")
        try:
            return (float(self['RA-DEG']), float(self['DEC-DEG']))
        except KeyError as ke:
            # BUG fix: the original constructed this KeyError but never
            # raised it, silently returning None on missing keywords.
            raise KeyError("Can't build CRVAL1/2 missing keyword: {}".format(ke.args[0])) from ke
|
def name(self):
    """MessageHandler name."""
    raw = lib.EnvGetDefmessageHandlerName(self._env, self._cls, self._idx)
    return ffi.string(raw).decode()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.