signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _validate_signal ( self , sig ) :
"""Internal helper to validate a signal .
Raise ValueError if the signal number is invalid or uncatchable .
Raise RuntimeError if there is a problem setting up the handler ."""
|
if not isinstance ( sig , int ) :
raise TypeError ( 'sig must be an int, not {!r}' . format ( sig ) )
if signal is None :
raise RuntimeError ( 'Signals are not supported' )
if not ( 1 <= sig < signal . NSIG ) :
raise ValueError ( 'sig {} out of range(1, {})' . format ( sig , signal . NSIG ) )
if sys . platform == 'win32' :
raise RuntimeError ( 'Signals are not really supported on Windows' )
|
def status(name=None, user=None, conf_file=None, bin_env=None):
    '''List programs and its state

    user
        user to run supervisorctl as
    conf_file
        path to supervisord config file
    bin_env
        path to supervisorctl bin or path to virtualenv with supervisor
        installed

    CLI Example:

    .. code-block:: bash

        salt '*' supervisord.status
    '''
    ret = {}
    for line in status_raw(name, user, conf_file, bin_env).splitlines():
        # A full status line is "<process> <state> <reason...>"; lines
        # without a reason get an empty one.
        fields = line.split(None, 2)
        if len(fields) > 2:
            process, state, reason = fields
        else:
            process, state, reason = line.split() + ['']
        ret[process] = {'state': state, 'reason': reason}
    return ret
|
def process_import(self, request, *args, **kwargs):
    """Perform the actual import action (after the user has confirmed it).

    Re-reads the uploaded data from temporary storage, decodes it when the
    format is textual, builds a dataset and hands it to ``process_dataset``;
    the temporary file is removed afterwards.
    """
    if not self.has_import_permission(request):
        raise PermissionDenied

    confirm_form = self.get_confirm_import_form()(request.POST)
    if confirm_form.is_valid():
        cleaned = confirm_form.cleaned_data
        input_format = self.get_import_formats()[int(cleaned['input_format'])]()
        tmp_storage = self.get_tmp_storage_class()(name=cleaned['import_file_name'])
        data = tmp_storage.read(input_format.get_read_mode())
        # Binary formats are passed through untouched; text is decoded
        # with the configured source encoding.
        if not input_format.is_binary() and self.from_encoding:
            data = force_text(data, self.from_encoding)
        dataset = input_format.create_dataset(data)
        result = self.process_dataset(dataset, confirm_form, request,
                                      *args, **kwargs)
        tmp_storage.remove()
        return self.process_result(result, request)
|
def get_stp_mst_detail_output_cist_port_designated_port_id(self, **kwargs):
    """Auto Generated Code

    Build the XML request tree
    ``get_stp_mst_detail/output/cist/port/designated-port-id`` and pass it
    to the callback.

    Keyword Args:
        designated_port_id: text value for the leaf element (required).
        callback: callable invoked with the element tree; defaults to
            ``self._callback``.

    Returns:
        Whatever the callback returns for the generated element.
    """
    # The original built a throwaway ET.Element("config") and immediately
    # rebound the name; the root actually sent is get_stp_mst_detail.
    config = ET.Element("get_stp_mst_detail")
    output = ET.SubElement(config, "output")
    cist = ET.SubElement(output, "cist")
    port = ET.SubElement(cist, "port")
    designated_port_id = ET.SubElement(port, "designated-port-id")
    designated_port_id.text = kwargs.pop('designated_port_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def get_osd_config(self, osd_version):
    """Return the config section for the requested OpenSeadragon version.

    Raises IIIFStaticError, listing the supported versions, when the
    requested version is not configured.
    """
    # Guard clause: unknown version -> helpful error with alternatives.
    if osd_version not in self.osd_config:
        raise IIIFStaticError(
            "OpenSeadragon version %s not supported, available versions are %s"
            % (osd_version, ', '.join(sorted(self.osd_config.keys()))))
    return self.osd_config[osd_version]
|
def mtime(path):
    """Return the modification time of *path*, or -1 if it does not exist.

    Uses EAFP rather than an exists()/stat() pair so the file cannot
    disappear between the check and the stat (TOCTOU race); any stat
    failure is reported as "not present" (-1), matching the contract.
    """
    try:
        return os.stat(path).st_mtime
    except OSError:
        return -1
|
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
    """Load & generate training examples from multivariate time series data.

    :param data_dir: directory containing electricity.txt
    :param max_records: optional cap on the number of raw time steps used
    :param q: window length (past steps per training example)
    :param horizon: number of steps ahead of the window to predict
    :param splits: (train_fraction, valid_fraction); the remainder is test
    :param batch_size: batch size for the returned iterators
    :return: data iters & variables required to define network architecture
    """
    # Read in data as numpy array.
    df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
    # DataFrame.as_matrix() was removed in pandas 1.0; .values is the
    # long-supported equivalent.
    x = df.iloc[:, :].astype(float).values
    x = x[:max_records] if max_records else x

    # Construct training examples based on horizon and window.
    x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
    y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
    for n in range(x.shape[0]):
        if n + 1 < q:
            # Not enough history yet for a full window.
            continue
        elif n + 1 + horizon > x.shape[0]:
            # Target would fall past the end of the series.
            continue
        else:
            y_n = x[n + horizon, :]
            x_n = x[n + 1 - q:n + 1, :]
            # BUGFIX: the original wrote x_ts[n - q], which puts the first
            # window at index -1 (the last row) and never fills row 0;
            # n + 1 - q is the correct 0-based slot for the window ending
            # at n.
            x_ts[n + 1 - q] = x_n
            y_ts[n + 1 - q] = y_n

    # Split into training, validation and testing data.
    training_examples = int(x_ts.shape[0] * splits[0])
    valid_examples = int(x_ts.shape[0] * splits[1])
    x_train, y_train = x_ts[:training_examples], y_ts[:training_examples]
    x_valid = x_ts[training_examples:training_examples + valid_examples]
    y_valid = y_ts[training_examples:training_examples + valid_examples]
    x_test = x_ts[training_examples + valid_examples:]
    y_test = y_ts[training_examples + valid_examples:]

    # Build iterators to feed batches to network.
    train_iter = mx.io.NDArrayIter(data=x_train, label=y_train, batch_size=batch_size)
    val_iter = mx.io.NDArrayIter(data=x_valid, label=y_valid, batch_size=batch_size)
    test_iter = mx.io.NDArrayIter(data=x_test, label=y_test, batch_size=batch_size)
    return train_iter, val_iter, test_iter
|
def oldest_frame(self, raw=False):
    """Return the oldest frame stored in the panel buffer.

    With ``raw=True`` the slice comes from the underlying ndarray
    (``buffer.values``) instead of the pandas indexer.
    """
    source = self.buffer.values if raw else self.buffer.iloc
    return source[:, self._start_index, :]
|
def set_hparams_from_args(args):
    """Set hparams overrides from unparsed args list.

    Any ``--hp_<name> <value>`` pair in *args* is appended to
    ``FLAGS.hparams`` as ``name=value``; anything else is logged as an
    unknown flag and skipped.
    """
    if not args:
        return

    hp_prefix = "--hp_"
    tf.logging.info("Found unparsed command-line arguments. Checking if any "
                    "start with %s and interpreting those as hparams "
                    "settings.", hp_prefix)

    pairs = []
    i = 0
    while i < len(args):
        arg = args[i]
        # BUGFIX: guard i + 1 so a trailing --hp_ flag with no value does
        # not raise IndexError; it is reported as unknown instead.
        if arg.startswith(hp_prefix) and i + 1 < len(args):
            pairs.append((arg[len(hp_prefix):], args[i + 1]))
            i += 2
        else:
            tf.logging.warn("Found unknown flag: %s", arg)
            i += 1

    as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs])
    if FLAGS.hparams:
        # Append to any hparams already set on the flag.
        as_hparams = "," + as_hparams
    FLAGS.hparams += as_hparams
|
def main():
    """parse commandline arguments and print result

    Walks an owserver tree starting at the URI on the command line and
    prints every readable entity with its value.
    """
    # setup command line parsing a la argpase
    parser = argparse.ArgumentParser()
    # positional args
    parser.add_argument('uri', metavar='URI', nargs='?', default='/',
                        help='[owserver:]//server:port/entity')
    # optional args for temperature scale (mutually exclusive; Celsius default)
    parser.set_defaults(t_flags=protocol.FLG_TEMP_C)
    tempg = parser.add_mutually_exclusive_group()
    tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C,
                       help='Celsius(default) temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F,
                       help='Fahrenheit temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K,
                       help='Kelvin temperature scale',
                       dest='t_flags', action='store_const', )
    tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R,
                       help='Rankine temperature scale',
                       dest='t_flags', action='store_const', )
    # optional arg for address format: maps each human-readable choice to
    # the corresponding protocol format flag
    fcodes = collections.OrderedDict((
        ('f.i', protocol.FLG_FORMAT_FDI),
        ('fi', protocol.FLG_FORMAT_FI),
        ('f.i.c', protocol.FLG_FORMAT_FDIDC),
        ('f.ic', protocol.FLG_FORMAT_FDIC),
        ('fi.c', protocol.FLG_FORMAT_FIDC),
        ('fic', protocol.FLG_FORMAT_FIC), ))
    parser.set_defaults(format='f.i')
    parser.add_argument('-f', '--format', choices=fcodes,
                        help='format for 1-wire unique serial IDs display')
    parser.add_argument('--nosys', '--only-sensors', action='store_false',
                        dest='bus', help='do not descend system directories')
    # parse command line args
    args = parser.parse_args()
    # parse args.uri and substitute defaults
    urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False)
    if urlc.scheme != 'owserver':
        parser.error("Invalid URI scheme '{}:'".format(urlc.scheme))
    assert not urlc.fragment
    if urlc.query:
        parser.error("Invalid URI '{}', no query component allowed".format(args.uri))
    host = urlc.hostname or 'localhost'
    port = urlc.port or 4304  # 4304 is the standard owserver port
    # create owserver proxy object; exit with the connection error on failure
    try:
        proxy = protocol.proxy(host, port,
                               flags=args.t_flags | fcodes[args.format],
                               persistent=True)
    except (protocol.ConnError, protocol.ProtocolError) as error:
        parser.exit(status=1, message=str(error) + '\n')

    def walk(path):
        # Recursively print leaf values; directory paths end with '/'.
        try:
            if not path.endswith('/'):
                val = proxy.read(path)
                print("{:40} {!r}".format(path, val))
            else:
                for entity in proxy.dir(path, bus=args.bus):
                    walk(entity)
        except protocol.OwnetError as error:
            print('Unable to walk {}: server says {}'.format(path, error),
                  file=sys.stderr)
        except protocol.ConnError as error:
            print('Unable to walk {}: {}'.format(path, error), file=sys.stderr)

    # Persistent connection is released when the with-block exits.
    with proxy:
        walk(urlc.path)
|
def focus_parent(self):
    """Move focus to the parent node of the currently focussed one.

    Does nothing when the focussed node has no parent (i.e. is the root).
    """
    _, position = self.get_focus()
    parent_position = self._tree.parent_position(position)
    if parent_position is None:
        return
    self.set_focus(parent_position)
|
def take_action(self, alert, action, text, **kwargs):
    """should return internal id of external system

    Creates or updates a GitLab issue for *alert*. On 'createIssue' the
    new issue iid and an HTML link are stored in ``alert.attributes``;
    on 'updateIssue' a discussion note is appended to the existing issue.
    """
    base_url = '{}/projects/{}'.format(GITLAB_URL, quote(GITLAB_PROJECT_ID, safe=''))
    if action == 'createIssue':
        if 'issue_iid' not in alert.attributes:
            # BUGFIX: URL-encode the title -- raw alert text may contain
            # '&', '#', spaces etc. that would corrupt the query string.
            url = base_url + '/issues?title=' + quote(alert.text, safe='')
            r = requests.post(url, headers=self.headers)
            issue = r.json()  # parse once instead of three times
            alert.attributes['issue_iid'] = issue.get('iid', None)
            alert.attributes['gitlabUrl'] = \
                '<a href="{}" target="_blank">Issue #{}</a>'.format(
                    issue.get('web_url', None), issue.get('iid', None))
    elif action == 'updateIssue':
        if 'issue_iid' in alert.attributes:
            issue_iid = alert.attributes['issue_iid']
            body = 'Update: ' + alert.text
            # BUGFIX: encode the note body for the same reason as above.
            url = base_url + '/issues/{}/discussions?body={}'.format(
                issue_iid, quote(body, safe=''))
            r = requests.post(url, headers=self.headers)
    return alert, action, text
|
def split_long_sentence(sentence, words_per_line):
    """Insert a newline after every *words_per_line* words of *sentence*.

    Parameters
    ----------
    sentence : str
        Sentence to split (words delimited by single spaces).
    words_per_line : double
        Add a newline every this many words. Every other word is followed
        by a space, except the final word, which gets no trailing space.
    """
    words = sentence.split(' ')
    pieces = []
    for count, word in enumerate(words, start=1):
        pieces.append(word)
        if count % words_per_line == 0:
            pieces.append('\n')
        elif count != len(words):
            pieces.append(' ')
    return ''.join(pieces)
|
def reserve(ctx, amount, symbol, account):
    """Reserve/Burn tokens

    Builds an ``Amount`` from *amount*/*symbol*, reserves (burns) it via the
    BitShares instance attached to *ctx*, and pretty-prints the resulting
    transaction.
    """
    print_tx(ctx.bitshares.reserve(
        Amount(amount, symbol, bitshares_instance=ctx.bitshares),
        account=account))
|
def getTarget(self, iid):
    '''Returns a dictionary containing information about a certain target

    Looks up the row whose ``_id`` equals *iid* in the items table and
    returns ``{'name': ..., 'path': ...}``, or None when no row matches.
    '''
    query = 'select name, path from {} where _id=?'.format(self.TABLE_ITEMS)
    row = self.db.execute(query, (iid,)).fetchone()
    if row is None:
        return None
    name, path = row
    return {'name': name, 'path': path}
|
def _load_meta_cache(self):
    """Try to load metadata from file.

    Best-effort: a stale cache file is deleted instead of loaded; any
    failure (missing/corrupt file, unpicklable or vanished classes) is
    swallowed and the in-memory metadata simply stays unset.
    """
    try:
        if self._should_invalidate_cache():
            # Stale cache: drop the file rather than load outdated data.
            os.remove(self._cache_filename)
        else:
            with open(self._cache_filename, 'rb') as f:
                self._document_meta = compat.pickle.load(f)
    except (OSError, IOError, compat.pickle.PickleError, ImportError, AttributeError):
        # Deliberate best-effort swallow: a missing or unreadable cache
        # must not break startup.
        pass
|
def image_data(verbose=False):
    """Get the raw encoded image data, downloading it if necessary.

    The download result is memoized in the module-level ``_IMAGE_DATA``,
    so the network is hit at most once per process.
    """
    # This is a principled use of the `global` statement; don't lint me.
    global _IMAGE_DATA
    # pylint: disable=global-statement
    if _IMAGE_DATA is None:
        if verbose:
            logger.info("--- Downloading image.")
        # closing() guarantees the HTTP response is released even on error.
        with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:
            _IMAGE_DATA = infile.read()
    return _IMAGE_DATA
|
def structure_lines(self, structure, cell_flg=True, frac_flg=True,
                    anion_shell_flg=True, cation_shell_flg=False,
                    symm_flg=True):
    """Generates GULP input string corresponding to pymatgen structure.

    Args:
        structure: pymatgen Structure object
        cell_flg (default=True): Option to use lattice parameters.
        frac_flg (default=True): If True, fractional coordinates
            are used. Else, cartesian coordinates in Angstroms are used.
            GULP convention is to use fractional coordinates for periodic
            structures and cartesian coordinates for non-periodic
            structures.
        anion_shell_flg (default=True): If True, anions are considered
            polarizable.
        cation_shell_flg (default=False): If True, cations are
            considered polarizable.
        symm_flg (default=True): If True, symmetry information is also
            written.

    Returns:
        string containing structure for GULP input
    """
    gin = ""
    if cell_flg:
        gin += "cell\n"
        l = structure.lattice
        lat_str = [str(i) for i in [l.a, l.b, l.c, l.alpha, l.beta, l.gamma]]
        gin += " ".join(lat_str) + "\n"
    if frac_flg:
        gin += "frac\n"
        coord_attr = "frac_coords"
    else:
        gin += "cart\n"
        coord_attr = "coords"
    for site in structure.sites:
        coord = [str(i) for i in getattr(site, coord_attr)]
        specie = site.specie
        # Every site gets a "core" line; polarizable species also get a
        # "shel" (shell) line at the same coordinates.
        core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
        gin += core_site_desc
        # NOTE(review): _anions/_cations are module-level collections
        # defined elsewhere in this file -- presumably sets of Specie
        # objects; confirm there.
        if ((specie in _anions and anion_shell_flg) or
                (specie in _cations and cation_shell_flg)):
            shel_site_desc = specie.symbol + " shel " + " ".join(coord) + "\n"
            gin += shel_site_desc
        else:
            pass
    if symm_flg:
        # Append the space group number for GULP's symmetry handling.
        gin += "space\n"
        gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
    return gin
|
def flatten(nested, containers=(list, tuple)):
    """Return a flat list with all nested containers in *nested* expanded.

    Handles arbitrarily deep nesting of the types in *containers*; empty
    containers disappear, and anything else (including strings) is kept
    as a scalar. The input iterable itself is copied, not mutated.
    """
    flat = list(nested)
    pos = 0
    while pos < len(flat):
        item = flat[pos]
        if isinstance(item, containers):
            # Splice the container's contents in place of the container
            # itself (an empty one simply vanishes), then re-examine the
            # same position on the next pass.
            flat[pos:pos + 1] = item
        else:
            # Scalar: move on.
            pos += 1
    return flat
|
def resize(image, shape, kind='linear'):
    """Resize an image.

    Parameters
    ----------
    image : ndarray
        Array of shape (N, M, ...).
    shape : tuple
        2-element shape.
    kind : str
        Interpolation, either "linear" or "nearest".

    Returns
    -------
    scaled_image : ndarray
        New image, will have dtype np.float64.
    """
    image = np.array(image, float)
    shape = np.array(shape, int)
    if shape.ndim != 1 or shape.size != 2:
        raise ValueError('shape must have two elements')
    if image.ndim < 2:
        raise ValueError('image must have two dimensions')
    if not isinstance(kind, string_types) or kind not in ('nearest', 'linear'):
        raise ValueError('mode must be "nearest" or "linear"')
    # Fractional source coordinates for each output row/column.
    r = np.linspace(0, image.shape[0] - 1, shape[0])
    c = np.linspace(0, image.shape[1] - 1, shape[1])
    if kind == 'linear':
        # Bilinear interpolation between the four neighbouring pixels.
        r_0 = np.floor(r).astype(int)
        c_0 = np.floor(c).astype(int)
        r_1 = r_0 + 1
        c_1 = c_0 + 1
        # Weights are computed BEFORE clipping r_1/c_1: wherever the +1
        # neighbour gets clamped below, its weight is exactly zero, so the
        # clamped (duplicate) pixel never contributes.
        top = (r_1 - r)[:, np.newaxis]
        bot = (r - r_0)[:, np.newaxis]
        lef = (c - c_0)[np.newaxis, :]
        rig = (c_1 - c)[np.newaxis, :]
        # Clamp so the last row/column never indexes out of bounds.
        c_1 = np.minimum(c_1, image.shape[1] - 1)
        r_1 = np.minimum(r_1, image.shape[0] - 1)
        # Broadcast the 2-D weights across any trailing (channel) axes.
        for arr in (top, bot, lef, rig):
            arr.shape = arr.shape + (1,) * (image.ndim - 2)
        out = top * rig * image[r_0][:, c_0, ...]
        out += bot * rig * image[r_1][:, c_0, ...]
        out += top * lef * image[r_0][:, c_1, ...]
        out += bot * lef * image[r_1][:, c_1, ...]
    else:  # kind == 'nearest'
        r = np.round(r).astype(int)
        c = np.round(c).astype(int)
        out = image[r][:, c, ...]
    return out
|
def new_program(self, _id, series, title, subtitle, description, mpaaRating,
                starRating, runTime, year, showType, colorCode,
                originalAirDate, syndicatedEpisodeNumber, advisories):
    """Callback run for each new program entry.

    Abstract hook: subclasses must override this to consume program
    records; the base implementation always raises.
    """
    raise NotImplementedError()
|
def collect_data(parent_module):
    """Find Picard VariantCallingMetrics reports and parse their data.

    Iterates the METRICS CLASS table of each matching log file; the
    SAMPLE_ALIAS column starts a (possibly overwriting) per-sample dict,
    and every other column is stored under that sample.
    """
    data = dict()
    for file_meta in parent_module.find_log_files('picard/variant_calling_metrics', filehandles=True):
        s_name = None
        for header, value in table_in(file_meta['f'], pre_header_string='## METRICS CLASS'):
            if header == 'SAMPLE_ALIAS':
                # A new sample row begins; duplicates are overwritten.
                s_name = value
                if s_name in data:
                    log.debug("Duplicate sample name found in {}! Overwriting: {}".format(file_meta['fn'], s_name))
                data[s_name] = OrderedDict()
            else:
                # Metric column for the current sample.
                data[s_name][header] = value
    return data
|
def unique_prefix(self, area_uuid):
    """Find the minimum prefix required to address this Upload Area UUID uniquely.

    :param (str) area_uuid: UUID of Upload Area
    :return: a string with the minimum prefix required to be unique, or
        None when even the full UUID matches more than one known area
    :rtype: str
    """
    # BUGFIX: range(1, len(area_uuid)) never tried the full UUID, so an
    # area only distinguishable by its last character returned None.
    for prefix_len in range(1, len(area_uuid) + 1):
        prefix = area_uuid[:prefix_len]
        # str.startswith instead of re.match: the original treated the
        # prefix as a regex pattern, which is slower and fragile should
        # the prefix ever contain regex metacharacters.
        matching_areas = [uuid for uuid in self.areas if uuid.startswith(prefix)]
        if len(matching_areas) == 1:
            return prefix
|
def get_nac_eigendisplacements_along_dir(self, direction):
    """Return the nac_eigendisplacements stored for *direction*.

    The direction (not necessarily a versor) is normalized before the
    comparison. Returns the eigendisplacements as a numpy array of complex
    numbers with shape (3 * len(structure), len(structure), 3), or None if
    the direction is not present / was not calculated.
    """
    unit = [component / np.linalg.norm(direction) for component in direction]
    for stored_dir, displacements in self.nac_eigendisplacements:
        if np.allclose(unit, stored_dir):
            return displacements
    return None
|
def sample(self, ctrs, rstate=None, return_q=False, kdtree=None):
    """Sample a point uniformly distributed within the *union* of cubes.

    Uses a K-D Tree to perform the search if provided.

    Returns
    -------
    x : `~numpy.ndarray` with shape (ndim,)
        A coordinate within the set of cubes.
    q : int, optional
        The number of cubes `x` falls within.
    """
    if rstate is None:
        rstate = np.random
    nctrs = len(ctrs)  # number of cubes
    # If there is only one cube, sample from it.
    if nctrs == 1:
        dx = self.hside * (2. * rstate.rand(self.n) - 1.)
        x = ctrs[0] + dx
        if return_q:
            return x, 1
        else:
            return x
    # Select a cube at random.
    idx = rstate.randint(nctrs)
    # Select a point from the chosen cube.
    dx = self.hside * (2. * rstate.rand(self.n) - 1.)
    x = ctrs[idx] + dx
    # Check how many cubes the point lies within, passing over
    # the `idx`-th cube `x` was sampled from.
    q = self.overlap(x, ctrs, kdtree=kdtree)
    if return_q:
        # If `q` is being returned, assume the user wants to
        # explicitly apply the `1. / q` acceptance criterion to
        # properly sample from the union of balls.
        return x, q
    else:
        # If `q` is not being returned, assume the user wants this
        # done internally: rejection-sample (accept with probability
        # 1/q) so points in overlapping regions are not over-weighted.
        while rstate.rand() > (1. / q):
            idx = rstate.randint(nctrs)
            dx = self.hside * (2. * rstate.rand(self.n) - 1.)
            x = ctrs[idx] + dx
            q = self.overlap(x, ctrs, kdtree=kdtree)
        return x
|
def get_nodes(self, node_id=None, **kwargs):
    """Alias for get_elements() but filter the result by Node()

    :param node_id: The Id of the node
    :type node_id: Integer
    :return: List of elements
    """
    # Thin wrapper: delegates entirely, pinning the element class to Node.
    return self.get_elements(Node, elem_id=node_id, **kwargs)
|
def done_evaluating(self, n: Node, s: ShExJ.shapeExpr, result: bool) -> Tuple[bool, bool]:
    """Indicate that we have completed an actual evaluation of (n, s).

    This is only called when start_evaluating has returned None as the
    assumed result.

    :param n: Node that was evaluated
    :param s: expression for node evaluation
    :param result: result of evaluation
    :return: Tuple - first element is whether we are done, second is
        whether evaluation was consistent
    """
    key = (n, s.id)
    # If we didn't have to assume anything or our assumption was correct,
    # we're done: record the result as known.
    if key not in self.assumptions or self.assumptions[key] == result:
        if key in self.assumptions:
            # good housekeeping, not strictly necessary
            del self.assumptions[key]
        self.evaluating.remove(key)
        self.known_results[key] = result
        return True, True
    # If we assumed True and got a False, flip the assumption to False and
    # signal the caller to restart the evaluation from the top.
    elif self.assumptions[key]:
        self.evaluating.remove(key)
        self.assumptions[key] = False
        return False, True
    else:
        # Assumed False but evaluated True: no consistent assignment
        # exists for this recursive shape reference.
        self.fail_reason = f"{s.id}: Inconsistent recursive shape reference"
        return True, False
|
def fitcircle(n, x, y):
    """Fit a circle to an arbitrary number of (x, y) pairs.

    Based on the modified least squares method of Umback and Jones (2000),
    IEEE Transactions on Instrumentation and Measurement.

    Parameters:
        n: number of points
        x, y: sequences of point coordinates

    Returns:
        (k, xo, yo, SSE): k is the signed curvature 1/r (negative when the
        centre lies at-or-below the coordinate means), (xo, yo) the fitted
        centre, and SSE the sum of squared errors from lib_k.get_SSE.
    """
    # Accumulate the moment sums needed by the closed-form solution.
    # (Leftover debug print statements from the original were removed --
    # a library routine should not write to stdout.)
    sx, sx2, sx3, sy, sy2, sy3, sxy, sxy2, syx2 = (0,) * 9
    for i in range(n):
        sx = sx + x[i]
        sx2 = sx2 + x[i] ** 2
        sx3 = sx3 + x[i] ** 3
        sy = sy + y[i]
        sy2 = sy2 + y[i] ** 2
        sy3 = sy3 + y[i] ** 3
        sxy = sxy + x[i] * y[i]
        sxy2 = sxy2 + x[i] * y[i] ** 2
        syx2 = syx2 + y[i] * x[i] ** 2
    A = n * sx2 - sx ** 2
    B = n * sxy - sx * sy
    C = n * sy2 - sy ** 2
    D = 0.5 * (n * sxy2 - sx * sy2 + n * sx3 - sx * sx2)
    E = 0.5 * (n * syx2 - sy * sx2 + n * sy3 - sy * sy2)
    # Centre of the fitted circle.
    xo = old_div((D * C - B * E), (A * C - B ** 2))
    yo = old_div((A * E - B * D), (A * C - B ** 2))
    # Radius: mean distance from the centre to each point.
    r = 0
    for z in range(n):
        r = r + old_div(numpy.sqrt((x[z] - xo) ** 2 + (y[z] - yo) ** 2), n)
    # Sign convention for the curvature.
    if xo <= numpy.mean(x) and yo <= numpy.mean(y):
        k = old_div(-1., r)
    else:
        k = old_div(1., r)
    SSE = lib_k.get_SSE(xo, yo, r, x, y)
    return k, xo, yo, SSE
|
def nvmlDeviceGetDriverModel(handle):
    r"""Retrieve the current and pending driver model for the device.

    Wraps the C API ``nvmlDeviceGetDriverModel``. Windows-only: there the
    driver runs in either WDDM or WDM (TCC) mode; WDDM is required when a
    display is attached, TCC is preferred otherwise (see nvmlDriverModel_t).

    :param handle: the identifier of the target device
    :return: [current, pending] driver model values
    :raises NVMLError: raised by _nvmlCheckReturn for any non-success
        status (e.g. NVML_ERROR_UNINITIALIZED, NVML_ERROR_INVALID_ARGUMENT,
        NVML_ERROR_NOT_SUPPORTED on non-Windows platforms,
        NVML_ERROR_GPU_IS_LOST, NVML_ERROR_UNKNOWN)

    @see nvmlDeviceSetDriverModel()
    """
    # ctypes output parameters for the native call.
    c_currModel = _nvmlDriverModel_t()
    c_pendingModel = _nvmlDriverModel_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetDriverModel")
    ret = fn(handle, byref(c_currModel), byref(c_pendingModel))
    _nvmlCheckReturn(ret)  # raises unless NVML_SUCCESS
    return [c_currModel.value, c_pendingModel.value]
|
def from_file(cls, filename):
    """Initialize datasource from file (.tds or .tdsx)

    Parses the 'datasource' XML root element out of *filename* and builds
    an instance from it, remembering the originating filename.
    """
    dsxml = xml_open(filename, 'datasource').getroot()
    return cls(dsxml, filename)
|
def _parse_log_entry(entry_pb):
    """Special helper to parse ``LogEntry`` protobuf into a dictionary.

    The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
    can be problematic if the type URL in the payload isn't in the
    ``google.protobuf`` registry. To help with parsing unregistered types,
    this function will remove ``proto_payload`` before parsing.

    :type entry_pb: :class:`.log_entry_pb2.LogEntry`
    :param entry_pb: Log entry protobuf.

    :rtype: dict
    :returns: The parsed log entry. The ``protoPayload`` key may contain
        the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
        it could not be parsed.
    """
    try:
        return MessageToDict(entry_pb)
    except TypeError:
        # An unregistered payload type is the only failure we can repair.
        if not entry_pb.HasField("proto_payload"):
            raise
        payload = entry_pb.proto_payload
        entry_pb.ClearField("proto_payload")
        parsed = MessageToDict(entry_pb)
        # Hand back the raw Any message under the canonical JSON key.
        parsed["protoPayload"] = payload
        return parsed
|
def explore_show_summary(self, list, index=False, expected=None, context=None):
    """Show summary of one capability document.

    Given a capability document or index (in list, index True if it is an
    index), write out a simply textual summary of the document with all
    related documents shown as numbered options (of the form
    [#] ...description...) which will then form a menu for the next
    exploration.

    If expected is not None then it should be a list of expected document
    types. If this is set then a warning will be printed if list is not
    one of these document types.

    Look for certain top-level link types including rel="up".

    Returns (options, capability) where options maps menu keys to
    Resource objects.
    """
    num_entries = len(list.resources)
    capability = '(unknown capability)'
    if ('capability' in list.md):
        capability = list.md['capability']
    if (index):
        # Index documents are labeled "<capability>index".
        capability += 'index'
    print("Parsed %s document with %d entries:" % (capability, num_entries))
    if (expected is not None and capability not in expected):
        print("WARNING - expected a %s document" % (','.join(expected)))
    # Sanity-check against the set of known ResourceSync document types.
    if (capability not in ['description', 'descriptionoindex', 'capabilitylist',
                           'resourcelist', 'resourcelistindex',
                           'changelist', 'changelistindex',
                           'resourcedump', 'resourcedumpindex',
                           'changedump', 'changedumpindex',
                           'resourcelist-archive', 'resourcedump-archive',
                           'changelist-archive', 'changedump-archive']):
        print("WARNING - unknown %s document type" % (capability))
    # Cap the listing at 20 entries when there are many.
    to_show = num_entries
    if (num_entries > 21):
        to_show = 20
    # What capability entries are allowed/expected?
    entry_caps = self.allowed_entries(capability)
    options = {}
    n = 0
    # Look for <rs:ln> elements in this document; each well-formed one
    # becomes a lettered menu option ('d', 'u', 'i').
    ln_describedby = list.link('describedby')
    if (ln_describedby):
        if ('href' in ln_describedby):
            uri = ln_describedby['href']
            print("[%s] rel='describedby' link to %s" % ('d', uri))
            uri = self.expand_relative_uri(context, uri)
            options['d'] = Resource(uri, capability='resource')
        else:
            print("WARNING - describedby link with no href, ignoring")
    ln_up = list.link('up')
    if (ln_up):
        if ('href' in ln_up):
            uri = ln_up['href']
            print("[%s] rel='up' link to %s" % ('u', uri))
            uri = self.expand_relative_uri(context, uri)
            options['u'] = Resource(uri)
        else:
            print("WARNING - up link with no href, ignoring")
    ln_index = list.link('index')
    if (ln_index):
        if ('href' in ln_index):
            uri = ln_index['href']
            print("[%s] rel='index' link to %s" % ('i', uri))
            uri = self.expand_relative_uri(context, uri)
            options['i'] = Resource(uri)
        else:
            print("WARNING - index link with no href, ignoring")
    # Show listed resources as numbered options
    for r in list.resources:
        if (n >= to_show):
            print("(not showing remaining %d entries)" % (num_entries - n))
            break
        n += 1
        options[str(n)] = r
        print("[%d] %s" % (n, r.uri))
        if (self.verbose):
            print(" " + str(r))
        r.uri = self.expand_relative_uri(context, r.uri)
        if (r.capability is not None):
            warning = ''
            if (r.capability not in entry_caps):
                warning = " (EXPECTED %s)" % (' or '.join(entry_caps))
            print(" %s%s" % (r.capability, warning))
        elif (len(entry_caps) == 1):
            # Only one capability is possible here, so fill it in.
            r.capability = entry_caps[0]
            print(" capability not specified, should be %s" % (r.capability))
    return (options, capability)
|
def parse_references(reference_lines, recid=None, override_kbs_files=None,
                     reference_format=u"{title} {volume} ({year}) {page}",
                     linker_callback=None):
    """Parse a list of references.

    Given a list of raw reference lines (list of strings),
    output a list of dictionaries containing the parsed references.

    :param reference_lines: raw reference strings
    :param recid: record id (not used in this function body)
    :param override_kbs_files: custom knowledge-base files, if any
    :param reference_format: template used when rebuilding references
    :param linker_callback: optional callback for linking identified items
    :return: (built references, stats) tuple
    """
    # RefExtract knowledge bases
    kbs = get_kbs(custom_kbs_files=override_kbs_files)
    # Identify journal titles, report numbers, URLs, DOIs, and authors...
    processed_references, counts, dummy_bad_titles_count = \
        parse_references_elements(reference_lines, kbs, linker_callback)
    return (build_references(processed_references, reference_format),
            build_stats(counts))
|
def crc16(data):
    """Calculate an ISO13239 CRC checksum of the input buffer.

    Parameters: init=0xFFFF, reflected polynomial 0x8408, no final XOR
    (the CRC-16/MCRF4XX variant; check value for "123456789" is 0x6F91).

    Accepts either a text string (each character's ordinal is used, as
    before) or, generalized, a bytes-like object.
    """
    m_crc = 0xffff
    for this in data:
        # Iterating bytes yields ints in Python 3; str yields 1-char strings.
        m_crc ^= this if isinstance(this, int) else ord(this)
        for _ in range(8):
            j = m_crc & 1
            m_crc >>= 1
            if j:
                m_crc ^= 0x8408
    return m_crc
|
def engineering(value, precision=3, prefix=False, prefixes=SI):
    """Convert a number to engineering notation.

    :param value: number to convert (anything Decimal() accepts)
    :param precision: significant digits kept during normalization
    :param prefix: if True, substitute SI prefixes for the powers of ten
    :param prefixes: mapping of exponent -> prefix string (defaults to SI)
    """
    display = decimal.Context(prec=precision)
    value = decimal.Decimal(value).normalize(context=display)
    # to_eng_string() keeps the exponent a multiple of 3, e.g. '1.23E+3'.
    string = value.to_eng_string()
    if prefix:
        # NOTE(review): `e` and `replace` are helpers defined elsewhere in
        # this module -- presumably e(exponent) renders the exponent token
        # and replace() substitutes each token found in the mapping;
        # confirm there.
        prefixes = {e(exponent): prefix for exponent, prefix in prefixes.items()}
        return replace(string, prefixes)
    else:
        return string
|
def tracking_event_post(node_id):
    """Enqueue a TrackingEvent worker for the specified Node."""
    # Optional JSON-encoded details payload taken from the request.
    details = request_parameter(parameter="details", optional=True)
    if details:
        details = loads(details)
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")
    db.logger.debug("rq: Queueing %s with for node: %s for worker_function",
                    "TrackingEvent", node_id, )
    # Hand off the actual work to the background queue.
    q.enqueue(worker_function, "TrackingEvent", None, None,
              node_id=node_id, details=details)
    return success_response(details=details)
|
def enumerate_version_changelog(self, url, versions_estimated, timeout=15, headers=None):
    """Narrow a version estimate using the CMS's changelog, if one is known.

    If we have a changelog in store for this CMS, a changelog hash is used
    for narrowing down which version is installed. If the changelog's
    version is outside our estimated range, it is discarded.

    @param url: the url to check against.
    @param versions_estimated: the versions other checks estimate the
        installation is.
    @param timeout: the number of seconds to wait before expiring a request.
    @param headers: headers to pass to requests.get(); defaults to none.
    """
    # BUGFIX: the original used a mutable default argument (headers={}),
    # which is shared across calls; create a fresh dict per call instead.
    if headers is None:
        headers = {}
    changelogs = self.vf.changelogs_get()
    ch_hash = None
    # NOTE(review): only the hash of the LAST changelog URL that fetched
    # successfully is kept -- preserved as-is; confirm this is intended.
    for ch_url in changelogs:
        try:
            ch_hash = self.enumerate_file_hash(url, file_url=ch_url,
                                               timeout=timeout, headers=headers)
        except RuntimeError:
            # Missing changelog file: try the next candidate URL.
            pass
    ch_version = self.vf.changelog_identify(ch_hash)
    if ch_version in versions_estimated:
        return [ch_version]
    else:
        return versions_estimated
|
def notify_observers(self, joinpoint, post=False):
    """Notify all registered observers of a joinpoint call.

    *post* tells observers whether this is the pre-call (False) or
    post-call (True) notification. The observer collection is snapshotted
    first so observers may (un)register during notification.
    """
    snapshot = tuple(self.observers)
    for obs in snapshot:
        obs.notify(joinpoint=joinpoint, post=post)
|
async def crawl(self, urls, sem):
    """Crawl every URL concurrently, bounded by the given semaphore.

    :param urls: iterable of URLs (or URL records) to fetch
    :type urls: list/dict
    :param sem: semaphore limiting concurrent requests
    """
    pending = [self._sem_crawl(sem, url) for url in urls]
    # Consume tasks as they complete, wrapped in a tqdm progress bar.
    progress = tqdm.tqdm(asyncio.as_completed(pending), total=len(pending), desc=' ✈', )
    for finished in progress:
        await finished
|
def render(self, surf):
    """Render the button by blitting its icon at the button's own rect."""
    # Show the pressed artwork while the button is held down.
    icon = self.icon_pressed if self.clicked else self.icon
    surf.blit(icon, self)
|
def endStep(self, key):
    """Record the end time for a processing step.

    If ``key`` is None, only the overall end time for the class is
    recorded, representing total runtime since initialization.
    """
    ptime = _ptime()
    if key is not None:
        step = self.steps[key]
        step['end'] = ptime
        # Elapsed wall time, using the numeric component of the timestamp.
        step['elapsed'] = ptime[1] - step['start'][1]
    self.end = ptime
    print('==== Processing Step ', key, ' finished at ', ptime[0])
    print('')
|
def get_dataset(self, dsid, dsinfo):
    """Load the dataset for *dsid* and attach standard metadata.

    Args:
        dsid: Dataset ID (its ``name`` is the fallback file key).
        dsinfo: Dataset information dict merged into the data's attrs.
    Returns:
        Dask DataArray: the data with updated ``attrs``.
    """
    file_key = dsinfo.get('file_key', dsid.name)
    data = self[file_key]
    data.attrs.update(dsinfo)
    # Global file attributes describing platform and sensor.
    data.attrs["platform_name"] = self['/attr/satellite_name']
    data.attrs["sensor"] = self['/attr/instrument_name']
    return data
|
def decrypt(self, esp, key, icv_size=None):
    """Decrypt an ESP packet.

    @param esp: an encrypted ESP packet
    @param key: the secret key used for encryption
    @param icv_size: the length of the icv used for integrity check
    @return: a valid ESP packet decrypted with this algorithm
    @raise IPSecIntegrityError: if the integrity check fails with an AEAD
        algorithm
    """
    if icv_size is None:
        # Only AEAD modes carry an ICV inside the ESP payload.
        icv_size = self.icv_size if self.is_aead else 0
    iv = esp.data[:self.iv_size]
    data = esp.data[self.iv_size:len(esp.data) - icv_size]
    icv = esp.data[len(esp.data) - icv_size:]
    if self.cipher:
        cipher = self.new_cipher(key, iv, icv)
        decryptor = cipher.decryptor()
        if self.is_aead:
            # Tag value check is done during the finalize method
            decryptor.authenticate_additional_data(struct.pack('!LL', esp.spi, esp.seq))
        try:
            data = decryptor.update(data) + decryptor.finalize()
        except InvalidTag as err:
            raise IPSecIntegrityError(err)
    # RFC 4303 trailer layout: payload | padding | pad length | next header
    padlen = (data[-2])
    nh = data[-1]
    # BUGFIX: extract the padding bytes *before* truncating the payload.
    # The previous code truncated ``data`` first and then sliced the
    # already-truncated buffer, so ``padding`` held the tail of the
    # payload instead of the actual pad bytes.
    padding = data[len(data) - padlen - 2:len(data) - 2]
    data = data[:len(data) - padlen - 2]
    return _ESPPlain(spi=esp.spi, seq=esp.seq, iv=iv, data=data, padding=padding, padlen=padlen, nh=nh, icv=icv)
|
def _get_image_information(self):
    """:returns: Dictionary information about the container image"""
    # Generator-based coroutine: delegate the HTTP query to the manager.
    endpoint = "images/{}/json".format(self._image)
    info = yield from self.manager.query("GET", endpoint)
    return info
|
def clear_cache_root():
    """Delete every file and subdirectory in the root song cache."""
    logger.debug("Clearing root cache")
    if os.path.isdir(_root_songcache_dir):
        for entry in os.listdir(_root_songcache_dir):
            entry_path = os.path.join(_root_songcache_dir, entry)
            try:
                if os.path.isfile(entry_path):
                    os.unlink(entry_path)
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path)
            except PermissionError:
                # Best-effort cleanup: leave locked entries in place.
                pass
            except Exception as e:
                logger.exception(e)
    logger.debug("Root cache cleared")
|
def set(self, value: Optional[bool]):
    """Set the current status of a check.

    :param value: ``True`` (healthy), ``False`` (unhealthy), or ``None``
        (unknown)
    """
    changed = value != self._value
    self._value = value
    # Wake every watcher only when the status actually transitioned.
    if changed:
        for event in self._events:
            event.set()
|
def get_bios(self):
    """Get the BIOS/UEFI values currently set on the physical server.

    Returns:
        dict: Dictionary of BIOS/UEFI values.
    """
    return self._helper.do_get("{}/bios".format(self.data["uri"]))
|
def get_last_date(self, field, filters_=None):
    """Return the most recent value of *field*.

    :param field: field holding the date data
    :param filters_: optional list of additional filters used to locate
        the date (defaults to no extra filters)
    """
    # BUGFIX: avoid a shared mutable default argument (was ``filters_=[]``);
    # a list default is created once and shared across all calls.
    if filters_ is None:
        filters_ = []
    return self.get_last_item_field(field, filters_=filters_)
|
def x_position(self, filter_order=None, window_size=None, tol=0.05, Lx=None):
    '''Calculate the $x$-position of the drop from measured capacitance:

        x = (C / a - c_f) / (c_d - c_f) * Lx

    where:
     - $C$ is the measured capacitance.
     - $c_f$ is the capacitance of the filler medium per unit area
       _(e.g., air)_.
     - $c_d$ is the capacitance of an electrode completely covered in
       liquid per unit area.
     - $a$ is the area of the actuated electrode(s).

    This assumes a single drop moving across an electrode with length
    Lx along the x-axis.  If no value is provided for Lx, the electrode
    is assumed to be square, i.e., Lx = Ly = sqrt(area).
    '''
    # Per-unit-area capacitance for a fully covered electrode: prefer the
    # calibrated value, otherwise estimate from the latest measurement.
    if self.calibration._c_drop:
        c_drop = self.calibration.c_drop(self.frequency)
    else:
        c_drop = self.capacitance()[-1] / self.area
    # Filler-medium capacitance per unit area (0 when uncalibrated).
    c_filler = (self.calibration.c_filler(self.frequency)
                if self.calibration._c_filler else 0)
    if Lx is None:
        # Assume a square electrode when no length is given.
        Lx = np.sqrt(self.area)
    c_measured = self.capacitance(filter_order=filter_order,
                                  window_size=window_size, tol=tol)
    fraction = (c_measured / self.area - c_filler) / (c_drop - c_filler)
    return fraction * Lx
|
def _create_nodes(self, env, target=None, source=None):
    """Create and return lists of target and source nodes.

    Sources are adjusted with the configured source suffix and converted
    to nodes.  Targets are either inferred from the first source (via its
    ``target_from_source`` method) when no target is given, or
    adjusted/converted from the supplied *target*.  If an emitter is
    configured it may rewrite both lists.
    """
    src_suf = self.get_src_suffix(env)
    # Factories convert strings/paths into Node objects of the right kind.
    target_factory = env.get_factory(self.target_factory)
    source_factory = env.get_factory(self.source_factory)
    source = self._adjustixes(source, None, src_suf)
    slist = env.arg2nodes(source, source_factory)
    pre = self.get_prefix(env, slist)
    suf = self.get_suffix(env, slist)
    if target is None:
        try:
            t_from_s = slist[0].target_from_source
        except AttributeError:
            raise UserError("Do not know how to create a target from source `%s'" % slist[0])
        except IndexError:
            # No sources at all: no target can be inferred.
            tlist = []
        else:
            splitext = lambda S: self.splitext(S, env)
            tlist = [t_from_s(pre, suf, splitext)]
    else:
        target = self._adjustixes(target, pre, suf, self.ensure_suffix)
        tlist = env.arg2nodes(target, target_factory, target=target, source=source)
    if self.emitter:
        # The emitter is going to do str(node), but because we're
        # being called *from* a builder invocation, the new targets
        # don't yet have a builder set on them and will look like
        # source files.  Fool the emitter's str() calls by setting
        # up a temporary builder on the new targets.
        new_targets = []
        for t in tlist:
            if not t.is_derived():
                t.builder_set(self)
                new_targets.append(t)
        orig_tlist = tlist[:]
        orig_slist = slist[:]
        target, source = self.emitter(target=tlist, source=slist, env=env)
        # Now delete the temporary builders that we attached to any
        # new targets, so that _node_errors() doesn't do weird stuff
        # to them because it thinks they already have builders.
        for t in new_targets:
            if t.builder is self:
                # Only delete the temporary builder if the emitter
                # didn't change it on us.
                t.builder_set(None)
        # Have to call arg2nodes yet again, since it is legal for
        # emitters to spit out strings as well as Node instances.
        tlist = env.arg2nodes(target, target_factory, target=orig_tlist, source=orig_slist)
        slist = env.arg2nodes(source, source_factory, target=orig_tlist, source=orig_slist)
    return tlist, slist
|
def remove(self, bw):
    """Remove a buffer watch identifier.

    @type bw: L{BufferWatch}
    @param bw: Buffer watch identifier.
    @raise KeyError: The buffer watch identifier was already removed.
        (Suppressed for one-shot watches, which may remove themselves.)
    """
    try:
        self.__ranges.remove(bw)
    except KeyError:
        # One-shot watches may already have auto-removed themselves;
        # only re-raise for persistent watches.
        if not bw.oneshot:
            raise
|
def unzip(source_file, destination_dir):
    """Unzip <source_file> into <destination_dir>.

    :param str|unicode source_file: The name of the zip file to read
    :param str|unicode destination_dir: The directory to write the
        unzipped files into (created as needed by ``extractall``)
    """
    archive = zipfile.ZipFile(source_file)
    try:
        archive.extractall(destination_dir)
    finally:
        archive.close()
|
def download(url, tries=DEFAULT_TRIES, retry_delay=RETRY_DELAY, try_timeout=None, proxies=None, verify=True):
    """Download a file over HTTP, in one or more attempts.

    Args:
        url (str): HTTP(S) URL of the file to download.
        tries (int): number of attempts to make (must be >= 1).
        retry_delay (int or float): seconds to wait between attempts.
        try_timeout (int or float): maximum time to wait per attempt.
        proxies (dict): optional proxies; must map 'http' and 'https' to
            the corresponding proxy URLs.
        verify (bool): whether to verify TLS certificates.

    Returns:
        bytes: the downloaded file contents.

    Raises:
        ValueError: if *tries* is less than 1.
        Exception: the last download error, when every attempt fails.
    """
    # BUGFIX: with tries < 1 the loop never ran and the final ``raise``
    # hit an unbound ``download_exception`` (UnboundLocalError).
    if tries < 1:
        raise ValueError("tries must be >= 1, got %r" % (tries,))
    download_exception = None
    for attempt in range(tries):
        try:
            return requests.get(url, timeout=try_timeout, proxies=proxies, verify=verify).content
        except Exception as e:
            download_exception = e
            # Sleep only between attempts, not after the last failure.
            if attempt < tries - 1:
                time.sleep(retry_delay)
    raise download_exception
|
def getRequiredAttrs(self):
    """Get the type URIs for all attributes marked as required.

    @returns: A list of the type URIs for attributes that have
        been marked as required.
    @rtype: [str]
    """
    # Preserve the insertion order of ``requested_attributes``.
    return [type_uri
            for type_uri, attribute in self.requested_attributes.items()
            if attribute.required]
|
def get_queryset(self):
    """Resolve the queryset from URL params (<app>, <mode>) when neither
    ``queryset`` nor ``model`` is set on the class."""
    # Fall back to the default behaviour whenever either attribute is set.
    if self.queryset is not None or self.model:
        return super().get_queryset()
    try:
        return self.get_model_class()._default_manager.all()
    except LookupError:
        # Unknown app/model in the URL -> not found.
        raise Http404()
|
def commit(self):
    """Commit mutations to the database.

    :rtype: datetime
    :returns: timestamp of the committed changes.
    """
    # Raises if the batch is not in a committable state.
    self._check_state()
    database = self._session._database
    api = database.spanner_api
    metadata = _metadata_with_prefix(database.name)
    # All buffered mutations go in one single-use read-write transaction.
    txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
    response = api.commit(self._session.name, self._mutations, single_use_transaction=txn_options, metadata=metadata, )
    # Convert the protobuf timestamp into a datetime for callers.
    self.committed = _pb_timestamp_to_datetime(response.commit_timestamp)
    return self.committed
|
def local_runtime_values(self):
    """Find runtime values of this function which do not come from inputs.

    These values are generated by starting from a blank state and
    reanalyzing the basic blocks once each.  Function calls are skipped,
    and back edges are never taken, so these values are often unreliable.
    This function is good at finding simple constant addresses which the
    function will use or calculate.

    :return: a set of constants
    """
    constants = set()
    # Nothing to analyze if the function lies outside the main binary.
    if not self._project.loader.main_object.contains_addr(self.addr):
        return constants
    # FIXME the old way was better for architectures like mips, but we need the initial irsb
    # reanalyze function with a new initial state (use persistent registers)
    # initial_state = self._function_manager._cfg.get_any_irsb(self.addr).initial_state
    # fresh_state = self._project.factory.blank_state(mode="fastpath")
    # for reg in initial_state.arch.persistent_regs + ['ip']:
    #     fresh_state.registers.store(reg, initial_state.registers.load(reg))
    # reanalyze function with a new initial state
    fresh_state = self._project.factory.blank_state(mode="fastpath")
    fresh_state.regs.ip = self.addr
    # Addresses of all basic blocks belonging to this function's graph.
    graph_addrs = set(x.addr for x in self.graph.nodes() if isinstance(x, BlockNode))
    # process the nodes in a breadth-first order keeping track of which
    # nodes have already been analyzed
    analyzed = set()
    q = [fresh_state]
    analyzed.add(fresh_state.solver.eval(fresh_state.ip))
    while len(q) > 0:
        state = q.pop()
        # make sure its in this function
        if state.solver.eval(state.ip) not in graph_addrs:
            continue
        # don't trace into simprocedures
        if self._project.is_hooked(state.solver.eval(state.ip)):
            continue
        # don't trace outside of the binary
        if not self._project.loader.main_object.contains_addr(state.solver.eval(state.ip)):
            continue
        # don't trace unreachable blocks
        if state.history.jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode', 'Ijk_MapFail', 'Ijk_NoRedir', 'Ijk_SigTRAP', 'Ijk_SigSEGV', 'Ijk_ClientReq'}:
            continue
        curr_ip = state.solver.eval(state.ip)
        # get runtime values from logs of successors
        successors = self._project.factory.successors(state)
        for succ in successors.flat_successors + successors.unsat_successors:
            for a in succ.history.recent_actions:
                for ao in a.all_objects:
                    if not isinstance(ao.ast, claripy.ast.Base):
                        # Plain Python value: record it directly.
                        constants.add(ao.ast)
                    elif not ao.ast.symbolic:
                        # Concrete bitvector: evaluate it to an int.
                        constants.add(succ.solver.eval(ao.ast))
            # add successors to the queue to analyze
            if not succ.solver.symbolic(succ.ip):
                succ_ip = succ.solver.eval(succ.ip)
                if succ_ip in self and succ_ip not in analyzed:
                    analyzed.add(succ_ip)
                    q.insert(0, succ)
        # force jumps to missing successors
        # (this is a slightly hacky way to force it to explore all the nodes in the function)
        node = self.get_node(curr_ip)
        if node is None:
            # the node does not exist. maybe it's not a block node.
            continue
        missing = set(x.addr for x in list(self.graph.successors(node))) - analyzed
        for succ_addr in missing:
            l.info("Forcing jump to missing successor: %#x", succ_addr)
            if succ_addr not in analyzed:
                all_successors = successors.unconstrained_successors + successors.flat_successors + successors.unsat_successors
                if len(all_successors) > 0:
                    # set the ip of a copied successor to the successor address
                    succ = all_successors[0].copy()
                    succ.ip = succ_addr
                    analyzed.add(succ_addr)
                    q.insert(0, succ)
                else:
                    l.warning("Could not reach successor: %#x", succ_addr)
    return constants
|
def untrust(self, scope, vk):
    """Stop trusting a particular key for the given scope.

    Removes the matching verifier entry; raises ValueError if it is not
    present.  Returns ``self`` for chaining.
    """
    entry = {'scope': scope, 'vk': vk}
    self.data['verifiers'].remove(entry)
    return self
|
def gantry_axes(cls) -> Tuple['Axis', 'Axis', 'Axis', 'Axis']:
    """The axes which are tied to the gantry and therefore require the
    deck calibration transform."""
    return cls.X, cls.Y, cls.Z, cls.A
|
def draw_shapes_svg_layer(df_shapes, shape_i_columns, layer_name, layer_number=1, use_svg_path=True):
    '''Draw shapes as a layer in a SVG file.

    Args:
        df_shapes (pandas.DataFrame): Table of shape vertices (one row per
            vertex).
        shape_i_columns (str or list): Either a single column name as a
            string or a list of column names in ``df_shapes``.  Rows in
            ``df_shapes`` with the same value in the ``shape_i_columns``
            column(s) are grouped together as a shape.
        layer_name (str): Name of Inkscape layer.
        layer_number (int, optional): Z-order index of Inkscape layer.
        use_svg_path (bool, optional): If ``True``, electrodes are drawn
            as ``svg:path`` elements.  Otherwise, electrodes are drawn as
            ``svg:polygon`` elements.

    Returns:
        StringIO.StringIO: A file-like object containing SVG XML source.
        The XML contains a layer named according to :data:`layer_name`,
        which in turn contains ``svg:polygon`` or ``svg:path`` elements
        corresponding to the shapes in the input :data:`df_shapes` table.
    '''
    # Note that `svgwrite.Drawing` requires a filepath to be specified during
    # construction, *but* nothing is actually written to the path unless one
    # of the `save*` methods is called.
    # In this function, we do *not* call any of the `save*` methods. Instead,
    # we use the `write` method to write to an in-memory file-like object.
    minx, miny = df_shapes[['x', 'y']].min().values
    maxx, maxy = df_shapes[['x', 'y']].max().values
    width = maxx - minx
    height = maxy - miny
    dwg = svgwrite.Drawing('should_not_exist.svg', size=(width, height), debug=False)
    # Register the Inkscape namespace so layer attributes validate.
    nsmap = INKSCAPE_NSMAP
    dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
    svg_root = dwg.g(id='layer%d' % layer_number, **{'inkscape:label': layer_name, 'inkscape:groupmode': 'layer'})
    minx, miny = df_shapes[['x', 'y']].min().values
    for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns):
        # All non-coordinate columns become SVG element attributes.
        attr_columns = [c for c in df_shape_i.columns if c not in ('vertex_i', 'x', 'y')]
        attrs = df_shape_i.iloc[0][attr_columns].to_dict()
        vertices = df_shape_i[['x', 'y']].values.tolist()
        if not use_svg_path:
            # Draw electrode shape as an `svg:polygon` element.
            p = Polygon(vertices, debug=False, **attrs)
        else:
            # Draw electrode shape as an `svg:path` element.
            commands = ['M %s,%s' % tuple(vertices[0])]
            commands += ['L %s,%s' % tuple(v) for v in vertices[1:]]
            # NOTE(review): the `M`/`L` commands above are built *before*
            # the duplicate end point is removed below, so a closing
            # duplicate vertex still appears in the path data even though
            # `'Z'` closes the path -- confirm this is intended.
            while vertices[0] == vertices[-1]:
                # Start is equal to end of path, but we will use the `'Z'`
                # command to close the path, so delete the last point in the
                # path.
                del vertices[-1]
            commands += ['Z']
            p = Path_(d=' '.join(commands), debug=False, **attrs)
        svg_root.add(p)
    dwg.add(svg_root)
    # Write result to `StringIO`.
    output = StringIO.StringIO()
    dwg.write(output)
    output.seek(0)
    return output
|
def _draw_cursor(self, dc, grid, row, col, pen=None, brush=None):
    """Draw the cell cursor as four caret corners inside the cell borders.

    :param dc: wx device context to draw on
    :param grid: grid widget providing geometry and the current table
    :param row: row of the cursor cell
    :param col: column of the cursor cell
    :param pen: optional wx.Pen (defaults to the configured text color)
    :param brush: optional wx.Brush (defaults to the configured text color)
    """
    # If in full screen mode draw no cursor
    if grid.main_window.IsFullScreen():
        return
    key = row, col, grid.current_table
    rect = grid.CellToRect(row, col)
    # Expand to the full merged-cell area if this cell is merged.
    rect = self.get_merged_rect(grid, key, rect)
    # Check if cell is invisible
    if rect is None:
        return
    size = self.get_zoomed_size(1.0)
    # Caret arms are one fifth of the smaller cell dimension.
    caret_length = int(min([rect.width, rect.height]) / 5.0)
    color = get_color(config["text_color"])
    if pen is None:
        pen = wx.Pen(color)
    if brush is None:
        brush = wx.Brush(color)
    pen.SetWidth(size)
    # Inner right and lower borders
    border_left = rect.x + size - 1
    border_right = rect.x + rect.width - size - 1
    border_upper = rect.y + size - 1
    border_lower = rect.y + rect.height - size - 1
    # One L-shaped polyline per corner: lower-right, upper-right,
    # upper-left, lower-left.
    points_lr = [(border_right, border_lower - caret_length), (border_right, border_lower), (border_right - caret_length, border_lower), (border_right, border_lower), ]
    points_ur = [(border_right, border_upper + caret_length), (border_right, border_upper), (border_right - caret_length, border_upper), (border_right, border_upper), ]
    points_ul = [(border_left, border_upper + caret_length), (border_left, border_upper), (border_left + caret_length, border_upper), (border_left, border_upper), ]
    points_ll = [(border_left, border_lower - caret_length), (border_left, border_lower), (border_left + caret_length, border_lower), (border_left, border_lower), ]
    point_list = [points_lr, points_ur, points_ul, points_ll]
    dc.DrawPolygonList(point_list, pens=pen, brushes=brush)
    # Remember where the cursor was drawn so it can be refreshed later.
    self.old_cursor_row_col = row, col
|
def templates(self):
    """Lazily-created :class:`TemplateManager` used to manage templates.

    :rtype: yagocd.resources.template.TemplateManager
    """
    # Instantiate on first access, then reuse the cached manager.
    if self._template_manager is None:
        self._template_manager = TemplateManager(session=self._session)
    return self._template_manager
|
def load_config(config, expand_env=False, force=False):
    """Return repos from a config file.  Not recursive.

    :param config: path to config file
    :type config: str
    :param expand_env: True to expand environment variables in the config.
    :type expand_env: bool
    :param bool force: True to aggregate even if repo is dirty.
    :returns: expanded config dict item
    :rtype: iter(dict)
    """
    if not os.path.exists(config):
        raise ConfigException('Unable to find configuration file: %s' % config)
    # Pick the kaptan handler (yaml/json/ini/...) from the file extension.
    extension = os.path.splitext(config)[1][1:]
    parser = kaptan.Kaptan(handler=kaptan.HANDLER_EXT.get(extension))
    if expand_env:
        # Substitute $VARS from the environment before parsing; the raw
        # text replaces the path so import_config parses the content.
        with open(config, 'r') as file_handler:
            config = Template(file_handler.read()).substitute(os.environ)
    parser.import_config(config)
    return get_repos(parser.export('dict') or {}, force)
|
def setPalette(self, palette):
    """Set the palette for this node.

    If None is provided, the scene's palette will be used for this node.

    :param palette: <XNodePalette> || None
    """
    if palette is None:
        self._palette = None
    else:
        self._palette = XNodePalette(palette)
    self.setDirty()
|
def get_args(self):
    """Collect the args for this query, to be escaped at execution time.

    All inner (sub)queries held by ``QueryTable`` entries are inspected
    and their args merged into this query's where-clause args.

    :return: all args for this query as a dict
    :rtype: dict
    """
    for tbl in self.tables + self.with_tables:
        # NOTE: exact type check (not isinstance) preserved from original.
        if type(tbl) is QueryTable:
            self._where.args.update(tbl.query.get_args())
    return self._where.args
|
def get_fcps_emerg(request):
    """Return FCPS emergency information, or False when nothing to show."""
    try:
        emerg = get_emerg()
    except Exception:
        # Fetch/parse failure: treat as "no emergency".
        logger.info("Unable to fetch FCPS emergency info")
        emerg = {"status": False}
    # ``show_emerg`` in the query string forces display for testing.
    if not (emerg["status"] or ("show_emerg" in request.GET)):
        return False
    return "{} <span style='display: block;text-align: right'>— FCPS</span>".format(emerg["message"])
|
def write_corrected(self, output, clobber=False):
    """Write out the destriped data.

    Calibration steps that were applied internally are reversed first,
    then (for ABCD readouts) the merged amp halves are split back into
    separate FITS extensions before writing.

    :param output: output FITS file path
    :param clobber: overwrite an existing file when True
    """
    # un-apply the flatfield if necessary
    if self.flatcorr != 'COMPLETE':
        self.science = self.science / self.invflat
        self.err = self.err / self.invflat
    # un-apply the post-flash if necessary
    if self.flshcorr != 'COMPLETE':
        self.science = self.science + self.flash
    # un-apply the dark if necessary
    if self.darkcorr != 'COMPLETE':
        self.science = self.science + self.dark
    # reverse the amp merge
    if (self.ampstring == 'ABCD'):
        tmp_1, tmp_2 = np.split(self.science, 2, axis=1)
        self.hdulist['sci', 1].data = tmp_1.copy()
        # Second half is flipped vertically to undo the merge orientation.
        self.hdulist['sci', 2].data = tmp_2[::-1, :].copy()
        tmp_1, tmp_2 = np.split(self.err, 2, axis=1)
        self.hdulist['err', 1].data = tmp_1.copy()
        self.hdulist['err', 2].data = tmp_2[::-1, :].copy()
    else:
        self.hdulist['sci', 1].data = self.science.copy()
        self.hdulist['err', 1].data = self.err.copy()
    # Write the output
    self.hdulist.writeto(output, overwrite=clobber)
|
def debug_string(self):
    """Progress notification for the HyperBand algorithm.

    For each bracket, the output looks like::

        Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
        {PENDING: 2, RUNNING: 3, TERMINATED: 2}

    "Max Size" is the max number of pending/running experiments set
    according to the Hyperband algorithm.  "Milestone" is the iterations
    a trial will run before the next halving.  "completed" is an
    approximate progress metric; unfilled brackets never reach 100%.
    """
    total_brackets = sum(len(band) for band in self._hyperbands)
    pieces = ["Using HyperBand: ",
              "num_stopped={} total_brackets={}".format(self._num_stopped, total_brackets)]
    for round_idx, band in enumerate(self._hyperbands):
        pieces.append("\nRound #{}:".format(round_idx))
        pieces.extend("\n {}".format(bracket) for bracket in band)
    return "".join(pieces)
|
def args_options():
    """Generate the ``landsat`` command-line argument parser.

    Defines three subcommands -- ``search``, ``download`` and ``process``
    -- each with its own options.

    :returns: Parser object
    """
    parser = argparse.ArgumentParser(prog='landsat', formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(DESCRIPTION))
    subparsers = parser.add_subparsers(help='Landsat Utility', dest='subs')
    parser.add_argument('--version', action='version', version='%(prog)s version ' + __version__)
    # Search Logic
    parser_search = subparsers.add_parser('search', help='Search Landsat metadata')
    # Global search options
    parser_search.add_argument('-l', '--limit', default=10, type=int, help='Search return results limit\n' 'default is 10')
    parser_search.add_argument('-s', '--start', help='Start Date - Most formats are accepted ' 'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('-e', '--end', help='End Date - Most formats are accepted ' 'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('--latest', default=-1, type=int, help='returns the N latest images within the last 365 days')
    parser_search.add_argument('-c', '--cloud', type=float, default=100.0, help='Maximum cloud percentage ' 'default is 100 perct')
    parser_search.add_argument('-p', '--pathrow', help='Paths and Rows in order separated by comma. Use quotes ("001").' 'Example: path,row,path,row 001,001,190,204')
    parser_search.add_argument('--lat', type=float, help='The latitude')
    parser_search.add_argument('--lon', type=float, help='The longitude')
    parser_search.add_argument('--address', type=str, help='The address')
    parser_search.add_argument('--json', action='store_true', help='Returns a bare JSON response')
    parser_search.add_argument('--geojson', action='store_true', help='Returns a geojson response')
    # Download subcommand: fetch scenes from Google Storage / S3.
    parser_download = subparsers.add_parser('download', help='Download images from Google Storage')
    parser_download.add_argument('scenes', metavar='sceneID', nargs="+", help="Provide Full sceneID, e.g. LC81660392014196LGN00")
    parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download ' 'the band from S3. If the band does not exist, an error is returned', default=None)
    parser_download.add_argument('-d', '--dest', help='Destination path')
    parser_download.add_argument('-p', '--process', help='Process the image after download', action='store_true')
    parser_download.add_argument('--pansharpen', action='store_true', help='Whether to also pansharpen the process ' 'image. Pansharpening requires larger memory')
    parser_download.add_argument('--ndvi', action='store_true', help='Whether to run the NDVI process. If used, bands parameter is disregarded')
    parser_download.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
    parser_download.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' + 'WGS84 datum, and with longitude and latitude units of decimal degrees ' + 'separated by comma.' + 'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' + '50.2682767372753')
    parser_download.add_argument('-u', '--upload', action='store_true', help='Upload to S3 after the image processing completed')
    parser_download.add_argument('--username', help='USGS Eros account Username (only works if the account has' + ' special inventory access). Username and password as a fallback if the image' + 'is not found on AWS S3 or Google Storage')
    parser_download.add_argument('--password', help='USGS Eros username, used as a fallback')
    parser_download.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as ' 'Environment Variables)')
    parser_download.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY ' 'as Environment Variables)')
    parser_download.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
    parser_download.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
    parser_download.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
    # Process subcommand: run image processing on a downloaded file.
    parser_process = subparsers.add_parser('process', help='Process Landsat imagery')
    parser_process.add_argument('path', help='Path to the compressed image file')
    parser_process.add_argument('--pansharpen', action='store_true', help='Whether to also pansharpen the process ' 'image. Pansharpening requires larger memory')
    parser_process.add_argument('--ndvi', action='store_true', help='Create an NDVI map in color.')
    parser_process.add_argument('--ndvigrey', action='store_true', help='Create an NDVI map in grayscale (grey)')
    parser_process.add_argument('--clip', help='Clip the image with the bounding box provided. Values must be in ' + 'WGS84 datum, and with longitude and latitude units of decimal degrees ' + 'separated by comma.' + 'Example: --clip=-346.06658935546875,49.93531194616915,-345.4595947265625,' + '50.2682767372753')
    parser_process.add_argument('-b', '--bands', help='specify band combinations. Default is 432' 'Example: --bands 321', default='432')
    parser_process.add_argument('-v', '--verbose', action='store_true', help='Turn on verbosity')
    parser_process.add_argument('-u', '--upload', action='store_true', help='Upload to S3 after the image processing completed')
    parser_process.add_argument('--key', help='Amazon S3 Access Key (You can also be set AWS_ACCESS_KEY_ID as ' 'Environment Variables)')
    parser_process.add_argument('--secret', help='Amazon S3 Secret Key (You can also be set AWS_SECRET_ACCESS_KEY ' 'as Environment Variables)')
    parser_process.add_argument('--bucket', help='Bucket name (required if uploading to s3)')
    parser_process.add_argument('--region', help='URL to S3 region e.g. s3-us-west-2.amazonaws.com')
    parser_process.add_argument('--force-unzip', help='Force unzip tar file', action='store_true')
    return parser
|
def main(mode):
    """Tag the current repository and commit changes to news files.

    Configures git for CI, tags the version, commits the changelog, and
    uploads the distribution to PyPI via twine.  Pushes to the remote
    only for ``prod`` releases.

    :param mode: ReleaseTarget mode (i.e. beta or prod)
    :type mode: ReleaseTarget
    """
    # see:
    # https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi
    version = subprocess.check_output(['python', 'setup.py', '--version']).decode().strip()
    twine_repo = os.getenv('TWINE_REPOSITORY_URL') or os.getenv('TWINE_REPOSITORY')
    print('tagging and releasing %r as a %s release to %s as Twine user %s' % (version, mode.name, twine_repo, os.getenv('TWINE_USERNAME')))
    print('python - preparing environment')
    subprocess.check_call(['apk', 'update'])
    subprocess.check_call(['apk', 'add', 'git'])
    # Pin twine to 1.11 due to https://github.com/pypa/twine/issues/404
    subprocess.check_call(['pip', 'install', 'twine<1.12'])
    # Rewrite the origin remote from ssh to https so CI can push.
    url = subprocess.check_output(['git', 'remote', 'get-url', 'origin'])
    new_url = git_url_ssh_to_https(url.decode())
    subprocess.check_call(['git', 'remote', 'set-url', 'origin', new_url])
    branch_spec = 'origin/%s' % os.getenv('CIRCLE_BRANCH')
    subprocess.check_call(['git', 'branch', '--set-upstream-to', branch_spec])
    subprocess.check_call(['git', 'fetch', '--tags', '--force'])
    # tags
    subprocess.check_call(['git', 'tag', '-a', version, '-m', 'release %s' % version])
    subprocess.check_call(['git', 'tag', '-f', 'latest'])
    if mode == release_target_map['prod']:
        print('git - pushing %s tags' % mode.name)
        subprocess.check_call(['git', 'push', '-f', 'origin', '--tags'])
    print('git - add changes')
    subprocess.check_call(['git', 'add', 'src/mbed_cloud/_version.py'])
    subprocess.check_call(['git', 'add', 'CHANGELOG.rst'])
    subprocess.check_call(['git', 'add', 'docs/news/*'])
    # "[skip ci]" prevents the release commit from retriggering CI.
    message = ':checkered_flag: :newspaper: releasing version %s\n[skip ci]' % version
    subprocess.check_call(['git', 'commit', '-m', message])
    if mode == release_target_map['prod']:
        print('git - pushing %s changelog commit' % mode.name)
        subprocess.check_call(['git', 'push', 'origin'])
    print('pypi - uploading')
    subprocess.check_call(['python', '-m', 'twine', 'upload', mode.bundle])
    print('pypi - uploading successful')
|
def get_image_id(self, namespace, respository, tag):
    """Resolve *tag* to its image id.

    Issues ``GET /v1/repositories/(namespace)/(repository)/tags/(tag*)``.
    """
    endpoint = '%s/%s' % (self.TAGS, tag)
    return self._http_call(endpoint, get, namespace=namespace, repository=respository)
|
def validate_template_syntax(source):
    """Basic Django Template syntax validation.

    Compiles *source* and converts any template error into a
    ``ValidationError``, which allows for robuster template authoring.
    """
    template_errors = (TemplateSyntaxError, TemplateDoesNotExist)
    try:
        Template(source)
    except template_errors as exc:
        raise ValidationError(text_type(exc))
|
def echo_class2(self, catstr=''):
    '''Render the pop-up second-level publishing menu.

    *catstr* carries the parent category id after a one-character prefix.
    '''
    parent_id = catstr[1:]
    rendered = self.format_class2(parent_id)
    self.write(rendered)
|
def min_depth_img(self, num_img=1):
    """Collect a series of depth images and return the min of the set.

    Parameters
    ----------
    num_img : int
        The number of consecutive frames to process.

    Returns
    -------
    :obj:`DepthImage`
        The min DepthImage collected from the frames.
    """
    frames = self._read_depth_images(num_img)
    return Image.min_images(frames)
|
def getPose3d(self):
    '''Return the most recently stored Pose3d.

    @return last JdeRobotTypes Pose3d saved
    '''
    self.lock.acquire()
    try:
        return self.pose
    finally:
        self.lock.release()
|
def update(collection_name, upsert, multi, spec, doc, safe, last_error_args, check_keys, opts, ctx=None):
    """Get an **update** message.

    A truthy *ctx* (compression context) selects the compressed wire form.
    """
    if not ctx:
        return _update_uncompressed(collection_name, upsert, multi, spec, doc,
                                    safe, last_error_args, check_keys, opts)
    return _update_compressed(collection_name, upsert, multi, spec, doc,
                              check_keys, opts, ctx)
|
def cli_parse(parser):
    """Add method specific options to CLI parser.

    Parameters
    ----------
    parser : argparse object

    Returns
    -------
    Updated argparse object
    """
    sample_opts = dict(type=int, required=True, help='Number of Samples')
    parser.add_argument('-n', '--samples', **sample_opts)
    return parser
|
def walk_dir_progress(path, maxdircnt=5000, file=sys.stdout):
    """Walk a directory, printing status updates along the way.

    Drives a ProgressBar from os.walk: the bar is refreshed every 100
    directories, and is stopped before any plain print because printing
    while the animated bar runs would garble terminal output.

    :param path: directory to walk.
    :param maxdircnt: stop after visiting this many directories.
    :param file: stream the bar and final message are written to.
    :returns: 0 (shell-style success code).
    """
    p = ProgressBar('Walking {}'.format(C(path, 'cyan')), bars=Bars.numbers_blue.with_wrapper(('(', ')')), show_time=True, file=file, )
    rootcnt = 0
    print('\nStarting progress bar...')
    p.start()
    for root, dirs, files in os.walk(path):
        rootcnt += 1
        if rootcnt % 100 == 0:
            # Update only every 100 dirs; percent is clamped to 100.
            p.update(percent=min((rootcnt / maxdircnt) * 100, 100), text='Walking {}...'.format(C(os.path.split(root)[-1], 'cyan'), ))
        if rootcnt > maxdircnt:
            # Stop is called because we are printing before the
            # AnimatedProgress is finished running.
            p.stop()
            print('\nFinished walking {} directories.'.format(C(maxdircnt, 'blue', style='bright')), file=file, )
            break
    else:
        # AnimatedProgress still running, `stop` it before printing.
        p.stop()
        print_err('\nNever made it to {} directories ({}).'.format(C(maxdircnt, 'blue', style='bright'), C(rootcnt, 'red', style='bright'), ))
    print('\nFinished with progress bar.')
    return 0
|
def losing_name(self):
    """Returns a ``string`` of the losing team's name, such as
    'Los Angeles Dodgers'."""
    loser = self._away_name if self.winner == HOME else self._home_name
    return loser.text()
|
def compute_master_secret(self, pre_master_secret, client_random, server_random):
    """Return the 48-byte master_secret, computed from pre_master_secret,
    client_random and server_random. See RFC 5246, section 6.3."""
    randoms = client_random + server_random
    if self.tls_version < 0x0300:
        # Pre-SSLv3: no master-secret derivation performed here.
        return None
    if self.tls_version == 0x0300:
        # SSLv3 PRF takes no label argument.
        return self.prf(pre_master_secret, randoms, 48)
    # TLS 1.0+: PRF(pre_master_secret, "master secret", randoms)
    return self.prf(pre_master_secret, b"master secret", randoms, 48)
|
def send_file(self, path, contents, shutit_pexpect_child=None, truncate=False, note=None, user=None, echo=False, group=None, loglevel=logging.INFO, encoding=None):
    """Sends the passed-in string as a file to the passed-in path on the
    target.

    @param path: Target location of file on target.
    @param contents: Contents of file as a string.
    @param shutit_pexpect_child: See send()
    @param note: See send()
    @param user: Set ownership to this user (defaults to whoami)
    @param group: Set group to this user (defaults to first group in groups)

    @type path: string
    @type contents: string
    """
    shutit_global.shutit_global_object.yield_to_draw()
    if not shutit_pexpect_child:
        # Fall back to the current session's pexpect child.
        shutit_pexpect_child = self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    return session.send_file(path, contents, truncate=truncate, note=note,
                             echo=echo, user=user, group=group,
                             loglevel=loglevel, encoding=encoding)
|
def effective_value(self):
    """Read/write |float| representing normalized adjustment value for this
    adjustment.

    Actual values are a large-ish integer expressed in shape coordinates,
    nominally between 0 and 100,000; the effective value normalizes that to
    a value nominally between 0.0 and 1.0 — intuitively, the proportion of
    the shape's width or height at which the adjustment sits. For extreme
    shape proportions the value can be negative or exceed 1.0.
    """
    value = self.actual
    # Fall back to the shape's declared default when no explicit value set.
    return self._normalize(self.def_val if value is None else value)
|
def _ows_check_if_paused(services=None, ports=None):
    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    If the unit isn't supposed to be paused, just return None, None.
    If the unit is performing a series upgrade, return a message indicating
    this.

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if is_unit_upgrading_set():
        state, message = check_actually_paused(services=services, ports=ports)
        if state is None:
            # Paused as expected: report readiness for the series upgrade.
            return ("blocked",
                    "Ready for do-release-upgrade and reboot. "
                    "Set complete when finished.")
        return state, message
    if is_unit_paused_set():
        state, message = check_actually_paused(services=services, ports=ports)
        if state is None:
            # Paused as expected: report normal paused maintenance state.
            return ("maintenance",
                    "Paused. Use 'resume' action to resume normal service.")
        return state, message
    return None, None
|
def create_from_hdu(cls, hdu, ebins):
    """Creates and returns an HpxMap object from a FITS HDU.

    hdu : The FITS HDU (binary table) holding the map data.
    ebins : Energy bin edges [optional]
    """
    hpx = HPX.create_from_hdu(hdu, ebins)
    colnames = hdu.columns.names
    cnames = []
    if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':
        # Sparse layout: (PIX, CHANNEL, VALUE) triplets; scatter the values
        # into a dense (nebin, npix) array via flat indexing.
        pixs = hdu.data.field('PIX')
        chans = hdu.data.field('CHANNEL')
        keys = chans * hpx.npix + pixs
        vals = hdu.data.field('VALUE')
        nebin = len(ebins)
        data = np.zeros((nebin, hpx.npix))
        data.flat[keys] = vals
    else:
        # Dense layout: one column per energy bin, selected by the
        # convention's column-name prefix (hpx.conv.colstring).
        for c in colnames:
            if c.find(hpx.conv.colstring) == 0:
                cnames.append(c)
        nebin = len(cnames)
        # NOTE(review): np.ndarray() leaves the buffer uninitialized; safe
        # here only because every row is filled in the loop below.
        data = np.ndarray((nebin, hpx.npix))
        for i, cname in enumerate(cnames):
            data[i, 0:] = hdu.data.field(cname)
    return cls(data, hpx)
|
def _highlight_caret_scope(self):
    """Highlight the scope of the current caret position.

    This get called only if :attr:`
    spyder.widgets.panels.FoldingPanel.highlight_care_scope` is True.
    """
    cursor = self.editor.textCursor()
    block_nbr = cursor.blockNumber()
    # Only re-highlight when the caret moved to a different block.
    if self._block_nbr != block_nbr:
        block = FoldScope.find_parent_scope(self.editor.textCursor().block())
        try:
            # Constructing FoldScope validates that the block belongs to a
            # fold scope; the instance itself is not used further.
            s = FoldScope(block)
        except ValueError:
            # Not inside a fold scope: clear any stale scope decorations.
            self._clear_scope_decos()
        else:
            self._mouse_over_line = block.blockNumber()
            if TextBlockHelper.is_fold_trigger(block):
                self._highlight_block(block)
        self._block_nbr = block_nbr
|
def add_categories(self, new_categories, inplace=False):
    """Add new categories.

    `new_categories` will be included at the last/highest place in the
    categories and will be unused directly after this call.

    Parameters
    ----------
    new_categories : category or list-like of category
        The new categories to be included.
    inplace : bool, default False
        Whether or not to add the categories inplace or return a copy of
        this categorical with added categories.

    Returns
    -------
    cat : Categorical with new categories added or None if inplace.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories
    reorder_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(new_categories):
        new_categories = [new_categories]
    # Reject any category that already exists; duplicates are not allowed.
    already_included = set(new_categories) & set(self.dtype.categories)
    if len(already_included) != 0:
        msg = ("new categories must not include old categories: "
               "{already_included!s}")
        raise ValueError(msg.format(already_included=already_included))
    # Append the new categories after the existing ones, preserving order.
    new_categories = list(self.dtype.categories) + list(new_categories)
    new_dtype = CategoricalDtype(new_categories, self.ordered)
    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    # Codes are semantically unchanged but may need a wider integer dtype
    # now that there are more categories.
    cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
    if not inplace:
        return cat
|
def remove_all_logger_handlers(logger: logging.Logger) -> None:
    """Detach every handler currently attached to *logger*.

    Args:
        logger: logger to modify
    """
    # Iterate over a snapshot so removal doesn't disturb the iteration.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
|
def item_count(self, request, variant_id=None):
    """Get quantity of a single item in the basket."""
    basket = utils.basket_id(request)
    variant = ProductVariant.objects.get(id=variant_id)
    try:
        quantity = BasketItem.objects.get(basket_id=basket, variant=variant).quantity
    except BasketItem.DoesNotExist:
        # No such item in this basket: report zero rather than erroring.
        quantity = 0
    return Response(data={"quantity": quantity}, status=status.HTTP_200_OK)
|
def login(self):
    """Login to verisure app api.

    Login before calling any read or write commands.

    Reuses a cached session cookie (vid) from self._cookieFileName when
    present; if the cached vid is rejected by the API, it is discarded and
    a fresh cookie is created.  On success, self._giid is set to the first
    installation's giid.
    """
    if os.path.exists(self._cookieFileName):
        with open(self._cookieFileName, 'r') as cookieFile:
            self._vid = cookieFile.read().strip()
        try:
            # Validate the cached vid by making an authenticated call.
            self._get_installations()
        except ResponseError:
            # Stale/invalid cookie: drop it and fall through to fresh login.
            self._vid = None
            os.remove(self._cookieFileName)
    if self._vid is None:
        self._create_cookie()
        with open(self._cookieFileName, 'w') as cookieFile:
            cookieFile.write(self._vid)
        self._get_installations()
    # Default to the first installation; presumably the primary one — TODO confirm.
    self._giid = self.installations[0]['giid']
|
def ReadRemoteFile(remote_file_path, hostname, ssh_key):
    """Reads a remote file into a string.

    Raises IOError if the remote command exits non-zero.
    """
    exit_code, output = RunCommandOnHost('sudo cat %s' % remote_file_path,
                                         hostname, ssh_key)
    if exit_code:
        raise IOError('Can not read remote path: %s' % (remote_file_path))
    return output
|
def _push_condition(predicate):
    """As we enter new conditions, this pushes them on the predicate stack.

    :param predicate: condition being entered — either the ``otherwise``
        sentinel or a 1-bit wirevector.
    :raises PyrtlError: if the predicate is a wirevector wider than 1 bit.
    """
    global _depth
    _check_under_condition()
    # Track how deeply nested the current conditional block is.
    _depth += 1
    if predicate is not otherwise and len(predicate) > 1:
        raise PyrtlError('all predicates for conditional assignments must wirevectors of len 1')
    # Record the predicate at the current level, then open a new (empty)
    # level for any conditions nested inside this one.
    _conditions_list_stack[-1].append(predicate)
    _conditions_list_stack.append([])
|
def disable_dataset(self, dataset=None, **kwargs):
    """Disable a 'dataset'.

    Datasets that are enabled will be computed during :meth:`run_compute`
    and included in the cost function during :meth:`run_fitting`.  If
    compute is not provided, the dataset will be disabled across all
    compute options.

    :parameter str dataset: name of the dataset
    :parameter **kwargs: any other tags to do the filter
        (except dataset or context)
    :return: :class:`phoebe.parameters.parameters.ParameterSet`
        of the disabled dataset
    """
    filter_tags = dict(kwargs,
                       context='compute',
                       dataset=dataset,
                       qualifier='enabled')
    self.set_value_all(value=False, **filter_tags)
    history_kwargs = {'dataset': dataset}
    self._add_history(redo_func='disable_dataset', redo_kwargs=history_kwargs,
                      undo_func='enable_dataset', undo_kwargs=history_kwargs)
    return self.get_dataset(dataset=dataset)
|
def wngram2idngram(input_file, vocab_file, output_file, buffersize=100, hashtablesize=2000000, files=20, compress=False, verbosity=2, n=3, write_ascii=False, fof_size=10):
    """Convert a word N-gram file into an id N-gram file.

    Takes a word N-gram file and a vocabulary file and lists every id n-gram
    which occurred in the text, along with its number of occurrences, in
    either ASCII or binary format.

    Note: It is important that the vocabulary file is in alphabetical order.
    If you are using vocabularies generated by wfreq2vocab then this should
    not be an issue, as they will already be alphabetically sorted.

    :param input_file: path of the word N-gram file fed to the tool's stdin.
    :param vocab_file: path of the (alphabetically sorted) vocabulary file.
    :param output_file: path the id N-gram output is written to.
    :returns: the tool's stripped stdout (decoded to str on Python 3).
    :raises ConversionError: if the external tool exits non-zero.
    """
    cmd = ['wngram2idngram',
           '-vocab', os.path.abspath(vocab_file),
           '-idngram', os.path.abspath(output_file)]
    if buffersize:
        cmd.extend(['-buffer', buffersize])
    if hashtablesize:
        cmd.extend(['-hash', hashtablesize])
    if files:
        cmd.extend(['-files', files])
    if verbosity:
        cmd.extend(['-verbosity', verbosity])
    if n:
        cmd.extend(['-n', n])
    if fof_size:
        cmd.extend(['-fof_size', fof_size])
    if compress:
        cmd.append('-compress')
    if write_ascii:
        cmd.append('-write_ascii')
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with tempfile.SpooledTemporaryFile() as output_f:
        # BUG FIX: the original wrote an undefined name ``text`` to stdin
        # (NameError at runtime); stream the contents of *input_file*
        # instead.  The path is resolved before do_in_tempdir() chdirs.
        with open(os.path.abspath(input_file), 'rb') as input_f:
            with output_to_debuglogger() as err_f:
                with do_in_tempdir():
                    exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
        # BUG FIX: the child process advanced the shared file offset, so
        # rewind before reading the captured stdout (read() returned b'').
        output_f.seek(0)
        output = output_f.read()
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%r' returned with non-zero exit status '%s'" % (cmd, exitcode))
    if sys.version_info >= (3,) and type(output) is bytes:
        output = output.decode('utf-8')
    return output.strip()
|
def is_not_in(self, iterable):
    """Ensures :attr:`subject` is not contained in *iterable*."""
    subject = self._subject
    self._run(unittest_case.assertNotIn, (subject, iterable))
    return ChainInspector(subject)
|
def bind(self, func: Callable[[Any], IO]) -> IO:
    """IO a -> (a -> IO b) -> IO b"""
    filename, continuation = self._get_value()
    def chained(contents):
        # Run the original continuation, then bind the new function.
        return continuation(contents).bind(func)
    return ReadFile(filename, chained)
|
def exists(self):
    """:type: bool

    True when the object actually exists (and can be accessed by
    the current user) in Fedora."""
    # If we made the object under the pretext that it doesn't exist in
    # fedora yet, then assume it doesn't exist in fedora yet.
    if self._create:
        return False
    # If we can get a valid object profile, regardless of its contents,
    # then this object exists.  If not, then it doesn't.
    try:
        self.getProfile()
    except RequestFailed:
        return False
    return True
|
def should_include_node(ctx, directives):
    # type: (ExecutionContext, Optional[List[Directive]]) -> bool
    """Determines if a field should be included based on the @include and
    @skip directives, where @skip has higher precidence than @include."""
    # TODO: Refactor based on latest code
    if not directives:
        return True
    skip_ast = next((d for d in directives
                     if d.name.value == GraphQLSkipDirective.name), None)
    if skip_ast is not None:
        args = get_argument_values(GraphQLSkipDirective.args, skip_ast.arguments, ctx.variable_values)
        if args.get("if") is True:
            return False
    include_ast = next((d for d in directives
                        if d.name.value == GraphQLIncludeDirective.name), None)
    if include_ast is not None:
        args = get_argument_values(GraphQLIncludeDirective.args, include_ast.arguments, ctx.variable_values)
        if args.get("if") is False:
            return False
    return True
|
def check_hankel(ht, htarg, verb):
    r"""Check Hankel transform parameters.

    This check-function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a detailed description
    of the input parameters.

    Parameters
    ----------
    ht : {'fht', 'qwe', 'quad'}
        Flag to choose the Hankel transform.
    htarg : str or filter from empymod.filters or array_like,
        Depends on the value for ``ht``.
    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    ht, htarg
        Checked if valid and set to defaults if not provided.
    """
    # Ensure ht is all lowercase
    ht = ht.lower()
    if ht == 'fht':    # If FHT, check filter settings
        # Get and check input or set defaults
        htarg = _check_targ(htarg, ['fhtfilt', 'pts_per_dec'])
        # Check filter; defaults to key_201_2009
        # (a filter instance has a `base` attribute; a plain string is
        # resolved as a factory name on the `filters` module)
        try:
            fhtfilt = htarg['fhtfilt']
            if not hasattr(fhtfilt, 'base'):
                fhtfilt = getattr(filters, fhtfilt)()
        except VariableCatch:
            fhtfilt = filters.key_201_2009()
        # Check pts_per_dec; defaults to 0
        try:
            pts_per_dec = _check_var(htarg['pts_per_dec'], float, 0, 'fht: pts_per_dec', ())
        except VariableCatch:
            pts_per_dec = 0.0
        # Assemble htarg
        htarg = (fhtfilt, pts_per_dec)
        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : DLF (Fast Hankel Transform)")
            print(" > Filter : " + fhtfilt.name)
            pstr = " > DLF type : "
            if pts_per_dec < 0:
                print(pstr + "Lagged Convolution")
            elif pts_per_dec > 0:
                print(pstr + "Splined, " + str(pts_per_dec) + " pts/dec")
            else:
                print(pstr + "Standard")
    elif ht in ['qwe', 'hqwe']:
        # Rename ht
        ht = 'hqwe'
        # Get and check input or set defaults; each parameter falls back to
        # the documented default when missing or invalid.
        htarg = _check_targ(htarg, ['rtol', 'atol', 'nquad', 'maxint', 'pts_per_dec', 'diff_quad', 'a', 'b', 'limit'])
        # rtol : 1e-12
        try:
            rtol = _check_var(htarg['rtol'], float, 0, 'qwe: rtol', ())
        except VariableCatch:
            rtol = np.array(1e-12, dtype=float)
        # atol : 1e-30
        try:
            atol = _check_var(htarg['atol'], float, 0, 'qwe: atol', ())
        except VariableCatch:
            atol = np.array(1e-30, dtype=float)
        # nquad : 51
        try:
            nquad = _check_var(htarg['nquad'], int, 0, 'qwe: nquad', ())
        except VariableCatch:
            nquad = np.array(51, dtype=int)
        # maxint : 100
        try:
            maxint = _check_var(htarg['maxint'], int, 0, 'qwe: maxint', ())
        except VariableCatch:
            maxint = np.array(100, dtype=int)
        # pts_per_dec : 0  # No spline
        try:
            pts_per_dec = _check_var(htarg['pts_per_dec'], int, 0, 'qwe: pts_per_dec', ())
            pts_per_dec = _check_min(pts_per_dec, 0, 'pts_per_dec', '', verb)
        except VariableCatch:
            pts_per_dec = np.array(0, dtype=int)
        # diff_quad : 100
        try:
            diff_quad = _check_var(htarg['diff_quad'], float, 0, 'qwe: diff_quad', ())
        except VariableCatch:
            diff_quad = np.array(100, dtype=float)
        # a : None
        try:
            a = _check_var(htarg['a'], float, 0, 'qwe: a (quad)', ())
        except VariableCatch:
            a = None
        # b : None
        try:
            b = _check_var(htarg['b'], float, 0, 'qwe: b (quad)', ())
        except VariableCatch:
            b = None
        # limit : None
        try:
            limit = _check_var(htarg['limit'], float, 0, 'qwe: limit (quad)', ())
        except VariableCatch:
            limit = None
        # Assemble htarg
        htarg = (rtol, atol, nquad, maxint, pts_per_dec, diff_quad, a, b, limit)
        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : Quadrature-with-Extrapolation")
            print(" > rtol : " + str(rtol))
            print(" > atol : " + str(atol))
            print(" > nquad : " + str(nquad))
            print(" > maxint : " + str(maxint))
            print(" > pts_per_dec : " + str(pts_per_dec))
            print(" > diff_quad : " + str(diff_quad))
            if a:
                print(" > a (quad): " + str(a))
            if b:
                print(" > b (quad): " + str(b))
            if limit:
                print(" > limit (quad): " + str(limit))
    elif ht in ['quad', 'hquad']:
        # Rename ht
        ht = 'hquad'
        # Get and check input or set defaults
        htarg = _check_targ(htarg, ['rtol', 'atol', 'limit', 'a', 'b', 'pts_per_dec'])
        # rtol : 1e-12
        try:
            rtol = _check_var(htarg['rtol'], float, 0, 'quad: rtol', ())
        except VariableCatch:
            rtol = np.array(1e-12, dtype=float)
        # atol : 1e-20
        try:
            atol = _check_var(htarg['atol'], float, 0, 'quad: atol', ())
        except VariableCatch:
            atol = np.array(1e-20, dtype=float)
        # limit : 500
        try:
            limit = _check_var(htarg['limit'], int, 0, 'quad: limit', ())
        except VariableCatch:
            limit = np.array(500, dtype=int)
        # a : 1e-6
        try:
            a = _check_var(htarg['a'], float, 0, 'quad: a', ())
        except VariableCatch:
            a = np.array(1e-6, dtype=float)
        # b : 0.1
        try:
            b = _check_var(htarg['b'], float, 0, 'quad: b', ())
        except VariableCatch:
            b = np.array(0.1, dtype=float)
        # pts_per_dec : 40
        try:
            pts_per_dec = _check_var(htarg['pts_per_dec'], int, 0, 'quad: pts_per_dec', ())
            pts_per_dec = _check_min(pts_per_dec, 1, 'pts_per_dec', '', verb)
        except VariableCatch:
            pts_per_dec = np.array(40, dtype=int)
        # Assemble htarg
        htarg = (rtol, atol, limit, a, b, pts_per_dec)
        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : Quadrature")
            print(" > rtol : " + str(rtol))
            print(" > atol : " + str(atol))
            print(" > limit : " + str(limit))
            print(" > a : " + str(a))
            print(" > b : " + str(b))
            print(" > pts_per_dec : " + str(pts_per_dec))
    else:
        # Unknown transform flag: report and raise.
        print("* ERROR :: <ht> must be one of: ['fht', 'qwe', 'quad'];" + " <ht> provided: " + str(ht))
        raise ValueError('ht')
    return ht, htarg
|
def fix_pdb(self):
    '''A function to fix fatal errors in PDB files when they can be automatically fixed.

    At present, this only runs if self.strict is False.  We may want a
    separate property for this since we may want to keep strict mode but
    still allow PDBs to be fixed.

    The only fixes at the moment are for missing chain IDs which get filled
    in with a valid PDB ID, if possible.
    '''
    if self.strict:
        return
    # Collect the set of chain identifiers (column 22) from coordinate records.
    chains = set()
    for l in self.lines:
        if l.startswith('ATOM ') or l.startswith('HETATM'):
            chains.add(l[21])
    # If there is a chain with a blank ID, change that ID to a valid unused ID
    if ' ' in chains:
        fresh_id = None
        # BUG FIX: string.uppercase/string.lowercase are Python-2-only names
        # and `list + map(...)` fails on Python 3; use the portable spelling.
        allowed_chain_ids = (list(string.ascii_uppercase)
                             + list(string.ascii_lowercase)
                             + [str(d) for d in range(10)])
        for c in chains:
            try:
                allowed_chain_ids.remove(c)
            except ValueError:
                # Chain ID not in the candidate list (e.g. the blank ID).
                pass
        if allowed_chain_ids:
            fresh_id = allowed_chain_ids[0]
        # Rewrite the lines, substituting the fresh ID into column 22.
        if fresh_id:
            new_lines = []
            for l in self.lines:
                if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ':
                    new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:]))
                else:
                    new_lines.append(l)
            self.lines = new_lines
|
def get_appliances(self, start=0, count=-1, filter='', fields='', query='', sort='', view=''):
    """Gets a list of all the Image Streamer resources based on optional
    sorting and filtering, and constrained by start and count parameters.

    Args:
        start:
            The first item to return, using 0-based indexing.
            If not specified, the default is 0 - start with the first
            available item.
        count:
            The number of resources to return.  A count of -1 requests all
            items.  The actual number of items in the response might differ
            from the requested count if the sum of start and count exceeds
            the total number of items.
        filter (list or str):
            A general filter/query string to narrow the list of items
            returned.  The default is no filter; all resources are returned.
        fields:
            Specifies which fields should be returned in the result set.
        query:
            A general query string to narrow the list of resources returned.
            The default is no query - all resources are returned.
        sort:
            The sort order of the returned data set.  By default, the sort
            order is based on create time with the oldest entry first.
        view:
            Return a specific subset of the attributes of the resource or
            collection, by specifying the name of a predefined view.  The
            default view is expand - show all attributes of the resource and
            all elements of collections of resources.

    Returns:
        list: Image Streamer resources associated with the Deployment
        Servers.
    """
    appliances_uri = '{}/image-streamer-appliances'.format(self.URI)
    return self._client.get_all(start, count, filter=filter, sort=sort,
                                query=query, fields=fields, view=view,
                                uri=appliances_uri)
|
# NOTE(review): the following lines are web-scrape residue (dataset-viewer UI
# text), not Python source; commented out to keep the file parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.