signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def paste_to_current_cell(self, tl_key, data, freq=None):
    """Paste ``data`` into the grid, starting at top-left cell ``tl_key``.

    Parameters
    ----------
    tl_key: Tuple
        Key of the top-left cell of the paste area.
    data: iterable of iterables, inner iterable yields strings
        The outer iterable represents rows.
    freq: Integer, defaults to None
        Status message frequency.
    """
    self.pasting = True
    self.need_abort = False

    n_rows, n_cols, _tabs = self.grid.code_array.shape
    top, left, tab = self._get_full_key(tl_key)

    overflow_rows = False
    overflow_cols = False
    pasted = 0

    for row_offset, row_values in enumerate(data):
        row = top + row_offset

        if self.grid.actions._is_aborted(row_offset, _("Pasting cells... "),
                                         freq=freq):
            self._abort_paste()
            return False

        # Stop as soon as the paste area leaves the grid vertically
        if row >= n_rows:
            overflow_rows = True
            break

        for col_offset, value in enumerate(row_values):
            col = left + col_offset
            if col >= n_cols:
                overflow_cols = True
                break

            # value is None only when pasting into a selection
            if value is None:
                continue

            try:
                CellActions.set_code(self, (row, col, tab), value)
            except KeyError:
                pass
            else:
                pasted += 1

    if overflow_rows or overflow_cols:
        self._show_final_overflow_message(overflow_rows, overflow_cols)
    else:
        self._show_final_paste_message(tl_key, pasted)

    self.pasting = False
|
def draw(self, writer, idx, offset):
    """Draw the current page view to ``writer``.

    :param writer: callable writes to output stream, receiving unicode.
    :type writer: callable
    :param idx: current page index.
    :type idx: int
    :param offset: scrolling region offset of current page.
    :type offset: int
    :returns: tuple of next (idx, offset).
    :rtype: (int, int)
    """
    # The screen can be resized while we're mid-calculation, which
    # re-toggles self.dirty; since pagination is not re-flowed, the
    # whole page must be drawn over again from the top.
    while self.dirty:
        self.draw_heading(writer)
        self.dirty = self.STATE_CLEAN
        (idx, offset), page = self.page_data(idx, offset)
        for chunk in self.page_view(page):
            writer(chunk)
        self.draw_status(writer, idx)
        flushout()
    return idx, offset
|
def whichEncoding(self):
    """How should I be encoded?

    @returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
    @change: 2.1.0 added the ENCODE_HTML_FORM response.
    """
    # Non-browser requests always use key-value form encoding
    if self.request.mode not in BROWSER_REQUEST_MODES:
        return ENCODE_KVFORM

    # OpenID 2 responses that would overflow a GET URL are POSTed via an
    # auto-submitting HTML form instead.
    is_openid2 = self.fields.getOpenIDNamespace() == OPENID2_NS
    if is_openid2 and len(self.encodeToURL()) > OPENID1_URL_LIMIT:
        return ENCODE_HTML_FORM
    return ENCODE_URL
|
def get_instance(self, payload):
    """Build an instance of WorkersCumulativeStatisticsInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
    """
    workspace_sid = self._solution['workspace_sid']
    return WorkersCumulativeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=workspace_sid,
    )
|
def write(self, data):
    """Writes data to the device.

    :param data: data to write
    :type data: string
    :returns: number of bytes sent
    :raises: :py:class:`~alarmdecoder.util.CommError`
    """
    bytes_sent = None
    try:
        # Text is always transmitted as UTF-8 encoded bytes
        payload = data.encode('utf-8') if isinstance(data, str) else data
        bytes_sent = self._device.send(payload)
        if bytes_sent == 0:
            raise CommError('Error writing to device.')
        self.on_write(data=payload)
    except (SSL.Error, socket.error) as err:
        raise CommError('Error writing to device.', err)
    return bytes_sent
|
def _disks_equal(disk1, disk2):
    '''Test if two disk elements should be considered like the same device'''
    def _serialized_source(disk):
        # Compare <source> nodes structurally via their XML serialization
        source = disk.find('source')
        return None if source is None else ElementTree.tostring(source)

    target1 = disk1.find('target')
    target2 = disk2.find('target')
    if target1 is None or target2 is None:
        return False

    return (_serialized_source(disk1) == _serialized_source(disk2)
            and target1.get('bus') == target2.get('bus')
            and disk1.get('device', 'disk') == disk2.get('device', 'disk')
            and target1.get('dev') == target2.get('dev'))
|
def create_from_cellranger(indir: str, outdir: str = None, genome: str = None) -> str:
    """Create a .loom file from 10X Genomics cellranger output.

    Args:
        indir (str): path to the cellranger output folder (the one that contains 'outs')
        outdir (str): output folder where the new loom file should be saved (default to indir)
        genome (str): genome build to load (e.g. 'mm10'; if None, determine species from outs folder)

    Returns:
        path (str): Full path to the created loom file.

    Remarks:
        The resulting file will be named ``{sampleID}.loom``, where the
        sampleID is the one given by cellranger.
    """
    if outdir is None:
        outdir = indir
    sampleid = os.path.split(os.path.abspath(indir))[-1]

    matrix_folder = os.path.join(indir, 'outs', 'filtered_gene_bc_matrices')
    if os.path.exists(matrix_folder):
        # cellranger V2 layout: one subfolder per genome build
        if genome is None:
            genome = [f for f in os.listdir(matrix_folder) if not f.startswith(".")][0]
        matrix_folder = os.path.join(matrix_folder, genome)
        matrix = mmread(os.path.join(matrix_folder, "matrix.mtx")).astype("float32").todense()
        # Context managers so file handles are closed deterministically
        # (previously the files were opened and never closed)
        with open(os.path.join(matrix_folder, "genes.tsv"), "r") as f:
            genelines = f.readlines()
        with open(os.path.join(matrix_folder, "barcodes.tsv"), "r") as f:
            bclines = f.readlines()
    else:  # cellranger V3 file locations
        if genome is None:
            # Genome is not visible from V3 folder
            genome = ""
        matrix_folder = os.path.join(indir, 'outs', 'filtered_feature_bc_matrix')
        matrix = mmread(os.path.join(matrix_folder, "matrix.mtx.gz")).astype("float32").todense()
        with gzip.open(os.path.join(matrix_folder, "features.tsv.gz"), "r") as f:
            genelines = [line.decode() for line in f.readlines()]
        with gzip.open(os.path.join(matrix_folder, "barcodes.tsv.gz"), "r") as f:
            bclines = [line.decode() for line in f.readlines()]

    # Gene annotation (tab-separated: accession, gene symbol, ...)
    accession = np.array([x.split("\t")[0] for x in genelines]).astype("str")
    gene = np.array([x.split("\t")[1].strip() for x in genelines]).astype("str")
    # Cell IDs are prefixed with the sample ID to make them globally unique
    cellids = np.array([sampleid + ":" + x.strip() for x in bclines]).astype("str")
    col_attrs = {"CellID": cellids}
    row_attrs = {"Accession": accession, "Gene": gene}

    # tSNE projection, if the analysis folder contains one
    tsne_file = os.path.join(indir, "outs", "analysis", "tsne", "projection.csv")
    if not os.path.exists(tsne_file):
        # In cellranger V2 the file moved one level deeper
        tsne_file = os.path.join(indir, "outs", "analysis", "tsne", "2_components", "projection.csv")
    if os.path.exists(tsne_file):
        tsne = np.loadtxt(tsne_file, usecols=(1, 2), delimiter=',', skiprows=1)
        col_attrs["X"] = tsne[:, 0].astype('float32')
        col_attrs["Y"] = tsne[:, 1].astype('float32')

    # Graph-based clustering labels, if present (converted to zero-based)
    clusters_file = os.path.join(indir, "outs", "analysis", "clustering", "graphclust", "clusters.csv")
    if os.path.exists(clusters_file):
        labels = np.loadtxt(clusters_file, usecols=(1,), delimiter=',', skiprows=1)
        col_attrs["ClusterID"] = labels.astype('int') - 1

    path = os.path.join(outdir, sampleid + ".loom")
    create(path, matrix, row_attrs, col_attrs, file_attrs={"Genome": genome})
    return path
|
def cancel_order(self, order_param):
    """Cancel an open order.

    Parameters
    ----------
    order_param : str or Order
        The order_id or order object to cancel.
    """
    # Accept either a bare id or a full Order object
    if isinstance(order_param, zipline.protocol.Order):
        order_id = order_param.id
    else:
        order_id = order_param
    self.blotter.cancel(order_id)
|
def fix_tour(self, tour):
    """Test each scaffold if dropping does not decrease LMS."""
    scaffolds, oos = zip(*tour)
    keep = set()
    for mlg in self.linkage_groups:
        lg = mlg.lg
        for s, o in tour:
            i = scaffolds.index(s)
            # Marker series from everything before / after the current scaffold
            before = list(flatten([self.get_series(lg, x, xo) for x, xo in tour[:i]]))
            after = list(flatten([self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]))
            current = self.get_series(lg, s, o)
            score_with = lms(before + current + after)[0]
            score_without = lms(before + after)[0]
            assert score_with >= score_without
            # Keep the scaffold only if it strictly improves the score
            if score_with > score_without:
                keep.add(s)
    dropped = len(tour) - len(keep)
    logging.debug("Dropped {0} minor scaffolds".format(dropped))
    return [(s, o) for (s, o) in tour if s in keep]
|
def to_pandas(self):
    """Returns the dataset as two pandas objects: X and y.

    Returns
    -------
    X : DataFrame with shape (n_instances, n_features)
        A pandas DataFrame containing feature data and named columns.
    y : Series with shape (n_instances,)
        A pandas Series containing target data and an index that matches
        the feature DataFrame index.
    """
    # Ensure the metadata is valid before continuing
    if self.meta is None:
        raise DatasetsError((
            "the downloaded dataset was improperly packaged without meta.json "
            "- please report this bug to the Yellowbrick maintainers!"))
    if "features" not in self.meta or "target" not in self.meta:
        raise DatasetsError((
            "the downloaded dataset was improperly packaged without features "
            "or target - please report this bug to the Yellowbrick maintainers!"))

    # Load data frame and return features and target
    # TODO: Return y as None if there is no self.meta["target"]
    frame = self.to_dataframe()
    X = frame[self.meta["features"]]
    y = frame[self.meta["target"]]
    return X, y
|
def snapshotToMovie(snap, filename, *args, **kwargs):
    """NAME:

       snapshotToMovie

    PURPOSE:

       turn a list of snapshots into a movie

    INPUT:

       snap - the snapshots (list)

       filename - name of the file to save the movie to

       framerate= in fps

       bitrate= ?

       thumbnail=False : create thumbnail image (filename-extension+.jpg)

       thumbsize= size of thumbnail

       +Snapshot.plot args and kwargs

    OUTPUT:

       movie is saved to file

    DEPENDENCIES:

       this procedure uses ffmpeg and convert

    BUGS:

       matplotlib's 'Agg' backend has a memory leak that prevents it from
       creating hundred's of figures. It is recommended to call

       import matplotlib
       matplotlib.use('PDF')

       at the beginning of the movie creating script as the PDF backend does
       not have the same memory leak.

    HISTORY:

       2011-02-06 - Written - Bovy (NYU)
    """
    # dict.has_key was removed in Python 3; pop with defaults instead.
    # All movie-specific options are popped so they do not leak into the
    # Snapshot.plot kwargs (thumbsize previously leaked).
    tmpdir = kwargs.pop('tmpdir', '/tmp')
    framerate = kwargs.pop('framerate', 25)
    bitrate = kwargs.pop('bitrate', 1000)
    thumbnail = bool(kwargs.pop('thumbnail', False))
    thumbsize = kwargs.pop('thumbsize', 300)

    # Create all of the frame files in a fresh temporary directory
    tempdir = tempfile.mkdtemp(dir=tmpdir)
    tmpfiles = []
    nsnap = len(snap)
    file_length = int(m.ceil(m.log10(nsnap)))
    for ii in range(nsnap):
        tmpfiles.append(os.path.join(tempdir, str(ii).zfill(file_length)))
        bovy_plot.bovy_print()
        snap[ii].plot(*args, **kwargs)
        bovy_plot.bovy_end_print(tmpfiles[ii] + '.pdf')
        # Convert to jpeg
        try:
            subprocess.check_call(
                ['convert', tmpfiles[ii] + '.pdf', tmpfiles[ii] + '.jpg'])
        except subprocess.CalledProcessError:
            print("'convert' failed")
            _cleanupMovieTempdir(tempdir)
            # re-raise the original exception ('raise CalledProcessError'
            # without arguments was broken, the class needs arguments)
            raise
    # turn them into a movie
    try:
        subprocess.check_call(
            ['ffmpeg', '-r', str(framerate), '-b', str(bitrate),
             '-i', os.path.join(tempdir, '%' + '0%id.jpg' % file_length),
             '-y', filename])
        if thumbnail:
            # Thumbnail name: movie filename with its extension replaced by .jpg
            thumbname = ''.join(re.split(r'\.', filename)[:-1]) + '.jpg'
            subprocess.check_call(
                ['ffmpeg', '-itsoffset', '-4', '-y', '-i', filename,
                 '-vcodec', 'mjpeg', '-vframes', '1', '-an', '-f', 'rawvideo',
                 '-s', '%ix%i' % (thumbsize, thumbsize), thumbname])
    except subprocess.CalledProcessError:
        print("'ffmpeg' failed")
        raise  # tempdir is removed by the finally clause below
    finally:
        _cleanupMovieTempdir(tempdir)
|
def secret_from_transfer_task(
        transfer_task: Optional[TransferTask],
        secrethash: SecretHash,
) -> Optional[Secret]:
    """Return the secret for the transfer, None on EMPTY_SECRET."""
    # Only initiator tasks carry the secret for their transfers
    assert isinstance(transfer_task, InitiatorTask)
    state = transfer_task.manager_state.initiator_transfers[secrethash]
    return None if state is None else state.transfer_description.secret
|
def assemble(self, module, *modules, **kwargs):
    # type: (AbstractModule, *AbstractModule, **Any) -> SeqRecord
    """Assemble the provided modules into the vector.

    Arguments:
        module (`~moclo.base.modules.AbstractModule`): a module to insert
            in the vector.
        modules (`~moclo.base.modules.AbstractModule`, optional): additional
            modules to insert in the vector. The order of the parameters
            is not important, since modules will be sorted by their start
            overhang in the function.

    Returns:
        `~Bio.SeqRecord.SeqRecord`: the assembled sequence with sequence
        annotations inherited from the vector and the modules.

    Raises:
        `~moclo.errors.DuplicateModules`: when two different modules share
            the same start overhang, leading in possibly non-deterministic
            constructs.
        `~moclo.errors.MissingModule`: when a module has an end overhang
            that is not shared by any other module, leading to a partial
            construct only.
        `~moclo.errors.InvalidSequence`: when one of the modules does not
            match the required module structure (missing site, wrong
            overhang, etc.).
        `~moclo.errors.UnusedModules`: when some modules were not used
            during the assembly (mostly caused by duplicate parts).
    """
    all_modules = [module] + list(modules)
    manager = AssemblyManager(
        vector=self,
        modules=all_modules,
        name=kwargs.get("name", "assembly"),
        id_=kwargs.get("id", "assembly"),
    )
    return manager.assemble()
|
def run_ut_py3_qemu():
    """Run unit tests in the emulator and copy the results back to the host
    through the mounted volume in /mxnet"""
    from vmcontrol import VM
    with VM() as vm:
        port = vm.ssh_port
        qemu_provision(port)
        logging.info("execute tests")
        qemu_ssh(port, "./runtime_functions.py", "run_ut_python3_qemu_internal")
        # Collect the JUnit XML results produced inside the VM
        qemu_rsync_to_host(port, "*.xml", "mxnet")
        logging.info("copied to host")
        logging.info("tests finished, vm shutdown.")
        vm.shutdown()
|
def _sparse_mux(sel, vals):
    """Mux that avoids instantiating unnecessary mux_2s when possible.

    :param WireVector sel: Select wire, determines what is selected on a
        given cycle
    :param {int: WireVector} vals: dictionary to store the values that are
    :return: Wirevector that signifies the change

    This mux supports not having a full specification. Indices that are not
    specified are treated as Don't Cares.
    """
    wires = list(vals.values())
    if len(vals) == 0:
        raise pyrtl.PyrtlError("Needs at least one parameter for val")
    if len(vals) == 1:
        # Single candidate: every select value maps to it (don't cares)
        return wires[0]

    if len(sel) == 1:
        try:
            false_result = vals[0]
            true_result = vals[1]
        except KeyError:
            raise pyrtl.PyrtlError(
                "Failed to retrieve values for smartmux. "
                "The length of sel might be wrong")
    else:
        # Split on the most significant select bit and recurse on each half
        half = 2 ** (len(sel) - 1)
        lower = {i: w for i, w in vals.items() if i < half}
        upper = {i - half: w for i, w in vals.items() if i >= half}
        if not lower:
            return sparse_mux(sel[:-1], upper)
        if not upper:
            return sparse_mux(sel[:-1], lower)
        false_result = sparse_mux(sel[:-1], lower)
        true_result = sparse_mux(sel[:-1], upper)
    if _is_equivelent(false_result, true_result):
        return true_result
    return pyrtl.select(sel[-1], falsecase=false_result, truecase=true_result)
|
def from_coo(cls, obj, vartype=None):
    """Deserialize a binary quadratic model from a COOrdinate_ format encoding.

    .. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)

    Args:
        obj: (str/file):
            Either a string or a `.read()`-supporting `file object`_ that
            represents linear and quadratic biases for a binary quadratic
            model. This data is stored as a list of 3-tuples, (i, j, bias),
            where :math:`i=j` for linear biases.
        vartype (:class:`.Vartype`/str/set, optional):
            Variable type for the binary quadratic model. Accepted input
            values:

            * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

            If not provided, the vartype must be specified with a header in
            the file.

    .. _file object: https://docs.python.org/3/glossary.html#term-file-object

    .. note:: Variables must use index labels (numeric labels). Binary
        quadratic models created from COOrdinate format encoding have
        offsets set to zero.
    """
    import dimod.serialization.coo as coo

    # Strings are parsed directly; anything else is treated as a file object
    if isinstance(obj, str):
        return coo.loads(obj, cls=cls, vartype=vartype)
    return coo.load(obj, cls=cls, vartype=vartype)
|
def save(self, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.
    """
    stripped = {}
    n_strip = len(strip_prefix)
    for param in self.values():
        weight = param._reduce()
        # Every parameter name must carry the prefix we are stripping
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be striped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "this may be due to your Block shares parameters from other "
                "Blocks or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"
                % (strip_prefix, param.name, strip_prefix))
        stripped[param.name[n_strip:]] = weight
    ndarray.save(filename, stripped)
|
def sendTo(self, dest, chunkSize):
    """Send this difference to the dest Store."""
    vol = self.toVol
    paths = self.sink.getPaths(vol)

    if self.sink == dest:
        logger.info("Keep: %s", self)
        self.sink.keep(self)
    else:
        # Log, but don't skip yet, so we can log more detailed skipped
        # actions later
        skipDryRun(logger, dest.dryrun, 'INFO')("Xfer: %s", self)
        receiveContext = dest.receive(self, paths)
        sendContext = self.sink.send(self)
        # (btrfsVersion metadata propagation intentionally disabled here)
        transfer(sendContext, receiveContext, chunkSize)

    if vol.hasInfo():
        infoContext = dest.receiveVolumeInfo(paths)
        if infoContext is not None:
            with infoContext as stream:
                vol.writeInfo(stream)
|
def _surfdens(self, R, z, phi=0., t=0.):
    """NAME:

       _surfdens

    PURPOSE:

       evaluate the surface density for this potential

    INPUT:

       R - Galactocentric cylindrical radius

       z - vertical height

       phi - azimuth

       t - time

    OUTPUT:

       the density

    HISTORY:

       2018-08-04 - Written - Bovy (UofT)
    """
    # Outside the shell's radius there is no mass to project
    if R > self.a:
        return 0.
    # Height at which the vertical column exits the shell
    h = nu.sqrt(self.a2 - R**2)
    return 0. if z < h else 1. / (2. * nu.pi * self.a * h)
|
def get_class(name, config_key, module):
    """Get the class by its name as a string.

    Searches ``module`` first; if the class is not found there, looks into
    the user-supplied plugin file referenced by ``config_key`` in the
    project configuration.

    :param name: name of the class to look up
    :param config_key: configuration key that may point to a plugin file
        containing additional classes
    :param module: module whose classes are searched first
    :returns: the class object, or None if it could not be found
    """
    clsmembers = inspect.getmembers(module, inspect.isclass)
    for string_name, act_class in clsmembers:
        if string_name == name:
            return act_class

    # Check if the user has specified a plugin and if the class is in there
    cfg = get_project_configuration()
    if config_key in cfg:
        modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
        if os.path.isfile(cfg[config_key]):
            usermodule = imp.load_source(modname, cfg[config_key])
            clsmembers = inspect.getmembers(usermodule, inspect.isclass)
            for string_name, act_class in clsmembers:
                if string_name == name:
                    return act_class
        else:
            # Bug fix: warn about the file configured under config_key, not
            # the unrelated 'data_analyzation_plugins' entry (which may not
            # even be present and raised KeyError)
            logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.",
                            cfg[config_key])

    logging.debug("Unknown class '%s'.", name)
    return None
|
def get_pval_uncorr(self, study, log=sys.stdout):
    """Calculate the uncorrected pvalues for study items."""
    study_in_pop = self.pop.intersection(study)
    # " 99%    378 of    382 study items found in population"
    go2studyitems = get_terms("study", study_in_pop, self.assoc, self.obo_dag, log)
    pop_n = self.pop_n
    study_n = len(study_in_pop)
    allterms = set(go2studyitems).union(set(self.go2popitems))

    if log is not None:
        # Some study genes may not have been found in the population.
        # Report from orig
        study_n_orig = len(study)
        perc = 100.0 * study_n / study_n_orig if study_n_orig != 0 else 0.0
        log.write(
            "{R:3.0f}% {N:>6,} of {M:>6,} study items found in population({P})\n".format(
                N=study_n, M=study_n_orig, P=pop_n, R=perc))
        if study_n:
            log.write(
                "Calculating {N:,} uncorrected p-values using {PFNC}\n".format(
                    N=len(allterms), PFNC=self.pval_obj.name))

    # If no study genes were found in the population, return empty GOEA results
    if not study_n:
        return []

    calc_pvalue = self.pval_obj.calc_pvalue
    results = []
    for goid in allterms:
        study_items = go2studyitems.get(goid, set())
        pop_items = self.go2popitems.get(goid, set())
        study_count = len(study_items)
        pop_count = len(pop_items)
        results.append(GOEnrichmentRecord(
            goid,
            p_uncorrected=calc_pvalue(study_count, study_n, pop_count, pop_n),
            study_items=study_items,
            pop_items=pop_items,
            ratio_in_study=(study_count, study_n),
            ratio_in_pop=(pop_count, pop_n)))
    return results
|
def add_value(self, value, row, col):
    """Adds a single value (cell) to a worksheet at (row, col).

    Return the (row, col) where the value has been put.

    :param value: Value to write to the sheet.
    :param row: Row where the value should be written.
    :param col: Column where the value should be written.
    """
    self.__values[(row, col)] = value
    # Bug fix: the docstring promises the target coordinates, but nothing
    # (None) was returned before. Returning them is backward compatible.
    return row, col
|
def execute(desktop_file, files=None, return_cmd=False, background=False):
    '''Execute a .desktop file.

    Executes a given .desktop file path properly.

    Args:
        desktop_file (str) : The path to the .desktop file.
        files        (list): Any files to be launched by the .desktop.
                             Defaults to empty list.
        return_cmd   (bool): Return the command (as ``str``) instead of
                             executing. Defaults to ``False``.
        background   (bool): Run command in background. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``. Returns command instead of running it.
        Else returns nothing.
    '''
    # Parse once instead of re-parsing the .desktop file for every lookup
    entry = parse(desktop_file)
    desktop_file_exec = entry['Exec']

    # Strip all % field codes (%f, %F, %u, ...) from the Exec line
    for token in desktop_file_exec.split():
        if token.startswith('%'):
            desktop_file_exec = desktop_file_exec.replace(token, '')
    desktop_file_exec = desktop_file_exec.replace(r'%F', '')
    desktop_file_exec = desktop_file_exec.replace(r'%f', '')

    if files:
        for path in files:
            desktop_file_exec += ' ' + path

    if entry['Terminal']:
        # Use eval and __import__ to bypass a circular dependency.
        # SECURITY NOTE: eval on a string built from the .desktop Exec entry
        # can execute arbitrary Python if the .desktop file is untrusted.
        desktop_file_exec = eval(
            ('__import__("libdesktop").applications.terminal(exec_="%s",'
             ' keep_open_after_cmd_exec=True, return_cmd=True)')
            % desktop_file_exec)

    if return_cmd:
        return desktop_file_exec

    desktop_file_proc = sp.Popen([desktop_file_exec], shell=True)
    if not background:
        desktop_file_proc.wait()
|
def main(args=None):
    """Phablet command line user interface

    This function implements the phablet command line tool
    """
    parser = argparse.ArgumentParser(
        description=_("Run a command on Ubuntu Phablet"),
        epilog="""
    This tool will start ssh on your connected Ubuntu Touch device, forward
    a local port to the device, copy your ssh id down to the device (so you
    can log in without a password), and then ssh into the device through
    the locally forwarded port.
    This results in a very nice shell, which for example can display the
    output of 'top' at the correct terminal size, rather than being stuck
    at 80x25 like 'adb shell'
    Like ssh-copy-id, this script will push down the newest ssh key it can
    find in ~/.ssh/*.pub, so if you find the wrong key being pushed down,
    simply use 'touch' to make your desired key the newest one, and then
    this script will find it.
    """)
    # Device connection options
    dev_group = parser.add_argument_group(_("device connection options"))
    dev_group.add_argument(
        '-s', '--serial', action='store',
        help=_('connect to the device with the specified serial number'),
        default=None)
    # Timeout support requires subprocess.TimeoutExpired (Python 3.3+);
    # on older interpreters the option is hidden and disabled.
    if hasattr(subprocess, 'TimeoutExpired'):
        dev_group.add_argument(
            '-t', '--timeout', type=float, default=30.0,
            help=_('timeout for device discovery'))
    else:
        dev_group.add_argument(
            '-t', '--timeout', type=float, default=None,
            help=argparse.SUPPRESS)
    dev_group.add_argument(
        '-k', '--public-key', action='store', default=None,
        help=_('use the specified public key'))
    # Logging options
    log_group = parser.add_argument_group(_("logging options"))
    log_group.add_argument(
        '--verbose', action='store_const', dest='log_level', const='INFO',
        help=_('be more verbose during connection set-up'))
    log_group.add_argument(
        '--log-level', action='store',
        help=_('set log level (for debugging)'),
        choices=[logging.getLevelName(level)
                 for level in [logging.DEBUG, logging.INFO, logging.WARNING,
                               logging.ERROR, logging.CRITICAL]])
    parser.add_argument(
        'cmd', nargs='...',
        help=_('command to run on the phablet, '
               ' if left out an interactive shell is started'))
    parser.add_argument('--version', action='version', version=__version__)
    parser.set_defaults(log_level='WARNING')
    ns = parser.parse_args(args)
    # The private level-name mapping was renamed between Python versions
    try:  # Py3k
        level = logging._nameToLevel[ns.log_level]
    except AttributeError:  # Py27
        level = logging._levelNames[ns.log_level]
    logging.basicConfig(
        level=level, style='{', format="[{levelname:10}] {message}")
    try:
        phablet = Phablet(ns.serial)
        return phablet.run(ns.cmd, timeout=ns.timeout, key=ns.public_key)
    except PhabletError as exc:
        _logger.critical("%s", exc)
        return 255
|
def sizes(x):
    """Get a structure of sizes for a structure of nested arrays."""
    def _size_of(leaf):
        # Leaves without a usable .size attribute count as 0
        try:
            return leaf.size
        except Exception:  # pylint: disable=broad-except
            return 0
    return nested_map(x, _size_of)
|
def query(self, q, data=None, union=True, limit=None):
    """Query your database with a raw string.

    Parameters
    ----------
    q : str
        Query string to execute
    data : list, dict
        Optional argument for handlebars-queries. Data will be passed to
        the template and rendered using handlebars.
    union : bool
        Whether or not "UNION ALL" handlebars templates. This will return
        any handlebars queries as a single data frame.
    limit : int
        Number of records to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()

    Plain SQL::

        db.query("select * from Track").head(2)
        db.query("select * from Track", limit=10)

    Handlebars templates receive ``data`` and are rendered before
    execution; with a list of dicts and ``union=True`` the rendered
    queries are combined into a single result frame::

        db.query(template, data=[{"name": "Album"}, {"name": "Artist"}])
        db.query(template, data={"cols": ["AlbumId", "Title"]}, union=False)
    """
    sql = self._apply_handlebars(q, data, union) if data else q
    if limit:
        sql = self._assign_limit(sql, limit)
    return pd.read_sql(sql, self.con)
|
def list_functions(region=None, key=None, keyid=None, profile=None):
    '''List all Lambda functions visible in the current scope.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.list_functions
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    functions = []
    # list_functions is paginated; walk every page and collect the entries.
    for page in __utils__['boto3.paged_call'](conn.list_functions):
        functions.extend(page['Functions'])
    return functions
|
def expand_matrix_in_orthogonal_basis(m: np.ndarray, basis: Dict[str, np.ndarray],) -> value.LinearDict[str]:
    """Computes coefficients of expansion of m in basis.

    We require that basis be orthogonal w.r.t. the Hilbert-Schmidt inner
    product. We do not require that basis be orthonormal. Note that Pauli
    basis (I, X, Y, Z) is orthogonal, but not orthonormal.
    """
    coefficients = {}
    for name, b in basis.items():
        # Project m onto b; divide by <b, b> because the basis is orthogonal
        # but not necessarily orthonormal.
        coefficients[name] = (hilbert_schmidt_inner_product(b, m) /
                              hilbert_schmidt_inner_product(b, b))
    return value.LinearDict(coefficients)
|
def get_padding_bias(x):
    """Calculate bias tensor from padding values in tensor.

    Bias tensor that is added to the pre-softmax multi-headed attention
    logits, which has shape [batch_size, num_heads, length, length]. The
    tensor is zero at non-padding locations, and -1e9 (negative infinity)
    at padding locations.

    Args:
        x: int tensor with shape [batch_size, length]

    Returns:
        Attention bias tensor of shape [batch_size, 1, 1, length].
    """
    with tf.name_scope("attention_bias"):
        padding_mask = get_padding(x)
        bias = padding_mask * _NEG_INF
        # Insert singleton head and query-length axes so the bias
        # broadcasts against the 4-D attention logits.
        bias = tf.expand_dims(tf.expand_dims(bias, axis=1), axis=1)
        return bias
|
def circ_axial(alpha, n):
    """Transform n-axial data to a common scale.

    Parameters
    ----------
    alpha : array
        Sample of angles in radians
    n : int
        Number of modes

    Returns
    -------
    alpha : array
        Transformed angles (radians in [0, 2*pi))

    Notes
    -----
    Transform data with multiple modes (known as axial data) to a unimodal
    sample, for the purpose of certain analysis such as computation of a
    mean resultant vector (see Berens 2009).

    Examples
    --------
    Transform degrees to unimodal radians in the Berens 2009 neuro dataset.

    >>> import numpy as np
    >>> from pingouin import read_dataset
    >>> from pingouin.circular import circ_axial
    >>> df = read_dataset('circular')
    >>> alpha = df['Orientation'].values
    >>> alpha = circ_axial(np.deg2rad(alpha), 2)
    """
    # asarray avoids an unnecessary copy when alpha is already an ndarray.
    alpha = np.asarray(alpha)
    # Multiplying by the number of modes folds the modes onto one another;
    # the remainder maps the result back onto the unit circle [0, 2*pi).
    return np.remainder(alpha * n, 2 * np.pi)
|
async def _start(self, app: web.Application) -> None:
    """Start sirbot.

    Startup hook receiving the web application — presumably registered as
    an aiohttp ``on_startup`` callback; confirm against the app wiring.

    :param app: the application instance being started (unused here).
    """
    logger.info('Starting Sir Bot-a-lot ...')
    # Plugins start while the event loop is already running, so they may
    # schedule their own background tasks.
    await self._start_plugins()
    logger.info('Sir Bot-a-lot fully started')
|
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects.

    The timezone is parsed from the date string. However it is quite common
    to have dates without a timezone (not strictly correct). In this case
    the default timezone specified in default_timezone is used. This is UTC
    by default.

    :param datestring: ISO 8601 formatted date string.
    :param default_timezone: tzinfo used when the string has no timezone.
    :returns: a timezone-aware :class:`datetime.datetime`.
    :raises ParseError: if datestring is not a string or does not match.
    """
    if not isinstance(datestring, basestring):
        raise ParseError("Expecting a string %r" % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError("Unable to parse date string %r" % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
    if groups["fraction"] is None:
        groups["fraction"] = 0
    else:
        # Convert the fractional-seconds text into integer microseconds.
        groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
    # Date-only strings have no time component at all.
    # (Fixed: identity comparison with None, was `== None`.)
    if groups["hour"] is None and groups["minute"] is None and groups["second"] is None:
        return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]), tzinfo=tz)
    else:
        return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
                        int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
                        int(groups["fraction"]), tz)
|
def cachedproperty(func):
    """A memoize decorator for class properties.

    The wrapped getter runs at most once per instance; its result is
    stored on the instance under ``_<name>`` and returned directly on
    all later accesses.
    """
    attr_name = '_' + func.__name__

    @wraps(func)
    def getter(instance):
        # First access computes and caches; later accesses hit the cache.
        if not hasattr(instance, attr_name):
            setattr(instance, attr_name, func(instance))
        return getattr(instance, attr_name)

    return property(getter)
|
def bulk_modify(self, *filters_or_records, **kwargs):
    """Shortcut to bulk modify records

    .. versionadded:: 2.17.0

    Args:
        *filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters.

    Keyword Args:
        values (dict): Dictionary of one or more 'field_name': 'new_value' pairs to update

    Notes:
        Requires Swimlane 2.17+

    Examples:
        ::

            # Bulk update records by filter
            app.records.bulk_modify(
                # Query filters
                ('Field_1', 'equals', value1),
                ('Field_2', 'equals', value2),
                # New values for records
                values={
                    "Field_3": value3,
                    "Field_4": value4,
                }
            )

            # Bulk update records
            record1 = app.records.get(tracking_id='APP-1')
            record2 = app.records.get(tracking_id='APP-2')
            record3 = app.records.get(tracking_id='APP-3')
            app.records.bulk_modify(record1, record2, record3, values={"Field_Name": 'new value'})

    Returns:
        :class:`string`: Bulk Modify Job ID

    Raises:
        ValueError: if "values" is missing, not a dict, or extra keyword
            arguments are passed, or if a targeted field does not support
            bulk modification.
    """
    # Only the "values" keyword is accepted; anything else is a caller error.
    values = kwargs.pop('values', None)
    if kwargs:
        raise ValueError('Unexpected arguments: {}'.format(kwargs))
    if not values:
        raise ValueError('Must provide "values" as keyword argument')
    if not isinstance(values, dict):
        raise ValueError("values parameter must be dict of {'field_name': 'update_value'} pairs")
    # Determine whether we were given Record instances or filter tuples.
    _type = validate_filters_or_records(filters_or_records)
    request_payload = {}
    # Stub record used purely for field-name -> field-object lookups.
    record_stub = record_factory(self._app)
    # build record_id list
    if _type is Record:
        request_payload['recordIds'] = [record.id for record in filters_or_records]
    # build filters
    else:
        filters = []
        for filter_tuples in filters_or_records:
            # Each filter is a (field_name, filter_type, value) triple.
            field_name = record_stub.get_field(filter_tuples[0])
            filters.append({"fieldId": field_name.id, "filterType": filter_tuples[1], "value": field_name.get_report(filter_tuples[2])})
        request_payload['filters'] = filters
    # Ensure all values are wrapped in a bulk modification operation, defaulting to Replace if not provided for
    # backwards compatibility
    for field_name in list(values.keys()):
        modification_operation = values[field_name]
        if not isinstance(modification_operation, _BulkModificationOperation):
            values[field_name] = Replace(modification_operation)
    # build modifications
    modifications = []
    for field_name, modification_operation in values.items():  # Lookup target field
        modification_field = record_stub.get_field(field_name)
        if not modification_field.bulk_modify_support:
            raise ValueError("Field '{}' of Type '{}', is not supported for bulk modify".format(field_name, modification_field.__class__.__name__))
        modifications.append({"fieldId": {"value": modification_field.id, "type": "id"}, "value": modification_field.get_bulk_modify(modification_operation.value), "type": modification_operation.type})
    request_payload['modifications'] = modifications
    response = self._swimlane.request('put', "app/{0}/record/batch".format(self._app.id), json=request_payload)
    # Update records if instances were used to submit bulk modify request after request was successful
    if _type is Record:
        for record in filters_or_records:
            for field_name, modification_operation in six.iteritems(values):
                record[field_name] = modification_operation.value
    return response.text
|
def verify_response_time(self, expected_below):
    """Verify that response time (time span between request-response) is reasonable.

    :param expected_below: integer
    :return: Nothing
    :raises: ValueError if timedelta > expected time
    """
    measured = self.timedelta
    if measured > expected_below:
        raise ValueError("Response time is more (%f) than expected (%f)!" % (measured, expected_below))
|
def load_config_file(self, location):
    """Load a rotation scheme and other options from a configuration file.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.
    """
    location = coerce_location(location)
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        # Skip configuration entries that don't apply to this location.
        if not configured_location.match(location):
            continue
        logger.verbose("Loading configuration for %s ..", location)
        if rotation_scheme:
            self.rotation_scheme = rotation_scheme
        for name, value in options.items():
            if value:
                setattr(self, name, value)
        # Create a new Location object based on the directory of the
        # given location and the execution context of the configured
        # location, because:
        # 1. The directory of the configured location may be a filename
        #    pattern whereas we are interested in the expanded name.
        # 2. The execution context of the given location may lack some
        #    details of the configured location.
        return Location(context=configured_location.context, directory=location.directory,)
    logger.verbose("No configuration found for %s.", location)
    return location
|
def _parse_byte_data ( self , byte_data ) :
"""Extract the values from byte string ."""
|
self . length , self . data_type = unpack ( '<ii' , byte_data [ : self . size ] )
|
def maintainer(self):
    """
    >>> package = yarg.get('yarg')
    >>> package.maintainer
    Maintainer(name=u'Kura', email=u'kura@kura.io')
    """
    # Expose the two maintainer fields as a lightweight named tuple.
    Maintainer = namedtuple('Maintainer', 'name email')
    return Maintainer(
        name=self._package['maintainer'],
        email=self._package['maintainer_email'],
    )
|
def _get_mapping ( self , schema ) :
"""Get mapping for given resource or item schema .
: param schema : resource or dict / list type item schema"""
|
properties = { }
for field , field_schema in schema . items ( ) :
field_mapping = self . _get_field_mapping ( field_schema )
if field_mapping :
properties [ field ] = field_mapping
return { 'properties' : properties }
|
def record_result(self, res, prg=''):
    """record the output of the command. Records the result, can have
    multiple results, so will need to work out a consistent way to
    aggregate this"""
    text = force_to_string(res)
    self._log(self.logFileResult, text, prg)
|
def feed(self, data):
    """added this check as sometimes we are getting the data in integer format instead of string"""
    try:
        combined = self.rawdata + data
    except TypeError:
        # Non-string input (e.g. an integer): coerce to text and retry.
        data = unicode(data)
        combined = self.rawdata + data
    self.rawdata = combined
    self.goahead(0)
|
def setLength(self, personID, length):
    """setLength(string, double) -> None

    Sets the length in m for the given person."""
    connection = self._connection
    connection._sendDoubleCmd(tc.CMD_SET_PERSON_VARIABLE, tc.VAR_LENGTH, personID, length)
|
def trigger_all_change_callbacks(self):
    """Trigger all callbacks that were set with on_change()."""
    results = []
    # Fire the change callbacks for every key in this domain's store and
    # collect their return values in order.
    for key in DatastoreLegacy.store[self.domain].keys():
        results.extend(self.trigger_change_callbacks(key))
    return results
|
def generate_machine_id(new=False, destination_file=constants.machine_id_file):
    """Generate a machine-id if /etc/insights-client/machine-id does not exist"""
    logging_name = 'machine-id'
    if not new and os.path.isfile(destination_file):
        # Reuse the previously generated id.
        logger.debug('Found %s', destination_file)
        with open(destination_file, 'r') as fp:
            machine_id = fp.read()
    else:
        logger.debug('Could not find %s file, creating', logging_name)
        machine_id = str(uuid.uuid4())
        logger.debug("Creating %s", destination_file)
        write_to_disk(destination_file, content=machine_id)
    return str(machine_id).strip()
|
def from_pauli(pauli, coeff=1.0):
    """Make new Term from an Pauli operator"""
    # Identity operators and zero coefficients collapse to an empty term.
    is_trivial = pauli.is_identity or coeff == 0
    if is_trivial:
        return Term((), coeff)
    return Term((pauli,), coeff)
|
def mkdir(directory, exists_okay):
    """Create a directory on the board.

    Mkdir will create the specified directory on the board. One argument is
    required, the full path of the directory to create.

    Note that you cannot recursively create a hierarchy of directories with
    one mkdir command, instead you must create each parent directory with
    separate mkdir command calls.

    For example to make a directory under the root called 'code':

      ampy --port /board/serial/port mkdir /code
    """
    # Run the mkdir command.
    board_files = files.Files(_board)
    board_files.mkdir(directory, exists_okay=exists_okay)
|
def shell_split(text):
    """Split the string `text` using shell-like syntax.

    This avoids breaking single/double-quoted strings (e.g. containing
    strings with spaces). This function is almost equivalent to the
    shlex.split function (see standard library `shlex`) except that it is
    supporting unicode strings (shlex does not support unicode until
    Python 2.7.3).
    """
    assert is_text_string(text)  # in case a QString is passed...
    # Capture runs of whitespace and (unescaped) quoted spans so they come
    # back as tokens from re.split.
    quoted_or_space = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
    pieces = re.split(quoted_or_space, text)
    # Drop whitespace-only pieces, strip surrounding quotes from the rest.
    return [piece.strip('"').strip("'") for piece in pieces if piece.strip()]
|
def handle_symbol_search(self, call_id, payload):
    """Handler for symbol search results"""
    self.log.debug('handle_symbol_search: in %s', Pretty(payload))
    quickfix_items = []
    for sym in payload["syms"]:
        pos = sym.get("pos")
        # Symbols without position information cannot be jumped to.
        if not pos:
            continue
        quickfix_items.append(
            self.editor.to_quickfix_item(str(pos["file"]), pos["line"], str(sym["name"]), "info"))
    self.editor.write_quickfix_list(quickfix_items, "Symbol Search")
|
def update_feed(self, feed, feed_id):
    """UpdateFeed.

    [Preview API] Change the attributes of a feed.

    :param :class:`<FeedUpdate> <azure.devops.v5_0.feed.models.FeedUpdate>` feed: A JSON object containing the feed settings to be updated.
    :param str feed_id: Name or Id of the feed.
    :rtype: :class:`<Feed> <azure.devops.v5_0.feed.models.Feed>`
    """
    # Only add the route value when a feed id was actually supplied.
    if feed_id is None:
        route_values = {}
    else:
        route_values = {'feedId': self._serialize.url('feed_id', feed_id, 'str')}
    content = self._serialize.body(feed, 'FeedUpdate')
    response = self._send(http_method='PATCH',
                          location_id='c65009a7-474a-4ad1-8b42-7d852107ef8c',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Feed', response)
|
def comparison():
    r"""Benchmark edge deletion across the dynamic-connectivity backends.

    CommandLine:
        python -m utool.experimental.dynamic_connectivity comparison --profile
        python -m utool.experimental.dynamic_connectivity comparison
    """
    # Single import instead of the former five redundant `import utool`
    # statements scattered through the body.
    import utool

    n = 12
    a, b = 9, 20
    num = 3

    def fresh_tree(version, fast=None):
        # Each timing run rebuilds the structure so deletions don't compound.
        g = nx.balanced_tree(2, n)
        if fast is None:
            return TestETT.from_tree(g, version=version)
        return TestETT.from_tree(g, version=version, fast=fast)

    for timer in utool.Timerit(num, 'old bst version (PY)'):
        self = fresh_tree('bst', fast=False)
        with timer:
            self.delete_edge_bst_version(a, b, bstjoin=False)
    for timer in utool.Timerit(num, 'new bst version (PY) (with join)'):
        self = fresh_tree('bst', fast=False)
        with timer:
            self.delete_edge_bst_version(a, b, bstjoin=True)
    for timer in utool.Timerit(num, 'old bst version (C)'):
        self = fresh_tree('bst', fast=True)
        with timer:
            self.delete_edge_bst_version(a, b, bstjoin=False)
    for timer in utool.Timerit(num, 'new bst version (C) (with join)'):
        self = fresh_tree('bst', fast=True)
        with timer:
            self.delete_edge_bst_version(a, b, bstjoin=True)
    for timer in utool.Timerit(num, 'list version'):
        self = fresh_tree('list')
        with timer:
            self.delete_edge_list_version(a, b)
|
def delete(self, **kwargs):
    """Removes this app object from the platform.

    The current user must be a developer of the app.
    """
    # Prefer the explicit dxid; otherwise address the app by name/alias.
    if self._dxid is None:
        return dxpy.api.app_delete('app-' + self._name, alias=self._alias, **kwargs)
    return dxpy.api.app_delete(self._dxid, **kwargs)
|
def _ensure_url_has_path ( self , url ) :
"""ensure the url has a path component
eg . http : / / example . com # abc converted to http : / / example . com / # abc"""
|
inx = url . find ( '://' )
if inx > 0 :
rest = url [ inx + 3 : ]
elif url . startswith ( '//' ) :
rest = url [ 2 : ]
else :
rest = url
if '/' in rest :
return url
scheme , netloc , path , query , frag = urlsplit ( url )
if not path :
path = '/'
url = urlunsplit ( ( scheme , netloc , path , query , frag ) )
return url
|
def dd2dm(dd):
    """Convert decimal degrees to degrees, decimal minutes.

    :param dd: angle in decimal degrees.
    :returns: tuple ``(d, m, s)`` where ``m`` carries the seconds folded in
        as decimal minutes; ``s`` is kept in the tuple for backward
        compatibility with existing callers.
    """
    d, m, s = dd2dms(dd)
    # Fold the seconds into the minutes: 1 minute == 60 seconds.
    # (Fixed: the previous /3600 factor converted seconds to degrees,
    # not minutes, making m wrong by a factor of 60.)
    m = m + float(s) / 60
    return d, m, s
|
def normalize_full_name_false(decl):
    """Cached variant of normalize.

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized name
    """
    cache = decl.cache
    # Compute lazily on first use, then serve from the declaration's cache.
    if cache.normalized_full_name_false is None:
        full = declaration_utils.full_name(decl, with_defaults=False)
        cache.normalized_full_name_false = normalize(full)
    return cache.normalized_full_name_false
|
def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5):
    """Return a name string representing the given git repo and branch,
    to be used as a build name.

    NOTE: Build name will be used to generate pods which have a limit of
    64 characters and is composed as:

        <buildname>-<buildnumber>-<podsuffix>
        rhel7-1-build

    Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default suffixes,
    name should be limited to 53 chars (64 - 11).

    OpenShift is very peculiar in which BuildConfig names it allows. For
    this reason, only certain characters are allowed. Any disallowed
    characters will be removed from repo and branch names.

    :param repo: str, the git repository to be used
    :param branch: str, the git branch to be used
    :param limit: int, max name length
    :param separator: str, used to separate the repo and branch in name
    :return: str, name representing git repo and branch.
    """
    branch = branch or 'unknown'
    # Hash the repo path + branch so distinct inputs keep distinct names
    # even after sanitization.
    digest_source = urlparse(repo).path.lstrip('/') + branch
    hash_str = sha256(digest_source.encode('utf-8')).hexdigest()[:hash_size]
    humanish = git_repo_humanish_part_from_uri(repo)
    # Reserve room for the hash suffix and its separator.
    remaining = limit - len(hash_str) - 1
    sanitized = sanitize_strings_for_openshift(humanish, branch, remaining, separator, False)
    parts = [part for part in (sanitized, hash_str) if part]
    return separator.join(parts)
|
def curve_constructor(curve):
    """Image for :class`.Curve` docstring."""
    if NO_IMAGES:
        return
    axis = curve.plot(256)
    control_points = curve._nodes
    curve_color = axis.lines[0].get_color()
    # Overlay the control points as black dots on the rendered curve.
    axis.plot(control_points[0, :], control_points[1, :], color="black", linestyle="None", marker="o")
    add_patch(axis, control_points, curve_color)
    axis.axis("scaled")
    axis.set_xlim(-0.125, 1.125)
    axis.set_ylim(-0.0625, 0.5625)
    save_image(axis.figure, "curve_constructor.png")
|
def sample_surface_even(mesh, count):
    """Sample the surface of a mesh, returning samples which are
    approximately evenly spaced.

    Parameters
    ----------
    mesh : Trimesh object
    count : number of points to return

    Returns
    -------
    samples : (count, 3) points in space on the surface of mesh
    face_index : (count,) indices of faces for each sampled point
    """
    from .points import remove_close
    # Rejection strategy: oversample by 5x, then discard points closer than
    # a radius chosen from the surface area so roughly `count` survive.
    radius = np.sqrt(mesh.area / (2 * count))
    candidates, face_ids = sample_surface(mesh, count * 5)
    kept, keep_mask = remove_close(candidates, radius)
    return kept, face_ids[keep_mask]
|
def dumps(voevent, pretty_print=False, xml_declaration=True, encoding='UTF-8'):
    """Converts voevent to string.

    .. note:: Default encoding is UTF-8, in line with VOE2.0 schema.
        Declaring the encoding can cause diffs with the original loaded
        VOEvent, but I think it's probably the right thing to do (and lxml
        doesn't really give you a choice anyway).

    Args:
        voevent (:class:`Voevent`): Root node of the VOevent etree.
        pretty_print (bool): indent the output for improved human-legibility
            when possible.
        xml_declaration (bool): Prepends a doctype tag to the string output,
            i.e. something like ``<?xml version='1.0' encoding='UTF-8'?>``

    Returns:
        bytes: Bytestring containing raw XML representation of VOEvent.
    """
    # Serialize a deep copy so the caller's tree is not mutated by the
    # namespace-restoration step.
    vcopy = copy.deepcopy(voevent)
    _return_to_standard_xml(vcopy)
    return etree.tostring(vcopy, pretty_print=pretty_print,
                          xml_declaration=xml_declaration, encoding=encoding)
|
def _get_post_url(self, obj):
    """Needed to retrieve the changelist url as Folder/File can be extended
    and admin url may change"""
    # Code from django ModelAdmin to determine changelist on the fly
    opts = obj._meta
    url_name = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
    return reverse(url_name, current_app=self.admin_site.name)
|
def verify_experiment_module(verbose):
    """Perform basic sanity checks on experiment.py.

    Copies the working directory into a throwaway package, imports its
    ``experiment`` module from there, and checks that exactly one class
    whose base matches "Experiment" is defined.

    :param verbose: forwarded to :func:`log` to control output verbosity.
    :returns: True when the checks pass, False otherwise.
    :raises ImportError: if the wrong experiment.py ends up imported.
    """
    ok = True
    if not os.path.exists("experiment.py"):
        return False
    # Bootstrap a package in a temp directory and make it importable:
    temp_package_name = "TEMP_VERIFICATION_PACKAGE"
    tmp = tempfile.mkdtemp()
    # NOTE(review): `tmp` is never removed afterwards — consider cleaning
    # it up (shutil.rmtree) once verification finishes.
    clone_dir = os.path.join(tmp, temp_package_name)
    to_ignore = shutil.ignore_patterns(os.path.join(".git", "*"), "*.db", "snapshots", "data", "server.log")
    shutil.copytree(os.getcwd(), clone_dir, ignore=to_ignore)
    initialize_experiment_package(clone_dir)
    from dallinger_experiment import experiment
    if clone_dir not in experiment.__file__:
        raise ImportError("Checking the wrong experiment.py... aborting.")
    classes = inspect.getmembers(experiment, inspect.isclass)
    # NOTE(review): `in "Experiment"` is a substring test, so any base class
    # whose name is a substring of "Experiment" (e.g. "Exp") also matches —
    # confirm whether `== "Experiment"` was intended.
    exps = [c for c in classes if (c[1].__bases__[0].__name__ in "Experiment")]
    # Clean up: drop the temporary package's entries from sys.modules so a
    # later real import is not shadowed.
    for entry in [k for k in sys.modules if temp_package_name in k]:
        del sys.modules[entry]
    # Run checks: exactly one experiment class must be defined.
    if len(exps) == 0:
        log("✗ experiment.py does not define an experiment class.", delay=0, chevrons=False, verbose=verbose,)
        ok = False
    elif len(exps) == 1:
        log("✓ experiment.py defines 1 experiment", delay=0, chevrons=False, verbose=verbose,)
    else:
        log("✗ experiment.py defines more than one experiment class.", delay=0, chevrons=False, verbose=verbose,)
        ok = False
    return ok
|
def ack(self, delivery_tag, multiple=False):
    '''Acknowledge delivery of a message. If multiple=True, acknowledge up-to
    and including delivery_tag.
    '''
    payload = Writer()
    payload.write_longlong(delivery_tag).write_bit(multiple)
    # 60/80 are the wire-level class/method ids used for this frame —
    # presumably basic.ack per the AMQP spec; verify against the protocol
    # constants used elsewhere.
    self.send_frame(MethodFrame(self.channel_id, 60, 80, payload))
|
def getDirectory(rh):
    """Get the virtual machine's directory statements.

    Input:
        Request Handle with the following properties:
            function    - 'CMDVM'
            subfunction - 'CMD'
            userid      - userid of the virtual machine

    Output:
        Request Handle updated with the results.
        Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.getDirectory")
    results = invokeSMCLI(rh, "Image_Query_DM", ["-T", rh.userid])
    if results['overallRC'] == 0:
        # Drop everything from the *DVHOPT marker onward before printing.
        results['response'] = re.sub('\*DVHOPT.*', '', results['response'])
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)  # Use results from invokeSMCLI
    rh.printSysLog("Exit getVM.getDirectory, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
|
def _setbin_safe(self, binstring):
    """Reset the bitstring to the value given in binstring."""
    cleaned = tidy_input_string(binstring)
    # remove any 0b if present
    cleaned = cleaned.replace('0b', '')
    self._setbin_unsafe(cleaned)
|
def compute_tls13_resumption_secret(self):
    """self.handshake_messages should be ClientHello...ClientFinished."""
    # Pick the HKDF context matching our side of the connection.
    if self.connection_end == "server":
        hkdf = self.prcs.hkdf
    elif self.connection_end == "client":
        hkdf = self.pwcs.hkdf
    transcript = b"".join(self.handshake_messages)
    rs = hkdf.derive_secret(self.tls13_master_secret,
                            b"resumption master secret",
                            transcript)
    self.tls13_derived_secrets["resumption_secret"] = rs
|
def sign_execute_withdrawal(withdrawal_params, key_pair):
    """Function to execute the withdrawal request by signing the transaction
    generated from the create withdrawal function.

    Execution of this function is as follows::

        sign_execute_withdrawal(withdrawal_params=signable_params, private_key=eth_private_key)

    The expected return result for this function is as follows::

        'id': '3e1c0802-b44e-4681-a94d-29c1dec2f518',
        'timestamp': 1542090738192,
        'signature': 'e05a7b7bd30eb85959d75ea634cee06ad35d96502a763ae40....'

    :param withdrawal_params: Parameters passed from the create withdrawal function to be signed and confirmed.
    :type withdrawal_params: dict
    :param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain.
    :type key_pair: KeyPair
    :return: Dictionary of parameters to be sent to the Switcheo API
    """
    # Only the withdrawal id and a fresh timestamp are signed.
    signable_params = {
        'id': withdrawal_params['id'],
        'timestamp': get_epoch_milliseconds(),
    }
    signature = sign_message(
        encoded_message=encode_message(signable_params),
        private_key_hex=private_key_to_hex(key_pair=key_pair))
    execute_params = dict(signable_params, signature=signature)
    return execute_params
|
def outLineReceived(self, line):
    """Handle data via stdout linewise. This is useful if you turned off
    buffering.

    In your subclass, override this if you want to handle the line as a
    protocol line in addition to logging it. (You may upcall this function
    safely.)
    """
    filtered = self.outFilter(line)
    log_debug('<<< {name} stdout >>> {line}', name=self.name, line=filtered)
|
def value(self):
    """Returns the current load average as a value between 0.0 (representing
    the *min_load_average* value) and 1.0 (representing the
    *max_load_average* value). These default to 0.0 and 1.0 respectively."""
    span = self.max_load_average - self.min_load_average
    offset = self.load_average - self.min_load_average
    return offset / span
|
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, use_decimal=True, namedtuple_as_object=True, tuple_as_array=True, bigint_as_string=False, sort_keys=False, item_sort_key=None, for_json=False, ignore_nan=False, int_as_string_bitcount=None, iterable_as_array=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is false then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, the result is a ``unicode`` instance
    rather than an ASCII-escaped ``str``. If ``check_circular`` is false,
    circular-reference checking is skipped (a cycle then raises
    ``OverflowError`` or worse). If ``allow_nan`` is false, serializing
    out-of-range floats (``nan``, ``inf``, ``-inf``) raises ``ValueError``
    instead of emitting the JavaScript equivalents.

    ``indent`` (string, or int for backwards compatibility) enables
    pretty-printing; ``None`` selects the most compact form.
    ``separators`` is an ``(item_separator, key_separator)`` tuple;
    specify ``(',', ':')`` for the most compact representation.
    ``encoding`` is the character encoding for str instances (UTF-8 by
    default). ``default(obj)`` should return a serializable version of
    obj or raise TypeError.

    ``use_decimal`` serializes decimal.Decimal natively with full
    precision. ``namedtuple_as_object`` encodes tuples with ``_asdict()``
    as objects; ``tuple_as_array`` encodes tuples as arrays;
    ``iterable_as_array`` encodes any object implementing ``__iter__()``
    as an array. ``bigint_as_string`` encodes ints outside ±2**53 as
    strings; ``int_as_string_bitcount`` (n) does the same for ints of
    magnitude >= 2**n.

    ``item_sort_key`` is a callable used to sort dictionary items (takes
    precedence over ``sort_keys``); ``sort_keys`` sorts dict output by
    key. ``for_json`` uses an object's ``for_json()`` result for encoding.
    ``ignore_nan`` serializes out-of-range floats as ``null`` per
    ECMA-262, overriding ``allow_nan``.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg. NOTE: You should use ``default`` instead of
    subclassing whenever possible.
    """
    # Fast path: when every option still has its default value, reuse the
    # shared module-level encoder instead of constructing a new one.
    all_defaults = (
        not skipkeys
        and ensure_ascii
        and check_circular
        and allow_nan
        and cls is None
        and indent is None
        and separators is None
        and encoding == 'utf-8'
        and default is None
        and use_decimal
        and namedtuple_as_object
        and tuple_as_array
        and not iterable_as_array
        and not bigint_as_string
        and not sort_keys
        and not item_sort_key
        and not for_json
        and not ignore_nan
        and int_as_string_bitcount is None
        and not kw
    )
    if all_defaults:
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, namedtuple_as_object=namedtuple_as_object,
        tuple_as_array=tuple_as_array, iterable_as_array=iterable_as_array,
        bigint_as_string=bigint_as_string, sort_keys=sort_keys,
        item_sort_key=item_sort_key, for_json=for_json,
        ignore_nan=ignore_nan, int_as_string_bitcount=int_as_string_bitcount,
        **kw)
    return encoder.encode(obj)
|
def __init_from_csc(self, csc, params_str, ref_dataset):
    """Initialize the native Dataset handle from a scipy CSC sparse matrix.

    :param csc: scipy.sparse CSC matrix (``indptr``/``indices``/``data``).
    :param params_str: str, dataset parameters serialized for the C API.
    :param ref_dataset: reference dataset handle (or None) passed through to
        the C API so bin mappers can be aligned with an existing dataset.
    :returns: self, with ``self.handle`` pointing at the created dataset.
    """
    # Sanity check: a well-formed CSC matrix has one data value per index.
    if len(csc.indices) != len(csc.data):
        raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
    self.handle = ctypes.c_void_p()
    # Convert the numpy arrays into (pointer, type-tag) pairs for the FFI call.
    ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
    ptr_data, type_ptr_data, _ = c_float_array(csc.data)
    # Row indices are passed as int32, so the row count must fit in int32.
    assert csc.shape[0] <= MAX_INT32
    csc.indices = csc.indices.astype(np.int32, copy=False)
    _safe_call(_LIB.LGBM_DatasetCreateFromCSC(ptr_indptr, ctypes.c_int(type_ptr_indptr), csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), ptr_data, ctypes.c_int(type_ptr_data), ctypes.c_int64(len(csc.indptr)), ctypes.c_int64(len(csc.data)), ctypes.c_int64(csc.shape[0]), c_str(params_str), ref_dataset, ctypes.byref(self.handle)))
    return self
|
def broadcast(*sinks_):
    """The |broadcast| decorator creates a |push| object that receives a
    message by ``yield`` and then sends this message on to all the given sinks.

    .. |broadcast| replace:: :py:func:`broadcast`
    """
    @push
    def bc():
        # Instantiate every sink coroutine once, then forward each message
        # received via `yield` to all of them in order.
        targets = [sink() for sink in sinks_]
        while True:
            message = yield
            for target in targets:
                target.send(message)
    return bc
|
def equal(lhs, rhs):
    """Returns the result of element-wise **equal to** (==) comparison
    operation with broadcasting.

    For each element in the input arrays, return 1 (true) if the corresponding
    elements are the same, otherwise return 0 (false).  Equivalent to
    ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``.

    .. note::
        If the corresponding dimensions of two arrays have the same size or
        one of them has size 1, then the arrays are broadcastable to a common
        shape.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they must
        be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of boolean values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x == y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.equal(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable=no-member, protected-access
    def _scalar_equal(x, y):
        # Fallback used when both operands are plain Python scalars.
        return 1 if x == y else 0

    return _ufunc_helper(lhs, rhs, op.broadcast_equal, _scalar_equal, _internal._equal_scalar, None)
|
def configure(ctx, integration, args, show_args, editable):
    """Configure an integration with default parameters.

    You can still provide one-off integration arguments to
    :func:`honeycomb.commands.service.run` if required.

    :param ctx: click context; ``ctx.obj["HOME"]`` must hold the honeycomb home path.
    :param integration: name of the integration to configure.
    :param args: iterable of ``key=value`` argument strings.
    :param show_args: if True, only print the integration's arguments and return.
    :param editable: treat the integration path as an editable (dev) location.
    """
    home = ctx.obj["HOME"]
    integration_path = plugin_utils.get_plugin_path(home, defs.INTEGRATIONS, integration, editable)
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params, extra={"command": ctx.command.name, "params": ctx.params})
    logger.debug("loading {} ({})".format(integration, integration_path))
    integration = register_integration(integration_path)
    if show_args:
        return plugin_utils.print_plugin_args(integration_path)
    # get our integration class instance
    integration_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(integration_path))
    args_file = os.path.join(integration_path, defs.ARGS_JSON)
    with open(args_file, "w") as f:
        # Serialize once and reuse for both logging and writing; the original
        # called json.dumps() a second time for the write.
        data = json.dumps(integration_args)
        logger.debug("writing %s to %s", data, args_file)
        f.write(data)
    click.secho("[*] {0} has been configured, make sure to test it with `honeycomb integration test {0}`".format(integration.name))
|
def DictOf(name, *fields):
    """Create and return a new ``Dict`` subclass with the given name and fields.

    >>> from pyws.functions.args import DictOf, Field
    >>> dct = DictOf(
    ...     'HelloWorldDict', Field('hello', str), Field('hello', int))
    >>> issubclass(dct, Dict)
    True
    >>> dct.__name__
    'HelloWorldDict'
    >>> len(dct.fields)
    """
    # Build the subclass with an empty field list, then register the fields
    # through the class-level helper.
    dict_type = type(name, (Dict,), {'fields': []})
    # noinspection PyUnresolvedReferences
    dict_type.add_fields(*fields)
    return dict_type
|
from typing import List , Tuple , Union
def count_frequency_of_elements(t: Tuple[Union[str, int]], l: List[Union[str, int]]) -> int:
    """Count the occurrences of all elements from a list within a tuple.

    Args:
        t (Tuple[Union[str, int]]): A tuple containing elements.
        l (List[Union[str, int]]): A list of elements to be counted within the tuple.

    Returns:
        int: The count of occurrences of all list elements in the tuple.

    Examples:
        >>> count_frequency_of_elements(('a', 'a', 'c', 'b', 'd'), ['a', 'b'])
        3
        >>> count_frequency_of_elements((1, 2, 3, 1, 4, 6, 7, 1, 4), [1, 4, 7])
        6
        >>> count_frequency_of_elements((1, 2, 3, 4, 5, 6), [1, 2])
        2
    """
    # Use a set for O(1) membership tests instead of an O(len(l)) list scan
    # per tuple element.
    wanted = set(l)
    return sum(1 for element in t if element in wanted)
|
def remove_node(self, node, stop=False):
    """Removes a node from the cluster.

    By default, it doesn't also stop the node, just remove from
    the known hosts of this cluster.

    :param node: node to remove
    :type node: :py:class:`Node`
    :param stop: Stop the node
    :type stop: bool
    :raises NodeNotFound: if the node's kind or the node itself is unknown.
    """
    if node.kind not in self.nodes:
        # Format the message eagerly: the original passed the values as extra
        # exception args, leaving the %s placeholders unfilled.
        raise NodeNotFound("Unable to remove node %s: invalid node type `%s`." % (node.name, node.kind))
    try:
        index = self.nodes[node.kind].index(node)
        if self.nodes[node.kind][index]:
            del self.nodes[node.kind][index]
        if stop:
            node.stop()
        # Release the node's name for reuse and persist the new cluster state.
        self._naming_policy.free(node.kind, node.name)
        self.repository.save_or_update(self)
    except ValueError:
        raise NodeNotFound("Node %s not found in cluster" % node.name)
|
def list_upgrades(refresh=True, **kwargs):  # pylint: disable=unused-argument
    '''
    List all available package upgrades.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
    '''
    # Maps package name -> candidate (new) version.
    ret = {}
    # Optionally refresh the opkg package index first.
    if salt.utils.data.is_true(refresh):
        refresh_db()
    cmd = ['opkg', 'list-upgradable']
    call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    if call['retcode'] != 0:
        # Surface whatever opkg printed (stderr first, then stdout).
        comment = ''
        if 'stderr' in call:
            comment += call['stderr']
        if 'stdout' in call:
            comment += call['stdout']
        raise CommandExecutionError(comment)
    else:
        out = call['stdout']
        for line in out.splitlines():
            # Expected line format: "<name> - <oldversion> - <newversion>"
            # (a line with a different shape would raise ValueError here).
            name, _oldversion, newversion = line.split(' - ')
            ret[name] = newversion
    return ret
|
def _GetPathSegmentIndexForValueWeights ( self , value_weights ) :
"""Retrieves the index of the path segment based on value weights .
Args :
value _ weights : the value weights object ( instance of _ PathSegmentWeights ) .
Returns :
An integer containing the path segment index .
Raises :
RuntimeError : is no path segment index can be found ."""
|
largest_weight = value_weights . GetLargestWeight ( )
if largest_weight > 0 :
value_weight_indexes = value_weights . GetIndexesForWeight ( largest_weight )
else :
value_weight_indexes = [ ]
if value_weight_indexes :
path_segment_index = value_weight_indexes [ 0 ]
else :
path_segment_index = value_weights . GetFirstAvailableIndex ( )
if path_segment_index is None :
raise RuntimeError ( 'No path segment index found.' )
return path_segment_index
|
def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, node_name=None, network_ids=None, price=None, timeout=None, boot_disk_device=None, boot_disk_size=None, boot_disk_type=None, boot_disk_iops=None, placement_group=None, **kwargs):
    """Starts a new instance on the cloud using the given properties.

    The following tasks are done to start an instance:

    * establish a connection to the cloud web service
    * check ssh keypair and upload it if it does not yet exist. This is
      a locked process, since this function might be called in multiple
      threads and we only want the key to be stored once.
    * check if the security group exists
    * run the instance with the given properties

    :param str key_name: name of the ssh key to connect
    :param str public_key_path: path to ssh public key
    :param str private_key_path: path to ssh private key
    :param str security_group: firewall rule definition to apply on the
        instance
    :param str flavor: machine type to use for the instance
    :param str image_id: image type (os) to use for the instance
    :param str image_userdata: command to execute after startup
    :param str username: username for the given ssh key, default None
    :param float price: Spot instance price (if 0, do not use spot instances).
    :param int timeout: Timeout (in seconds) waiting for spot instances;
        only used if price > 0.
    :param str boot_disk_device: Root volume device path if not /dev/sda1
    :param str boot_disk_size: Target size, in GiB, for the root volume
    :param str boot_disk_type: Type of root volume (standard, gp2, io1)
    :param str boot_disk_iops: Provisioned IOPS for the root volume
    :param str placement_group: Enable low-latency networking between
        compute nodes.
    :return: str - instance id of the started instance
    """
    connection = self._connect()
    log.debug("Checking keypair `%s`.", key_name)
    # the `_check_keypair` method has to be called within a lock,
    # since it will upload the key if it does not exist and if this
    # happens for every node at the same time ec2 will throw an error
    # message (see issue #79)
    with BotoCloudProvider.__node_start_lock:
        self._check_keypair(key_name, public_key_path, private_key_path)
    log.debug("Checking security group `%s`.", security_group)
    security_group_id = self._check_security_group(security_group)
    # image_id = self._find_image_id(image_id)
    # When subnets are given, build one network interface per subnet and
    # attach the security group to the interface instead of the request.
    if network_ids:
        interfaces = []
        for subnet in network_ids.split(','):
            subnet_id = self._check_subnet(subnet)
            interfaces.append(boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet_id, groups=[security_group_id], associate_public_ip_address=self.request_floating_ip))
        interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
        security_groups = []
    else:
        interfaces = None
        security_groups = [security_group]
    # get defaults for `price` and `timeout` from class instance
    if price is None:
        price = self.price
    if timeout is None:
        timeout = self.timeout
    # Build a custom block-device mapping only when a root-disk size is set.
    if boot_disk_size:
        dev_root = boto.ec2.blockdevicemapping.BlockDeviceType()
        dev_root.size = int(boot_disk_size)
        dev_root.delete_on_termination = True
        if boot_disk_type:
            dev_root.volume_type = boot_disk_type
        if boot_disk_iops:
            dev_root.iops = int(boot_disk_iops)
        bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
        dev_name = boot_disk_device if boot_disk_device else "/dev/sda1"
        bdm[dev_name] = dev_root
    else:
        bdm = None
    try:  # start spot instance if bid is specified
        if price:
            log.info("Requesting spot instance with price `%s` ...", price)
            request = connection.request_spot_instances(price, image_id, key_name=key_name, security_groups=security_groups, instance_type=flavor, user_data=image_userdata, network_interfaces=interfaces, placement_group=placement_group, block_device_map=bdm, instance_profile_name=self._instance_profile)[-1]
            # wait until spot request is fullfilled (will wait
            # forever if no timeout is given)
            start_time = time.time()
            timeout = (float(timeout) if timeout else 0)
            log.info("Waiting for spot instance (will time out in %d seconds) ...", timeout)
            while request.status.code != 'fulfilled':
                if timeout and time.time() - start_time > timeout:
                    request.cancel()
                    raise RuntimeError('spot instance timed out')
                time.sleep(self.POLL_INTERVAL)
                # update request status
                request = connection.get_all_spot_instance_requests(request_ids=request.id)[-1]
        else:
            reservation = connection.run_instances(image_id, key_name=key_name, security_groups=security_groups, instance_type=flavor, user_data=image_userdata, network_interfaces=interfaces, placement_group=placement_group, block_device_map=bdm, instance_profile_name=self._instance_profile)
    except Exception as ex:
        log.error("Error starting instance: %s", ex)
        # NOTE(review): `"TooManyInstances" in ex` tests membership on the
        # exception object itself; the conventional form is
        # `"TooManyInstances" in str(ex)` — confirm intended behavior.
        if "TooManyInstances" in ex:
            raise ClusterError(ex)
        else:
            raise InstanceError(ex)
    # For spot requests, fetch the instance the fulfilled request produced.
    if price:
        vm = connection.get_only_instances(instance_ids=[request.instance_id])[-1]
    else:
        vm = reservation.instances[-1]
    vm.add_tag("Name", node_name)
    # cache instance object locally for faster access later on
    self._instances[vm.id] = vm
    return vm.id
|
def icon(self):
    """Returns the URL of a recommended icon for display.

    Lazily pulls the ``icon`` entry out of ``self.details`` on first access
    and caches it in ``self._icon``; returns ``''`` if none is available.
    """
    # Idiom fix: compare against None with `is not` rather than `!=`.
    if self._icon == '' and self.details is not None and 'icon' in self.details:
        self._icon = self.details['icon']
    return self._icon
|
def get_or_load_name(self, type_, id_, method):
    """Read-through cache for a type of object's name.

    If we don't have a cached name for this type/id, then we will query the
    live Koji server and store the value before returning.

    :param type_: str, "user" or "tag"
    :param id_: int, eg. 123456
    :param method: function to call if this value is not in the cache.
                   This method must return a deferred that fires with an
                   object with a ".name" attribute.
    :returns: deferred that when fired returns a str, or None
    """
    # NOTE(review): this body uses `yield` plus defer.returnValue, i.e. the
    # Twisted inlineCallbacks style — presumably decorated with
    # @defer.inlineCallbacks in the full file; confirm.
    name = self.get_name(type_, id_)
    if name is not None:
        # Cache hit: resolve immediately.
        defer.returnValue(name)
    # Cache miss: ask the live server via the supplied loader.
    instance = yield method(id_)
    if instance is None:
        defer.returnValue(None)
    # Store the freshly loaded name before returning it.
    self.put_name(type_, id_, instance.name)
    defer.returnValue(instance.name)
|
def turn_physical_off(self):
    """
    NAME:
       turn_physical_off
    PURPOSE:
       turn off automatic returning of outputs in physical units
    INPUT:
       (none)
    OUTPUT:
       (none)
    HISTORY:
       2014-06-17 - Written - Bovy (IAS)
    """
    # Clear both unit-scale flags and forward the request to the wrapped orbit.
    self._roSet = self._voSet = False
    self._orb.turn_physical_off()
|
def object_factory(api, api_version, kind):
    """Dynamically builds a Python class for the given Kubernetes object in an API.

    For example:

        api = pykube.HTTPClient(...)
        NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")

    This enables construction of any Kubernetes object kind without explicit support
    from pykube.

    Currently, the HTTPClient passed to this function will not be bound to the returned type.
    It is planned to fix this, but in the mean time pass it as you would normally.

    :raises ValueError: if ``kind`` is not served under ``api_version``.
    """
    resource_list = api.resource_list(api_version)
    resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
    # Fail with a clear message instead of the original's TypeError on
    # `None["namespaced"]` when the kind is not served by this API version.
    if resource is None:
        raise ValueError("unknown kind {!r} in api_version {!r}".format(kind, api_version))
    base = NamespacedAPIObject if resource["namespaced"] else APIObject
    return type(kind, (base,), {"version": api_version, "endpoint": resource["name"], "kind": kind})
|
def update(self, move):
    """Updates position by applying selected move.

    :type: move: Move
    :raises TypeError: if ``move`` is None
    :raises ValueError: if a promotion move carries no ``promoted_to_piece``
    """
    if move is None:
        raise TypeError("Move cannot be type None")
    # Keep the cached king location in sync when a king moves.
    if self.king_loc_dict is not None and isinstance(move.piece, King):
        self.king_loc_dict[move.color] = move.end_loc
    # Invalidates en-passant
    for square in self:
        pawn = square
        if isinstance(pawn, Pawn):
            pawn.just_moved_two_steps = False
    # Sets King and Rook has_moved property to True is piece has moved
    if type(move.piece) is King or type(move.piece) is Rook:
        move.piece.has_moved = True
    elif move.status == notation_const.MOVEMENT and isinstance(move.piece, Pawn) and fabs(move.end_loc.rank - move.start_loc.rank) == 2:
        # A two-square pawn advance is the only move en-passant can target.
        move.piece.just_moved_two_steps = True
    if move.status == notation_const.KING_SIDE_CASTLE:
        # Relocate the king-side rook (file 7 -> file 5) and mark it moved.
        self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
        self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
    elif move.status == notation_const.QUEEN_SIDE_CASTLE:
        # Relocate the queen-side rook (file 0 -> file 3) and mark it moved.
        self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
        self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
    elif move.status == notation_const.EN_PASSANT:
        # Remove the captured pawn, which sits on the start rank in the
        # destination file.
        self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
    elif move.status == notation_const.PROMOTE or move.status == notation_const.CAPTURE_AND_PROMOTE:
        try:
            self.remove_piece_at_square(move.start_loc)
            self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
        except TypeError as e:
            raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
        # Promotion already placed the new piece; skip the generic move below.
        return
    self.move_piece(move.piece.location, move.end_loc)
|
def get_link_page_text(link_page):
    """Construct the dialog box to display a list of links to the user."""
    lines = []
    for index, link in enumerate(link_page):
        label = link['text']
        # Truncate long labels to 19 characters plus an ellipsis.
        if len(label) > 20:
            label = label[:19] + '…'
        lines.append('[{}] [{}]({})\n'.format(index, label, link['href']))
    return ''.join(lines)
|
def drop(self, force=False):
    """Drop the database.

    Parameters
    ----------
    force : boolean, default False
        Drop any objects if they exist, and do not fail if the database
        does not exist.
    """
    self.client.drop_database(self.name, force=force)
|
def _read_para_rvs_hmac ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP RVS _ HMAC parameter .
Structure of HIP RVS _ HMAC parameter [ RFC 8004 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| HMAC |
| | Padding |
Octets Bits Name Description
0 0 rvs _ hmac . type Parameter Type
1 15 rvs _ hmac . critical Critical Bit
2 16 rvs _ hmac . length Length of Contents
4 32 rvs _ hmac . hmac HMAC
? ? - Padding"""
|
_hmac = self . _read_fileng ( clen )
rvs_hmac = dict ( type = desc , critical = cbit , length = clen , hmac = _hmac , )
_plen = length - clen
if _plen :
self . _read_fileng ( _plen )
return rvs_hmac
|
def _iter_names ( self ) :
"""Generate a key / value pair for each name in this table . The key is a
( platform _ id , name _ id ) 2 - tuple and the value is the unicode text
corresponding to that key ."""
|
table_format , count , strings_offset = self . _table_header
table_bytes = self . _table_bytes
for idx in range ( count ) :
platform_id , name_id , name = self . _read_name ( table_bytes , idx , strings_offset )
if name is None :
continue
yield ( ( platform_id , name_id ) , name )
|
def _optimise_barcodes(self, data, min_bar_height=20, min_bar_count=100, max_gap_size=30, min_percent_white=0.2, max_percent_white=0.8, **kwargs):
    """Locate suspected barcodes in ``data`` and rewrite them with their
    optimised form.

    min_bar_height =    Minimum height of black bars in px. Set this too
                        low and it might pick up text and data matrices,
                        too high and it might pick up borders, tables, etc.
    min_bar_count =     Minimum number of parallel black bars before a
                        pattern is considered a potential barcode.
    max_gap_size =      Biggest white gap in px allowed between black bars.
                        This is only important if you have multiple
                        barcodes next to each other.
    min_percent_white = Minimum percentage of white bars between black
                        bars. This helps to ignore solid rectangles.
    max_percent_white = Maximum percentage of white bars between black
                        bars. This helps to ignore solid rectangles.
    """
    # A run of at least `min_bar_height` consecutive '1' characters in a
    # line is treated as one black bar.
    re_bars = re.compile(r'1{%s,}' % min_bar_height)
    # Map each (start, end) column span to the line indexes where a bar
    # with that exact span occurs.
    bars = {}
    for i, line in enumerate(data):
        for match in re_bars.finditer(line):
            try:
                bars[match.span()].append(i)
            except KeyError:
                bars[match.span()] = [i]
    # Split each span's sightings into groups separated by more than
    # max_gap_size lines (so adjacent barcodes stay distinct).
    grouped_bars = []
    for span, seen_at in bars.items():
        group = []
        for coords in seen_at:
            if group and coords - group[-1] > max_gap_size:
                grouped_bars.append((span, group))
                group = []
            group.append(coords)
        grouped_bars.append((span, group))
    # Keep only groups with enough bars and a plausible white/black ratio.
    suspected_barcodes = []
    for span, seen_at in grouped_bars:
        if len(seen_at) < min_bar_count:
            continue
        pc_white = len(seen_at) / float(seen_at[-1] - seen_at[0])
        if pc_white >= min_percent_white and pc_white <= max_percent_white:
            suspected_barcodes.append((span, seen_at))
    # Rewrite each suspected barcode region column-block in place.
    for span, seen_at in suspected_barcodes:
        # Sample one column of the region to get its vertical profile.
        barcode = []
        for line in data[seen_at[0]:seen_at[-1] + 1]:
            barcode.append(line[span[0]])
        barcode = ''.join(barcode)
        # Do the actual optimisation
        barcode = self._optimise_barcode(barcode)
        # Reversed so `pop()` yields characters in top-to-bottom order.
        barcode = list(barcode)
        barcode.reverse()
        width = span[1] - span[0]
        for i in range(seen_at[0], seen_at[-1] + 1):
            line = data[i]
            line = (line[:span[0]] + (barcode.pop() * width) + line[span[1]:])
            data[i] = line
    return data
|
def get_brain(brain_or_object):
    """Return a ZCatalog brain for the object.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: the catalog brain for the object — the argument itself when it
        is already a brain or the portal root — or None when no catalog
        record matches its UID
    """
    # Already a brain (or the portal root, which is returned as-is).
    if is_brain(brain_or_object):
        return brain_or_object
    if is_root(brain_or_object):
        return brain_or_object
    # fetch the brain by UID
    uid = get_uid(brain_or_object)
    uc = get_tool("uid_catalog")
    # Query the UID catalog first; fall back to a generic catalog search.
    results = uc({"UID": uid}) or search(query={'UID': uid})
    if len(results) == 0:
        return None
    if len(results) > 1:
        # A UID must be unique; more than one hit is a server-side error.
        fail(500, "More than one object with UID={} found in portal_catalog".format(uid))
    return results[0]
|
def term_with_coeff(term, coeff):
    """Change the coefficient of a PauliTerm.

    :param PauliTerm term: A PauliTerm object
    :param Number coeff: The coefficient to set on the PauliTerm
    :returns: A new PauliTerm that duplicates term but sets coeff
    :rtype: PauliTerm
    """
    if not isinstance(coeff, Number):
        raise ValueError("coeff must be a Number")
    result = term.copy()
    # Cast to complex so internal coefficient handling stays uniform.
    result.coefficient = complex(coeff)
    return result
|
def print_table(*args, **kwargs):
    """Format a table via ``format_table`` and echo it to the terminal.

    All positional and keyword arguments are forwarded unchanged to
    ``format_table``; the result is printed with ``click.echo``.
    """
    t = format_table(*args, **kwargs)
    click.echo(t)
|
def longest_existing_path(_path):
    r"""Returns the longest root of _path that exists

    Args:
        _path (str): path string

    Returns:
        str: the longest existing prefix of ``_path`` — ``_path`` itself when
        it exists — or ``''`` when not even the filesystem root exists.

    CommandLine:
        python -m utool.util_path --exec-longest_existing_path

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> target = dirname(ut.__file__)
        >>> _path = join(target, 'nonexist/foobar')
        >>> existing_path = longest_existing_path(_path)
        >>> result = ('existing_path = %s' % (str(existing_path),))
        >>> print(result)
        >>> assert existing_path == target
    """
    # Walk up the directory tree until an existing path is found. Unlike the
    # previous version, the input path itself is checked first, so an
    # existing path is returned unchanged instead of its dirname.
    existing_path = _path
    while not os.path.exists(existing_path):
        parent = os.path.dirname(existing_path)
        if parent == existing_path:
            # Reached the top without finding anything that exists.
            print('!!! [utool] This is a very illformated path indeed.')
            return ''
        existing_path = parent
    return existing_path
|
def _load_sym(sym, logger=logging):
    """Given a str as a path to the symbol.json file or a symbol, returns a Symbol object."""
    if isinstance(sym, Symbol):
        # Already a Symbol: nothing to load.
        return sym
    if isinstance(sym, str):  # sym is a symbol file path
        # Resolve the path relative to this module's directory.
        module_dir = os.path.dirname(os.path.realpath(__file__))
        symbol_file_path = os.path.join(module_dir, sym)
        logger.info('Loading symbol from file %s' % symbol_file_path)
        return sym_load(symbol_file_path)
    raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
                     ' while received type %s' % str(type(sym)))
|
def zstack_proxy_iterator(self, s=0, c=0, t=0):
    """Return iterator of :class:`jicimagelib.image.ProxyImage` instances in the zstack.

    :param s: series
    :param c: channel
    :param t: timepoint
    :returns: zstack :class:`jicimagelib.image.ProxyImage` iterator
    """
    # Lazily yield only the proxies that belong to the requested zstack.
    for proxy in self:
        if not proxy.in_zstack(s=s, c=c, t=t):
            continue
        yield proxy
|
def build_table(self, msg):
    """Format each row of the table."""
    # str.split always yields at least one element, so the first line is
    # always present and is treated as the header.
    lines = msg.split('\n')
    self.create_md_row(lines[0], True)
    for body_line in lines[1:]:
        self.create_md_row(body_line)
|
def best_match(self, target, choices):
    """Return the best match for ``target`` among ``choices``.

    :returns: the first (best) match produced by ``self.all_matches``, or
        ``None`` when there are no matches.
    """
    # `next` with a default replaces the try/except StopIteration dance and
    # avoids shadowing the builtin `all` with a local variable; the implicit
    # None return is now explicit.
    matches = self.all_matches(target, choices, group=False)
    return next(matches, None)
|
def execute(self):
    """Starts a new cluster.

    Loads (or, if missing, creates) the cluster named by the command-line
    parameters, starts its nodes, and optionally runs the setup phase.
    """
    cluster_template = self.params.cluster
    # The cluster name defaults to the template name when not given.
    if self.params.cluster_name:
        cluster_name = self.params.cluster_name
    else:
        cluster_name = self.params.cluster
    creator = make_creator(self.params.config, storage_path=self.params.storage)
    if cluster_template not in creator.cluster_conf:
        raise ClusterNotFound("No cluster template named `{0}`".format(cluster_template))
    # possibly overwrite node mix from config
    cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes']
    for kind, num in self.params.nodes_override.items():
        if kind not in cluster_nodes_conf:
            raise ConfigurationError("No node group `{kind}` defined" " in cluster template `{template}`".format(kind=kind, template=cluster_template))
        cluster_nodes_conf[kind]['num'] = num
    # First, check if the cluster is already created.
    try:
        cluster = creator.load_cluster(cluster_name)
    except ClusterNotFound:
        try:
            cluster = creator.create_cluster(cluster_template, cluster_name)
        except ConfigurationError as err:
            log.error("Starting cluster %s: %s", cluster_template, err)
            return
    try:
        print("Starting cluster `{0}` with:".format(cluster.name))
        for cls in cluster.nodes:
            print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls))
        print("(This may take a while...)")
        # Each node group must reach at least its configured `min_num`.
        min_nodes = dict((kind, cluster_nodes_conf[kind]['min_num']) for kind in cluster_nodes_conf)
        cluster.start(min_nodes, self.params.max_concurrent_requests)
        if self.params.no_setup:
            print("NOT configuring the cluster as requested.")
        else:
            print("Configuring the cluster ...")
            print("(this too may take a while)")
            ok = cluster.setup()
            if ok:
                print("\nYour cluster `{0}` is ready!".format(cluster.name))
            else:
                print("\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!".format(cluster.name))
        print(cluster_summary(cluster))
    except (KeyError, ImageError, SecurityGroupError, ClusterError) as err:
        log.error("Could not start cluster `%s`: %s", cluster.name, err)
        raise
|
def make_box_pixel_mask_from_col_row(column, row, default=0, value=1):
    '''Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list.

    Parameters
    ----------
    column : iterable, int
        List of column values.
    row : iterable, int
        List of row values.
    default : int
        Value of pixels that are not selected by the mask.
    value : int
        Value of pixels that are selected by the mask.

    Returns
    -------
    numpy.ndarray
        uint8 mask of shape (80, 336).
    '''
    # FE columns and rows start from 1
    col_array = np.array(column) - 1
    row_array = np.array(row) - 1
    if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0):
        raise ValueError('Column and/or row out of range')
    shape = (80, 336)
    mask = np.full(shape, default, dtype=np.uint8)
    # Use .size rather than truthiness: `if column and row` raises
    # "truth value ... is ambiguous" for multi-element numpy arrays.
    if col_array.size and row_array.size:
        # advanced indexing
        mask[col_array.min():col_array.max() + 1, row_array.min():row_array.max() + 1] = value
    return mask
|
def findAll(self, pattern):
    """Searches for an image pattern in the given region.

    Returns an iterator of ``Match`` objects if ``pattern`` exists, an empty
    iterator otherwise (does not throw an exception). Sikuli supports OCR
    search with a text parameter. This does not (yet).

    :param pattern: ``Pattern`` object or image path string.
    :raises ValueError: if the region is outside all screens or the image
        cannot be loaded.
    :raises TypeError: if ``pattern`` is neither a string nor a ``Pattern``.
    """
    find_time = time.time()
    r = self.clipRegionToScreen()
    if r is None:
        # (An unreachable `return None` after this raise was removed.)
        raise ValueError("Region outside all visible screens")
    seconds = self.autoWaitTimeout
    # Normalize the argument into a Pattern object.
    if not isinstance(pattern, Pattern):
        if not isinstance(pattern, basestring):
            raise TypeError("find expected a string [image path] or Pattern object")
        pattern = Pattern(pattern)
    needle = cv2.imread(pattern.path)
    if needle is None:
        raise ValueError("Unable to load image '{}'".format(pattern.path))
    needle_height, needle_width, needle_channels = needle.shape
    timeout = time.time() + seconds
    # Check TemplateMatcher for valid matches, polling until the timeout.
    matches = []
    while time.time() < timeout and len(matches) == 0:
        matcher = TemplateMatcher(r.getBitmap())
        matches = matcher.findAllMatches(needle, pattern.similarity)
        time.sleep(1 / self._defaultScanRate if self._defaultScanRate is not None else 1 / Settings.WaitScanRate)
    if len(matches) == 0:
        Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
        return iter([])
    # Matches found! Turn them into Match objects
    lastMatches = []
    for match in matches:
        position, confidence = match
        x, y = position
        lastMatches.append(Match(confidence, pattern.offset, ((x + self.x, y + self.y), (needle_width, needle_height))))
    self._lastMatches = iter(lastMatches)
    Debug.info("Found match(es) for pattern '{}' at similarity ({})".format(pattern.path, pattern.similarity))
    # Capture find time in milliseconds
    self._lastMatchTime = (time.time() - find_time) * 1000
    return self._lastMatches
|
def _apply_krauss_single_qubit(krauss: Union[Tuple[Any], Sequence[Any]], args: 'ApplyChannelArgs') -> np.ndarray:
    """Use slicing to apply single qubit channel.

    For each Kraus operator K the tensor is restored from
    ``auxiliary_buffer0``, K is applied on the left axes and conj(K) on the
    right axes, and the result is accumulated into ``args.out_buffer``.
    """
    # Slices selecting the |0> / |1> components of the target qubit on the
    # left (row) and right (column) axes of the tensor.
    zero_left = linalg.slice_for_qubits_equal_to(args.left_axes, 0)
    one_left = linalg.slice_for_qubits_equal_to(args.left_axes, 1)
    zero_right = linalg.slice_for_qubits_equal_to(args.right_axes, 0)
    one_right = linalg.slice_for_qubits_equal_to(args.right_axes, 1)
    for krauss_op in krauss:
        # Restore the original tensor before applying the next Kraus term.
        np.copyto(dst=args.target_tensor, src=args.auxiliary_buffer0)
        linalg.apply_matrix_to_slices(args.target_tensor, krauss_op, [zero_left, one_left], out=args.auxiliary_buffer1)
        # No need to transpose as we are acting on the tensor
        # representation of matrix, so transpose is done for us.
        linalg.apply_matrix_to_slices(args.auxiliary_buffer1, np.conjugate(krauss_op), [zero_right, one_right], out=args.target_tensor)
        args.out_buffer += args.target_tensor
    return args.out_buffer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.