signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_client_rect(self):
    """Get the window's client-area coordinates in the desktop.

    @rtype:  L{win32.Rect}
    @return: Rectangle occupied by the window's client area on the desktop.
    @raise WindowsError: An error occurred while processing this request.
    """
    rect = win32.GetClientRect(self.get_handle())
    # Translate both corners from client coordinates to screen coordinates.
    top_left = self.client_to_screen(rect.left, rect.top)
    bottom_right = self.client_to_screen(rect.right, rect.bottom)
    rect.left, rect.top = top_left
    rect.right, rect.bottom = bottom_right
    return rect
def new(self, bootstrap_with=None, use_timer=False, incr=False, with_proof=False):
    """Actual constructor of the solver.

    Creates the low-level glucose object (once), optionally seeds it with
    clauses, and configures incremental mode and/or proof tracing.
    """
    assert not (incr and with_proof), \
        'Incremental mode and proof tracing cannot be set together.'
    if not self.glucose:
        self.glucose = pysolvers.glucose41_new()
        if bootstrap_with:
            for clause in bootstrap_with:
                self.add_clause(clause)
        self.use_timer = use_timer
        self.call_time = 0.0  # time spent for the last call to oracle
        self.accu_time = 0.0  # time accumulated for all calls to oracle
        if incr:
            pysolvers.glucose41_setincr(self.glucose)
        if with_proof:
            # Proof trace is written to a throwaway temporary file.
            self.prfile = tempfile.TemporaryFile()
            pysolvers.glucose41_tracepr(self.glucose, self.prfile)
def get_asset_content_lookup_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the asset content lookup service.

    return: (osid.repository.AssetLookupSession) - the new
            ``AssetLookupSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_asset_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_lookup()`` is ``true``.*
    """
    provider_session = self._provider_manager.get_asset_content_lookup_session(proxy)
    return AssetContentLookupSession(provider_session, self._config_map)
def create_unique_autosave_filename(self, filename, autosave_dir):
    """Create a unique autosave file name for the specified file name.

    Uniqueness is relative to the autosave names already registered in
    ``self.name_mapping``; on collision a ``-N`` counter is inserted
    before the extension.

    Args:
        filename (str): original file name
        autosave_dir (str): directory in which autosave files are stored
    """
    base = osp.basename(filename)
    candidate = osp.join(autosave_dir, base)
    if candidate in self.name_mapping.values():
        stem, ext = osp.splitext(base)
        counter = 0
        while candidate in self.name_mapping.values():
            counter += 1
            candidate = osp.join(autosave_dir, '{}-{}{}'.format(stem, counter, ext))
    return candidate
def read_plink(file_prefix, verbose=True):
    r"""Read PLINK files into Pandas data frames.

    Parameters
    ----------
    file_prefix : str
        Path prefix to the set of PLINK files (``.bed``/``.bim``/``.fam``).
        It supports loading many BED files at once using globstring
        wildcards, e.g. ``read_plink("chrom*")``.
    verbose : bool
        ``True`` for progress information; ``False`` otherwise.

    Returns
    -------
    :class:`pandas.DataFrame`
        Alleles (bim).
    :class:`pandas.DataFrame`
        Samples (fam).
    array-like
        Genotype (bed), concatenated lazily via dask.

    Notes
    -----
    The values of the ``bed`` matrix denote how many alleles ``a1`` (see
    the ``bim`` data frame) are in the corresponding position and
    individual.  The column ``i`` in the ``bim`` and ``fam`` data frames
    maps to the corresponding row of the ``bed`` matrix.

    When the wildcard maps multiple BED files at once, only one of the FAM
    files is used to define sample information; data from BIM and BED files
    are concatenated to provide a single view of the files.
    """
    from dask.array import concatenate

    # Expand the glob; if nothing matched, fall back to treating the
    # pattern (minus any '*') as a literal prefix.
    file_prefixes = sorted(glob(file_prefix))
    if len(file_prefixes) == 0:
        file_prefixes = [file_prefix.replace("*", "")]
    file_prefixes = sorted(_clean_prefixes(file_prefixes))

    # One dict of {"bed": path, "bim": path, "fam": path} per prefix.
    fn = []
    for fp in file_prefixes:
        fn.append({s: "%s.%s" % (fp, s) for s in ["bed", "bim", "fam"]})

    # 3 files (bed/bim/fam) per prefix drive the progress-bar total.
    pbar = tqdm(desc="Mapping files", total=3 * len(fn), disable=not verbose)

    msg = "Reading bim file(s)..."
    bim = _read_file(fn, msg, lambda fn: _read_bim(fn["bim"]), pbar)
    if len(file_prefixes) > 1:
        if verbose:
            msg = "Multiple files read in this order: {}"
            print(msg.format([basename(f) for f in file_prefixes]))

    # Record per-file marker counts and make the 'i' index globally
    # contiguous across the concatenated BIM frames.
    nmarkers = dict()
    index_offset = 0
    for i, bi in enumerate(bim):
        nmarkers[fn[i]["bed"]] = bi.shape[0]
        bi["i"] += index_offset
        index_offset += bi.shape[0]
    bim = pd.concat(bim, axis=0, ignore_index=True)

    # Only the first FAM file defines the samples (see Notes).
    msg = "Reading fam file(s)..."
    fam = _read_file([fn[0]], msg, lambda fn: _read_fam(fn["fam"]), pbar)[0]
    nsamples = fam.shape[0]

    bed = _read_file(
        fn,
        "Reading bed file(s)...",
        lambda fn: _read_bed(fn["bed"], nsamples, nmarkers[fn["bed"]]),
        pbar,
    )
    # Stack the per-file genotype arrays along the marker axis.
    bed = concatenate(bed, axis=0)

    pbar.close()
    return (bim, fam, bed)
def get_domain_info(self, domain):
    """Get the GoDaddy-supplied information about a specific domain.

    :param domain: The domain to obtain info about.
    :type domain: str
    :return: A JSON string representing the domain information.
    """
    endpoint = self.DOMAIN_INFO.format(domain=domain)
    return self._get_json_from_response(self.API_TEMPLATE + endpoint)
def store_oui(self, port_uuid, oui_type, oui_data):
    """Store the OUI for a vNIC.

    :param port_uuid: UUID of the vNIC
    :param oui_type: OUI ID
    :param oui_data: OUI opaque data
    """
    entry = {'oui_id': oui_type, 'oui_data': oui_data}
    self.oui_vif_map[port_uuid] = entry
def create_command_history_subscription(self, issued_command=None, on_data=None, timeout=60):
    """Create a new command history subscription.

    :param .IssuedCommand[] issued_command: (Optional) Previously issued
        commands. If not provided, updates from any command are received.
    :param on_data: Function that gets called with :class:`.CommandHistory`
        updates.
    :param float timeout: The amount of seconds to wait for the request to
        complete.
    :return: Future that can be used to manage the background websocket
        subscription.
    :rtype: .CommandHistorySubscription
    """
    opts = web_pb2.CommandHistorySubscriptionRequest()
    opts.ignorePastCommands = True
    if issued_command:
        opts.commandId.extend(_build_command_ids(issued_command))

    ws_manager = WebSocketSubscriptionManager(self._client, resource='cmdhistory', options=opts)

    # Represent subscription as a future
    sub = CommandHistorySubscription(ws_manager)

    callback = functools.partial(_wrap_callback_parse_cmdhist_data, sub, on_data)
    ws_manager.open(callback, instance=self._instance, processor=self._processor)

    # Wait until a reply or exception is received
    sub.reply(timeout=timeout)
    return sub
def int_to_bytes(int_, width=None):
    """.. _int_to_bytes:

    Convert the ``int`` *int_* to a little-endian ``bytes`` object.

    *width* is a size in **bits**; the result holds ``ceil(width / 8)``
    bytes.  (The previous docstring claimed ``len(result) == width``,
    which was wrong.)

    If *width* is None, ``int_.bit_length()`` is used, i.e. the smallest
    number of bits able to hold the number — note this yields ``b''`` for
    ``int_ == 0``, preserving historical behaviour.

    See also: bytes_to_int_
    """
    if width is None:  # was '== None'; identity test is the correct idiom
        width = int_.bit_length()
    nbytes = math.ceil(width / 8)
    # Emit bytes least-significant first; masking handles negative ints too.
    return bytes((int_ >> (shift * 8)) & 0xFF for shift in range(nbytes))
def line_distance_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
    """Line distance similarity between two line segments.

    Args:
        p1a ([float, float]): x and y coordinates. Line A start
        p1b ([float, float]): x and y coordinates. Line A end
        p2a ([float, float]): x and y coordinates. Line B start
        p2b ([float, float]): x and y coordinates. Line B end
    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
        completely different
    """
    # Average the similarity of B's two endpoints relative to segment A.
    sim_start = distance_similarity(p1a, p1b, p2a, T=T)
    sim_end = distance_similarity(p1a, p1b, p2b, T=T)
    return abs(sim_start + sim_end) * 0.5
def _exclude_pattern(self, pattern, anchor=True, prefix=None, is_regex=False):
    """Remove strings (presumably filenames) from 'files' that match
    'pattern'.

    Other parameters are the same as for 'include_pattern()', above.
    The list 'self.files' is modified in place. Return True if files are
    found.

    This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
    packaging source distributions.
    """
    pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
    matched = [f for f in list(self.files) if pattern_re.search(f)]
    for f in matched:
        self.files.remove(f)
    return bool(matched)
def explain_weights(self, **kwargs):
    """Call :func:`eli5.explain_weights` for the locally-fit
    classification pipeline.  Keyword arguments are passed
    through to :func:`eli5.explain_weights`.

    :func:`fit` must be called before using this method.
    """
    # Presumably rewrites the target-name entries in kwargs to match the
    # fitted classifier — confirm against _fix_target_names.
    self._fix_target_names(kwargs)
    return eli5.explain_weights(self.clf_, vec=self.vec_, **kwargs)
def _get_basin_response_term(self, C, z2pt5):
    """Returns the basin response term defined in equation 20."""
    f_sed = np.zeros(len(z2pt5))
    # Shallow-sediment branch (z2.5 < 1 km).
    shallow = z2pt5 < 1.0
    f_sed[shallow] = (C["c14"] + C["c15"] * float(self.CONSTS["SJ"])) * (z2pt5[shallow] - 1.0)
    # Deep-sediment branch (z2.5 > 3 km); in between the term stays zero.
    deep = z2pt5 > 3.0
    f_sed[deep] = C["c16"] * C["k3"] * exp(-0.75) * (1.0 - np.exp(-0.25 * (z2pt5[deep] - 3.0)))
    return f_sed
def autoIndentBlock(self, block, char='\n'):
    """Indent block after Enter pressed or trigger character typed."""
    text = block.text()
    stripped = text.lstrip()
    currentIndent = text[:len(text) - len(stripped)]
    suggested = self._smartIndenter.computeIndent(block, char)
    # Only touch the document when the indenter proposes a real change.
    if suggested is None or suggested == currentIndent:
        return
    self._qpart.replaceText(block.position(), len(currentIndent), suggested)
def get_sites(self, filter_func=lambda x: True):
    """Return a list of TSquareSite objects that represent the sites
    available to a user.

    @param filter_func - A function taking a Site object and returning
                         True or False, depending on whether that site
                         should be returned by this function.  Use it to
                         filter the list of sites (i.e. the user's
                         preferences on what sites to display by default).
                         If not specified, no filter is applied.
    @returns - A list of TSquareSite objects encapsulating t-square's JSON
               response.
    """
    response = self._session.get(BASE_URL_TSQUARE + 'site.json')
    response.raise_for_status()  # raise an exception if not 200: OK
    site_list = response.json()['site_collection']
    if not site_list:
        # An empty collection means this t-square session expired; it's up
        # to the user to re-authenticate.
        self._authenticated = False
        raise SessionExpiredException('The session has expired')
    sites = []
    for raw_site in site_list:
        site = TSquareSite(**raw_site)
        if not hasattr(site, "props"):
            site.props = {}
        # Guarantee the optional metadata keys exist.
        for key in ('banner-crn', 'term', 'term_eid'):
            site.props.setdefault(key, None)
        if filter_func(site):
            sites.append(site)
    return sites
def fillna(self, value=None, method=None, limit=None):
    """Fill missing values with `value`.

    Parameters
    ----------
    value : scalar, optional
    method : str, optional
        .. warning::

           Using 'method' will result in high memory use, as all
           `fill_value` methods will be converted to an in-memory ndarray.
    limit : int, optional

    Returns
    -------
    SparseArray

    Notes
    -----
    When `value` is specified, the result's ``fill_value`` depends on
    ``self.fill_value``.  The goal is to maintain low memory use.

    If ``self.fill_value`` is NA, the result dtype will be
    ``SparseDtype(self.dtype, fill_value=value)``.  This preserves the
    amount of memory used before and after filling.

    When ``self.fill_value`` is not NA, the result dtype will be
    ``self.dtype``.  Again, this preserves the amount of memory used.
    """
    # Exactly one of 'method' / 'value' must be supplied.
    if ((method is None and value is None) or
            (method is not None and value is not None)):
        raise ValueError("Must specify one of 'method' or 'value'.")
    elif method is not None:
        msg = "fillna with 'method' requires high memory usage."
        warnings.warn(msg, PerformanceWarning)
        # Densify, interpolate, then rebuild a sparse array.
        filled = interpolate_2d(np.asarray(self), method=method, limit=limit)
        return type(self)(filled, fill_value=self.fill_value)
    else:
        # Replace NA entries among the stored (sparse) values only.
        new_values = np.where(isna(self.sp_values), value, self.sp_values)
        if self._null_fill_value:
            # This is essentially just updating the dtype.
            new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
        else:
            new_dtype = self.dtype
        return self._simple_new(new_values, self._sparse_index, new_dtype)
def bulk_overwrite(self, entities_and_kinds):
    """Update the group to the given entities and sub-entity groups.

    After this operation, the only members of this EntityGroup will be
    the given entities and sub-entity groups.

    :type entities_and_kinds: List of (Entity, EntityKind) pairs.
    :param entities_and_kinds: A list of entity, entity-kind pairs to set
        on the EntityGroup.  In the pairs the entity-kind can be ``None``,
        to add a single entity, or some entity kind to add all
        sub-entities of that kind.
    """
    # Drop every current membership, then delegate the re-population.
    EntityGroupMembership.objects.filter(entity_group=self).delete()
    return self.bulk_add_entities(entities_and_kinds)
def check_variable_names(self, ds):
    """Ensures all variables have a standard_name set."""
    msgs = []
    passed = 0
    for var_name, var in ds.variables.items():
        if 'standard_name' in var.ncattrs():
            passed += 1
        else:
            msgs.append("Variable '{}' missing standard_name attr".format(var_name))
    return Result(BaseCheck.MEDIUM, (passed, len(ds.variables)), 'Variable Names', msgs)
def stats(self, name, value):
    """Calculates min/average/max statistics based on the current and
    previous values.

    :param name: a counter name of Statistics type
    :param value: a value to update statistics with
    """
    # Fetch the Statistics-typed counter, fold the new observation into
    # it, then propagate the refreshed state.
    counter = self.get(name, CounterType.Statistics)
    self._calculate_stats(counter, value)
    self._update()
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
    """Reverse of dumps(); raise BadSignature if signature fails.

    The serializer is expected to accept a bytestring.
    """
    # TimestampSigner.unsign() returns str but base64 and zlib compression
    # operate on bytes.
    base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
    is_compressed = base64d[:1] == b'.'
    if is_compressed:
        # A leading '.' marks a zlib-compressed payload.
        base64d = base64d[1:]
    data = b64_decode(base64d)
    if is_compressed:
        data = zlib.decompress(data)
    return serializer().loads(data)
def write_entrez2gene(file_path, entrez2gene, logger):
    """Write an Entrez ID -> gene symbol mapping to a tab-delimited text file.

    Parameters
    ----------
    file_path : str
        The path of the output file.
    entrez2gene : dict
        The mapping of Entrez IDs to gene symbols.
    logger : logging.Logger
        Logger used to report completion.

    Returns
    -------
    None
    """
    with misc.smart_open_write(file_path, mode='wb') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab', lineterminator=os.linesep)
        # Sort rows by numeric Entrez ID.
        for entrez_id in sorted(entrez2gene, key=int):
            writer.writerow([entrez_id, entrez2gene[entrez_id]])
    logger.info('Output written to file "%s".', file_path)
def init(self, single=None, swallow_exceptions=True):
    """Handle all important disk-mounting tasks, i.e. call :func:`Disk.init`
    on all underlying disks.  Yields every volume encountered, including
    volumes that have not been mounted.

    :param single: indicates whether each :class:`Disk` should be mounted
        as a single disk, not as a single disk, or whether it should try
        both (defaults to :const:`None`)
    :type single: bool|None
    :param swallow_exceptions: specify whether the init calls should
        swallow exceptions
    :rtype: generator
    """
    for disk in self.disks:
        yield from disk.init(single, swallow_exceptions=swallow_exceptions)
def build_losses(self, logits_real, logits_fake):
    """Build standard GAN loss and set `self.g_loss` and `self.d_loss`.

    D and G play a two-player minimax game with value function V(G, D):

        min_G max_D V(D, G) = IE_{x ~ p_data}[log D(x)]
                              + IE_{z ~ p_fake}[log(1 - D(G(z)))]

    Args:
        logits_real (tf.Tensor): discrim logits from real samples
        logits_fake (tf.Tensor): discrim logits from fake samples produced
            by the generator
    """
    with tf.name_scope("GAN_loss"):
        score_real = tf.sigmoid(logits_real)
        score_fake = tf.sigmoid(logits_fake)
        tf.summary.histogram('score-real', score_real)
        tf.summary.histogram('score-fake', score_fake)
        with tf.name_scope("discrim"):
            # D should classify real logits as 1 and fake logits as 0.
            d_loss_pos = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_real, labels=tf.ones_like(logits_real)),
                name='loss_real')
            d_loss_neg = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.zeros_like(logits_fake)),
                name='loss_fake')
            # Accuracies at the 0.5 decision threshold.
            d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
            d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')
            d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
            self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')
        with tf.name_scope("gen"):
            # G tries to make D label its samples as real (labels = 1).
            self.g_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.ones_like(logits_fake)),
                name='loss')
            g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')
        add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
def cns_vwl_pstr_long(self):
    """Return a new IPAString, containing only:

    1. the consonants,
    2. the vowels, and
    3. the primary stress diacritics, and
    4. the long suprasegmental

    in the current string.

    :rtype: IPAString
    """
    def _keep(c):
        # Letters (consonants and vowels) plus the stress/length marks.
        return c.is_letter or (c.is_suprasegmental and (c.is_primary_stress or c.is_long))

    return IPAString(ipa_chars=[c for c in self.ipa_chars if _keep(c)])
def create_function_f_y(self):
    """Create the CasADi output function ``y``."""
    args = [self.t, self.x, self.m, self.p, self.c, self.ng, self.nu]
    arg_names = ['t', 'x', 'm', 'p', 'c', 'ng', 'nu']
    return ca.Function('y', args, [self.y_rhs], arg_names, ['y'], self.func_opt)
def get_valid_paths(self, path):
    """Return [parent] when *path* (an os.walk triple) contains a vasp run.

    There are some restrictions on the valid directory structures:

    1. There can be only one vasp run in each directory.  Nested
       directories are fine.
    2. Directories designated "relax1", "relax2" are considered to be 2
       parts of an aflow style run.
    3. Directories containing vasp output with ".relax1" and ".relax2" are
       also considered as 2 parts of an aflow style run.
    """
    parent, subdirs, _files = path
    # An aflow-style run: the designated run subdirs live under parent.
    if set(self.runs).intersection(subdirs):
        return [parent]
    # A plain run: vasprun output here, and parent is not itself one of
    # the designated run subdirectories.
    inside_run_dir = any(parent.endswith(os.sep + r) for r in self.runs)
    if not inside_run_dir and glob.glob(os.path.join(parent, "vasprun.xml*")):
        return [parent]
    return []
def _output_text(complete_output, categories):
    """Output the results obtained in text format.

    :return: str, text-formatted output
    """
    chunks = []
    for category in complete_output:
        counts = complete_output[category]
        if not counts:
            continue
        chunks.append("\n\n{0}:\n".format(category))
        # Highest count first.
        for element in sorted(counts, key=lambda e: counts[e], reverse=True):
            chunks.append("\n{0} {1}".format(counts[element], element))
        chunks.append("\n--")
    return "".join(chunks)
def crop_indices(image, lowerind, upperind):
    """Create a proper ANTsImage sub-image by indexing the image with indices.

    This is similar to but different from array sub-setting in that the
    resulting sub-image can be decropped back into its place without having
    to store its original index locations explicitly.

    ANTsR function: `cropIndices`

    Arguments
    ---------
    image : ANTsImage
        image to crop
    lowerind : list/tuple of integers
        vector of lower index, should be length image dimensionality
    upperind : list/tuple of integers
        vector of upper index, should be length image dimensionality

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read(ants.get_ants_data("r16"))
    >>> cropped = ants.crop_indices(fi, (10, 10), (100, 100))
    >>> cropped = ants.smooth_image(cropped, 5)
    >>> decropped = ants.decrop_image(cropped, fi)
    """
    # Work in float internally; remember the caller's pixel type so the
    # result can be converted back at the end.
    original_pixeltype = 'float'
    if image.pixeltype != 'float':
        original_pixeltype = image.pixeltype
        image = image.clone('float')
    if (image.dimension != len(lowerind)) or (image.dimension != len(upperind)):
        raise ValueError('image dimensionality and index length must match')
    libfn = utils.get_lib_fn('cropImageF%i' % image.dimension)
    itkimage = libfn(image.pointer, image.pointer, 1, 2, lowerind, upperind)
    result = iio.ANTsImage(pixeltype='float', dimension=image.dimension,
                           components=image.components, pointer=itkimage)
    if original_pixeltype != 'float':
        result = result.clone(original_pixeltype)
    return result
def datetime(self):
    """[datetime.datetime] Timestamp of the current snapshot data."""
    try:
        raw = self._tick_dict['datetime']
    except (KeyError, ValueError):
        return datetime.datetime.min
    if isinstance(raw, datetime.datetime):
        return raw
    # Integer timestamps: very large values carry millisecond resolution.
    if raw > 10000000000000000:  # ms
        return convert_ms_int_to_datetime(raw)
    return convert_int_to_datetime(raw)
def delete(self):
    """DELETE /:login/machines/:id

    Initiate deletion of a stopped remote machine.
    """
    _json, resp = self.datacenter.request('DELETE', self.path)
    resp.raise_for_status()
def _compute_labels(self, element, data, mapping):
    """Computes labels for the nodes and adds them to the data/mapping."""
    if element.vdims:
        # Restrict to nodes that participate in edges with positive value.
        edges = Dataset(element)[element[element.vdims[0].name] > 0]
        nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
        nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
    else:
        nodes = element
    # Resolve which dimension (if any) supplies the label text.
    label_dim = nodes.get_dimension(self.label_index)
    labels = self.labels
    if label_dim and labels:
        if self.label_index not in [2, None]:
            self.param.warning(
                "Cannot declare style mapping for 'labels' option "
                "and declare a label_index; ignoring the label_index.")
    elif label_dim:
        labels = label_dim
    if isinstance(labels, basestring):
        labels = element.nodes.get_dimension(labels)
    if labels is None:
        text = []
    # NOTE(review): this is 'if', not 'elif' — when labels is None the
    # else branch below still runs and calls dimension_values(None);
    # looks suspicious, confirm against upstream before changing.
    if isinstance(labels, dim):
        text = labels.apply(element, flat=True)
    else:
        text = element.nodes.dimension_values(labels)
        text = [labels.pprint_value(v) for v in text]
    value_dim = element.vdims[0]
    text_labels = []
    for i, node in enumerate(element._sankey['nodes']):
        if len(text):
            label = text[i]
        else:
            label = ''
        if self.show_values:
            # Append (or substitute) the formatted node value and unit.
            value = value_dim.pprint_value(node['value'])
            if label:
                label = '%s - %s' % (label, value)
            else:
                label = value
            if value_dim.unit:
                label += ' %s' % value_dim.unit
        if label:
            text_labels.append(label)
    ys = nodes.dimension_values(1)
    nodes = element._sankey['nodes']
    # Offset the label a quarter node-width away from the node edge.
    if nodes:
        offset = (nodes[0]['x1'] - nodes[0]['x0']) / 4.
    else:
        offset = 0
    if self.label_position == 'right':
        xs = np.array([node['x1'] for node in nodes]) + offset
    else:
        xs = np.array([node['x0'] for node in nodes]) - offset
    data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in text_labels])
    align = 'left' if self.label_position == 'right' else 'right'
    mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)
def makedirs_(path, owner=None, grant_perms=None, deny_perms=None, inheritance=True, reset=False):
    '''Ensure that the parent directory containing this path is available.

    Args:
        path (str):
            The full path to the directory.

            .. note::
                The path must end with a trailing slash, otherwise the
                directory(s) will be created only up to the parent
                directory. For example, if path is ``C:\\temp\\test``, it
                is treated as ``C:\\temp\\``, but ``C:\\temp\\test\\`` is
                treated as ``C:\\temp\\test\\``.

        owner (str):
            The owner of the directory. If not passed, it will be the
            account that created the directory, likely SYSTEM.

        grant_perms (dict):
            A dictionary containing the user/group and the basic
            permissions to grant, i.e.
            ``{'user': {'perms': 'basic_permission'}}``. You can also set
            the ``applies_to`` setting here; the default is
            ``this_folder_subfolders_files``. To set advanced permissions
            use a list for the ``perms`` parameter, e.g.
            ``{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}``.

        deny_perms (dict):
            A dictionary of user/group and permissions to deny, same
            format as ``grant_perms``. Deny permissions supersede grant
            permissions.

        inheritance (bool):
            If True the object will inherit permissions from the parent;
            if False inheritance is disabled. The inheritance setting does
            not apply to parent directories if they must be created.

        reset (bool):
            If ``True`` the existing DACL will be cleared and replaced
            with the settings defined in this function. If ``False``, new
            entries are appended to the existing DACL. Default ``False``.

            .. versionadded:: 2018.3.0

    Returns:
        bool: True if successful

        .. note::
            NOTE(review): the two early-exit paths below actually return a
            *message string*, not ``True`` — callers relying on a strict
            bool should be audited before this is changed.

    Raises:
        CommandExecutionError: If unsuccessful

    CLI Example:

    .. code-block:: bash

        # To grant the 'Users' group 'read & execute' permissions.
        salt '*' file.makedirs C:\\Temp\\ "{'Users': {'perms': 'read_execute'}}"
    '''
    path = os.path.expanduser(path)
    # walk up the directory structure until we find the first existing
    # directory
    dirname = os.path.normpath(os.path.dirname(path))
    if os.path.isdir(dirname):
        # There's nothing for us to do
        msg = 'Directory \'{0}\' already exists'.format(dirname)
        log.debug(msg)
        return msg
    if os.path.exists(dirname):
        msg = 'The path \'{0}\' already exists and is not a directory'.format(dirname)
        log.debug(msg)
        return msg
    directories_to_create = []
    while True:
        if os.path.isdir(dirname):
            break
        directories_to_create.append(dirname)
        current_dirname = dirname
        dirname = os.path.dirname(dirname)
        # os.path.dirname() reaching a fixed point means we hit the root
        # of a relative path without finding an existing directory.
        if current_dirname == dirname:
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(dirname))
    # create parent directories from the topmost to the most deeply nested one
    directories_to_create.reverse()
    for directory_to_create in directories_to_create:
        # all directories have the user, group and mode set!!
        log.debug('Creating directory: %s', directory_to_create)
        mkdir(path=directory_to_create, owner=owner, grant_perms=grant_perms,
              deny_perms=deny_perms, inheritance=inheritance, reset=reset)
    return True
def profile(model, inputs=None, n_texts=10000):
    """Profile a spaCy pipeline, to find out which functions take the most time.

    Input should be formatted as one JSON object per line with a key "text".
    It can either be provided as a JSONL file, or be read from sys.stdin.
    If no input file is specified, the IMDB dataset is loaded via Thinc.
    """
    msg = Printer()
    if inputs is not None:
        inputs = _read_inputs(inputs, msg)
    if inputs is None:
        # Fall back to the bundled IMDB data set.
        n_inputs = 25000
        with msg.loading("Loading IMDB dataset via Thinc..."):
            imdb_train, _ = thinc.extra.datasets.imdb()
            inputs, _ = zip(*imdb_train)
        msg.info("Loaded IMDB dataset and using {} examples".format(n_inputs))
        inputs = inputs[:n_inputs]
    with msg.loading("Loading model '{}'...".format(model)):
        nlp = load_model(model)
    msg.good("Loaded model '{}'".format(model))
    texts = list(itertools.islice(inputs, n_texts))
    # runctx references nlp/texts via locals() and parse_texts via globals().
    cProfile.runctx("parse_texts(nlp, texts)", globals(), locals(), "Profile.prof")
    profile_stats = pstats.Stats("Profile.prof")
    msg.divider("Profile stats")
    profile_stats.strip_dirs().sort_stats("time").print_stats()
def mask_average(dset, mask):
    '''Returns average of voxels in ``dset`` within non-zero voxels of ``mask``.

    Returns None when the underlying 3dmaskave call produces no output.
    '''
    result = nl.run(['3dmaskave', '-q', '-mask', mask, dset])
    if not result:
        return None
    return float(result.output.split()[-1])
def __load(self, path):
    """Load the serialized dataset from disk and validate it.

    :param path: path to the pickled dataset file
    :raises IOError: if the file cannot be read
    """
    try:
        path = os.path.abspath(path)
        with open(path, 'rb') as df:
            # The pickle stores all dataset attributes in a fixed order.
            (self.__data, self.__classes, self.__labels, self.__dtype,
             self.__description, self.__num_features,
             self.__feature_names) = pickle.load(df)
        # ensure the loaded dataset is valid
        self.__validate(self.__data, self.__classes, self.__labels)
    except IOError as ioe:
        # BUG FIX: previously written as IOError('...{}', format(ioe)) —
        # a stray comma passed builtin format(ioe) as a second argument
        # and the '{}' placeholder was never interpolated.
        raise IOError('Unable to read the dataset from file: {}'.format(ioe))
def _zom_arg ( lexer ) :
"""Return zero or more arguments .""" | tok = next ( lexer )
# ' , ' EXPR ZOM _ X
if isinstance ( tok , COMMA ) :
return ( _expr ( lexer ) , ) + _zom_arg ( lexer )
# null
else :
lexer . unpop_token ( tok )
return tuple ( ) |
def Filter(self, function=None):
    """Construct Textable from the rows of which the function returns true.

    Args:
        function: A function applied to each row which returns a bool. If
            function is None, all rows with empty column values are removed.

    Returns:
        A new TextTable()

    Raises:
        TableError: When an invalid row entry is Append()'d
    """
    def _flatten(value):
        # Recursively join nested sequences into one string.
        if isinstance(value, str):
            return value
        return ''.join(_flatten(item) for item in value)

    if function is None:
        # Default predicate: keep rows with at least one non-empty value.
        function = lambda row: bool(_flatten(row.values))
    filtered = self.__class__()
    # pylint: disable=protected-access
    filtered._table = [self.header]
    for row in self:
        if function(row) is True:
            filtered.Append(row)
    return filtered
async def remove(self, *args, **kwargs):
    """Delete Secret

    Delete the secret associated with some key.

    This method is ``stable``
    """
    # Thin wrapper: forward everything to the generic API-call machinery.
    return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
def import_obj ( clsname , default_module = None ) :
"""Import the object given by clsname .
If default _ module is specified , import from this module .""" | if default_module is not None :
if not clsname . startswith ( default_module + '.' ) :
clsname = '{0}.{1}' . format ( default_module , clsname )
mod , clsname = clsname . rsplit ( '.' , 1 )
mod = importlib . import_module ( mod )
try :
obj = getattr ( mod , clsname )
except AttributeError :
raise ImportError ( 'Cannot import {0} from {1}' . format ( clsname , mod ) )
return obj |
def assure_relation ( cls , cms_page ) :
"""Assure that we have a foreign key relation , pointing from CascadePage onto CMSPage .""" | try :
cms_page . cascadepage
except cls . DoesNotExist :
cls . objects . create ( extended_object = cms_page ) |
def bin_exact_kb_dense ( M , positions , length = 10 ) :
"""Perform the kb - binning procedure with total bin lengths being exactly
set to that of the specified input . Fragments overlapping two potential
bins will be split and related contact counts will be divided according
to overlap proportions in each bin .""" | unit = 10 ** 3
ul = unit * length
units = positions / ul
n = len ( positions )
idx = [ i for i in range ( n - 1 ) if np . ceil ( units [ i ] ) < np . ceil ( units [ i + 1 ] ) ]
m = len ( idx ) - 1
N = np . zeros ( ( m , m ) )
remainders = [ 0 ] + [ np . abs ( units [ i ] - units [ i + 1 ] ) for i in range ( m ) ]
for i in range ( m ) :
N [ i ] = np . array ( [ ( M [ idx [ j ] : idx [ j + 1 ] , idx [ i ] : idx [ i + 1 ] ] . sum ( ) - remainders [ j ] * M [ i ] [ j ] + remainders [ j + 1 ] * M [ i + 1 ] [ j ] ) for j in range ( m ) ] )
return N |
def RunMetadata(self, run, tag):
    """Get the session.run() metadata associated with a TensorFlow run and tag.

    Args:
        run: A string name of a TensorFlow run.
        tag: A string name of the tag associated with a particular session.run().

    Raises:
        KeyError: If the run is not found, or the tag is not available for
            the given run.

    Returns:
        The metadata in the form of `RunMetadata` protobuf data structure.
    """
    # Look up the run's accumulator and forward the tag query to it.
    return self.GetAccumulator(run).RunMetadata(tag)
def _contentful_user_agent ( self ) :
"""Sets the X - Contentful - User - Agent header .""" | header = { }
from . import __version__
header [ 'sdk' ] = { 'name' : 'contentful-management.py' , 'version' : __version__ }
header [ 'app' ] = { 'name' : self . application_name , 'version' : self . application_version }
header [ 'integration' ] = { 'name' : self . integration_name , 'version' : self . integration_version }
header [ 'platform' ] = { 'name' : 'python' , 'version' : platform . python_version ( ) }
os_name = platform . system ( )
if os_name == 'Darwin' :
os_name = 'macOS'
elif not os_name or os_name == 'Java' :
os_name = None
elif os_name and os_name not in [ 'macOS' , 'Windows' ] :
os_name = 'Linux'
header [ 'os' ] = { 'name' : os_name , 'version' : platform . release ( ) }
def format_header ( key , values ) :
header = "{0} {1}" . format ( key , values [ 'name' ] )
if values [ 'version' ] is not None :
header = "{0}/{1}" . format ( header , values [ 'version' ] )
return "{0};" . format ( header )
result = [ ]
for k , values in header . items ( ) :
if not values [ 'name' ] :
continue
result . append ( format_header ( k , values ) )
return ' ' . join ( result ) |
def apply_partial_inheritance ( self , prop ) :
"""Define property with inheritance value of the property
: param prop : property
: type prop : str
: return : None""" | for i in itertools . chain ( iter ( list ( self . items . values ( ) ) ) , iter ( list ( self . templates . values ( ) ) ) ) :
self . get_property_by_inheritance ( i , prop )
# If a " null " attribute was inherited , delete it
try :
if getattr ( i , prop ) == 'null' :
delattr ( i , prop )
except AttributeError : # pragma : no cover , simple protection
pass |
def add_actions ( self , entries , user_data = None ) :
"""The add _ actions ( ) method is a convenience method that creates a number
of gtk . Action objects based on the information in the list of action
entry tuples contained in entries and adds them to the action group .
The entry tuples can vary in size from one to six items with the
following information :
* The name of the action . Must be specified .
* The stock id for the action . Optional with a default value of None
if a label is specified .
* The label for the action . This field should typically be marked
for translation , see the set _ translation _ domain ( ) method . Optional
with a default value of None if a stock id is specified .
* The accelerator for the action , in the format understood by the
gtk . accelerator _ parse ( ) function . Optional with a default value of
None .
* The tooltip for the action . This field should typically be marked
for translation , see the set _ translation _ domain ( ) method . Optional
with a default value of None .
* The callback function invoked when the action is activated .
Optional with a default value of None .
The " activate " signals of the actions are connected to the callbacks and
their accel paths are set to < Actions > / group - name / action - name .""" | try :
iter ( entries )
except ( TypeError ) :
raise TypeError ( 'entries must be iterable' )
def _process_action ( name , stock_id = None , label = None , accelerator = None , tooltip = None , callback = None ) :
action = Action ( name = name , label = label , tooltip = tooltip , stock_id = stock_id )
if callback is not None :
if user_data is None :
action . connect ( 'activate' , callback )
else :
action . connect ( 'activate' , callback , user_data )
self . add_action_with_accel ( action , accelerator )
for e in entries : # using inner function above since entries can leave out optional arguments
_process_action ( * e ) |
def surface_nodes ( self ) :
""": param points : a list of Point objects
: returns : a Node of kind ' griddedSurface '""" | line = [ ]
for point in self . mesh :
line . append ( point . longitude )
line . append ( point . latitude )
line . append ( point . depth )
return [ Node ( 'griddedSurface' , nodes = [ Node ( 'gml:posList' , { } , line ) ] ) ] |
def expire(key, seconds, host=None, port=None, db=None, password=None):
    '''Set a keys time to live in seconds

    CLI Example:

    .. code-block:: bash

        salt '*' redis.expire foo 300
    '''
    conn = _connect(host, port, db, password)
    return conn.expire(key, seconds)
def head(self, stream='stdout', num_lines=10):
    """Head a specified stream (stdout or stderr) by num_lines."""
    target = self._map_string_to_file(stream)
    if target:
        return self._head_temp_file(target, num_lines)
    # No current temp file: fall back to the most recent recorded run log.
    last_run = self.backend.get_latest_run_log(self.parent_job.job_id, self.name)
    if not last_run:
        return None
    return self._head_string(last_run['tasks'][self.name][stream], num_lines)
def index(self):
    """Retrieve the attribute index number.

    Args::
        no argument

    Returns::
        attribute index number (starting at 0)

    C library equivalent: SDfindattr
    """
    # Resolve the attribute name to its index via the HDF C library.
    self._index = _C.SDfindattr(self._obj._id, self._name)
    _checkErr('find', self._index, 'illegal attribute name')
    return self._index
def old_properties_names_to_new ( self ) : # pragma : no cover , never called
"""Convert old Nagios2 names to Nagios3 new names
TODO : still useful ?
: return : None""" | for i in itertools . chain ( iter ( list ( self . items . values ( ) ) ) , iter ( list ( self . templates . values ( ) ) ) ) :
i . old_properties_names_to_new ( ) |
def getmask(self, blc=(), trc=(), inc=()):
    """Get image mask.

    Using the arguments blc (bottom left corner), trc (top right corner),
    and inc (stride) it is possible to get a mask slice. Not all axes
    need to be specified. Missing values default to begin, end, and 1.

    The mask is returned as a numpy array. Its dimensionality is the same
    as the dimensionality of the image, even if an axis has length 1.

    Note that the casacore images use the convention that a mask value
    True means good and False means bad. However, numpy uses the opposite.
    Therefore the mask will be negated, so it can be used directly in
    numpy operations.

    If the image has no mask, an array will be returned with all values
    set to False.
    """
    raw = self._getmask(self._adjustBlc(blc),
                        self._adjustTrc(trc),
                        self._adjustInc(inc))
    # Flip casacore's True-means-good convention to numpy's masked=True.
    return numpy.logical_not(raw)
def _notifyDone ( self ) :
'''Allow any other editatoms waiting on me to complete to resume''' | if self . notified :
return
self . doneevent . set ( )
for buid in self . mybldgbuids :
del self . allbldgbuids [ buid ]
self . notified = True |
def plot_prof_2 ( self , mod , species , xlim1 , xlim2 ) :
"""Plot one species for cycle between xlim1 and xlim2
Parameters
mod : string or integer
Model to plot , same as cycle number .
species : list
Which species to plot .
xlim1 , xlim2 : float
Mass coordinate range .""" | mass = self . se . get ( mod , 'mass' )
Xspecies = self . se . get ( mod , 'yps' , species )
pyl . plot ( mass , Xspecies , '-' , label = str ( mod ) + ', ' + species )
pyl . xlim ( xlim1 , xlim2 )
pyl . legend ( ) |
def coverageCounts(self):
    """For each location in the subject, return a count of how many times that
    location is covered by a read.

    @return: a C{Counter} where the keys are the C{int} locations on the
        subject and the value is the number of times the location is
        covered by a read.
    """
    counts = Counter()
    for start, end in self._intervals:
        # Clamp each interval to the subject bounds before counting.
        counts.update(range(max(0, start), min(self._targetLength, end)))
    return counts
def read_log ( self , logfile ) :
"""The read _ log method returns a memory efficient generator for rows in a Bro log .
Usage :
rows = my _ bro _ reader . read _ log ( logfile )
for row in rows :
do something with row
Args :
logfile : The Bro Log file .""" | # Make sure we ' re at the beginning
logfile . seek ( 0 )
# First parse the header of the bro log
field_names , _ = self . _parse_bro_header ( logfile )
# Note : SO stupid to write a csv reader , but csv . DictReader on Bro
# files was doing something weird with generator output that
# affected zeroRPC and gave ' could not route _ zpc _ more ' error .
# So wrote my own , put a sleep at the end , seems to fix it .
while 1 :
_line = next ( logfile ) . strip ( )
if not _line . startswith ( '#close' ) :
yield self . _cast_dict ( dict ( zip ( field_names , _line . split ( self . delimiter ) ) ) )
else :
time . sleep ( .1 )
# Give time for zeroRPC to finish messages
break |
def scope_groups ( self ) :
"""Return a new raw REST interface to scope _ group resources
: rtype : : py : class : ` ns1 . rest . ipam . Scopegroups `""" | import ns1 . rest . ipam
return ns1 . rest . ipam . Scopegroups ( self . config ) |
def get_output ( self , output_files , clear = True ) :
"Get the output files as an id indexed dict ." | patt = re . compile ( r'(.*?)-semantics.*?' )
for outpath in output_files :
if outpath is None :
logger . warning ( "Found outpath with value None. Skipping." )
continue
re_out = patt . match ( path . basename ( outpath ) )
if re_out is None :
raise SparserError ( "Could not get prefix from output path %s." % outpath )
prefix = re_out . groups ( ) [ 0 ]
if prefix . startswith ( 'PMC' ) :
prefix = prefix [ 3 : ]
if prefix . isdecimal ( ) : # In this case we assume the prefix is a tcid .
prefix = int ( prefix )
try :
with open ( outpath , 'rt' ) as f :
content = json . load ( f )
except Exception as e :
logger . exception ( e )
logger . error ( "Could not load reading content from %s." % outpath )
content = None
self . add_result ( prefix , content )
if clear :
input_path = outpath . replace ( '-semantics.json' , '.nxml' )
try :
remove ( outpath )
remove ( input_path )
except Exception as e :
logger . exception ( e )
logger . error ( "Could not remove sparser files %s and %s." % ( outpath , input_path ) )
return self . results |
def str2key(self, keyString):
    """Parse a human readable key sequence.

    If no error is raised, then ``keyString`` could be successfully
    converted into a valid key sequence and is henceforth represented
    by this object.

    |Args|
        * ``keyString`` (**QtmacsKeysequence**): eg. "<ctrl>+f"
    |Returns|
        **None**
    |Raises|
        * **QtmacsKeysequenceError** if ``keyString`` could not be parsed.
    """
    # Ensure the string is non-empty.
    if keyString == '':
        raise QtmacsKeysequenceError('Cannot parse empty string')
    # HTML-escape the angle brackets so the key string renders verbatim
    # inside the rich-text error messages below. (Previously these two
    # replace() calls were corrupted into no-ops, leaving raw '<'/'>'
    # in the <b>-tagged message.)
    tmp = str(keyString)
    tmp = tmp.replace('<', '&lt;')
    tmp = tmp.replace('>', '&gt;')
    keyStringHtml = '<b>{}</b>.'.format(tmp)
    del tmp
    # Remove leading and trailing white spaces, and reduce sequences of
    # white spaces to a single white space. If this results in an empty
    # string (typically the case when the user tries to register a white
    # space with ' ' instead of with '<space>') then raise an error.
    rawKeyStr = keyString.strip()
    if len(rawKeyStr) == 0:
        msg = 'Cannot parse the key combination {}.'.format(keyStringHtml)
        raise QtmacsKeysequenceError(msg)
    # Split the string at white spaces, eg. "<ctrl>+x  <ctrl>+f" becomes
    # the list of key combinations ["<ctrl>+x", "<ctrl>+f"].
    rawKeyStr = re.sub(' +', ' ', rawKeyStr)
    rawKeyStr = rawKeyStr.split(' ')
    # Now process the key combinations one by one.
    for key in rawKeyStr:
        # Find all bracketed keys in the key combination
        # (eg. <ctrl>, <space>).
        desc_keys = re.findall('<.*?>', key)
        # There are four possibilities:
        #  * no bracketed key (eg. "x" or "X")
        #  * one bracketed key (eg. "<ctrl>+x", or just "<space>")
        #  * two bracketed keys (eg. "<ctrl>+<space>" or "<ctrl>+<alt>+f")
        #  * three bracketed keys (eg. <ctrl>+<alt>+<space>).
        if len(desc_keys) == 0:
            # No bracketed key means no modifier, so the key must
            # stand by itself.
            modStr = ['<NONE>']
            keyStr = key
        elif len(desc_keys) == 1:
            if '+' not in key:
                # If no '+' sign is present then it must be a bracketed
                # key without any modifier (eg. "<space>").
                modStr = ['<NONE>']
                keyStr = key
            else:
                # A '+' sign and exactly one bracketed key: a modifier
                # plus a normal key (eg. "<ctrl>+f", "<alt>++").
                idx = key.find('+')
                modStr = [key[:idx]]
                keyStr = key[idx + 1:]
        elif len(desc_keys) == 2:
            # Either two modifiers and a normal key (eg. "<ctrl>+<alt>+x")
            # or one modifier and one bracketed key (eg. "<ctrl>+<space>").
            if (key.count('+') == 0) or (key.count('+') > 3):
                # A valid key combination must feature at least one
                # and at most three "+" symbols.
                msg = 'Cannot parse the key combination {}.'
                msg = msg.format(keyStringHtml)
                raise QtmacsKeysequenceError(msg)
            elif key.count('+') == 1:
                # One modifier and one bracketed key (eg. "<ctrl>+<space>").
                idx = key.find('+')
                modStr = [key[:idx]]
                keyStr = key[idx + 1:]
            elif (key.count('+') == 2) or (key.count('+') == 3):
                # Two modifiers and one normal key
                # (eg. "<ctrl>+<alt>+f", "<ctrl>+<alt>++").
                idx1 = key.find('+')
                idx2 = key.find('+', idx1 + 1)
                modStr = [key[:idx1], key[idx1 + 1:idx2]]
                keyStr = key[idx2 + 1:]
        elif len(desc_keys) == 3:
            if key.count('+') == 2:
                # Two modifiers and one bracketed key
                # (eg. "<ctrl>+<alt>+<space>").
                idx1 = key.find('+')
                idx2 = key.find('+', idx1 + 1)
                modStr = [key[:idx1], key[idx1 + 1:idx2]]
                keyStr = key[idx2 + 1:]
            else:
                # A key combination with three bracketed entries must
                # have exactly two '+' symbols.
                msg = 'Cannot parse the key combination {}.'
                msg = msg.format(keyStringHtml)
                raise QtmacsKeysequenceError(msg)
        else:
            msg = 'Cannot parse the key combination {}.'
            msg = msg.format(keyStringHtml)
            raise QtmacsKeysequenceError(msg)
        # The dictionary keys that map the modifiers and bracketed keys
        # to Qt constants are all upper case by convention.
        modStr = [_.upper() for _ in modStr]
        if (keyStr[0] == '<') and (keyStr[-1] == '>'):
            keyStr = keyStr.upper()
        # Convert the text version of the modifier keys into the QFlags
        # structure used by Qt by "or"ing them together. The loop is
        # necessary because more than one modifier may be active
        # (eg. <ctrl>+<alt>).
        modQt = QtCore.Qt.NoModifier
        for mod in modStr:
            # Guard against typos like "<ctlr>" instead of "<ctrl>".
            if mod not in self.modDict:
                msg = 'Cannot parse the key combination {}.'
                msg = msg.format(keyStringHtml)
                raise QtmacsKeysequenceError(msg)
            modQt = modQt | self.modDict[mod]
        # Repeat for the key itself; no loop is needed because only one
        # key can be pressed at a time.
        if keyStr in self.keyDict:
            modQt_shift, keyQt = self.keyDict[keyStr]
        else:
            msg = 'Cannot parse the key combination {}.'
            msg = msg.format(keyStringHtml)
            raise QtmacsKeysequenceError(msg)
        # Construct a new QKeyEvent. The general modifiers (ie. <ctrl>
        # and <alt>) still need to be combined with the shift modifier
        # if the key demands it. The "text" argument is omitted because
        # Qt is smart enough to determine it internally.
        key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, keyQt,
                                    modQt | modQt_shift)
        # Finally, append this key to the key sequence represented by
        # this object.
        self.appendQKeyEvent(key_event)
def discovery_multicast ( self ) :
"""Installs the multicast discovery bundles and instantiates components""" | # Install the bundle
self . context . install_bundle ( "pelix.remote.discovery.multicast" ) . start ( )
with use_waiting_list ( self . context ) as ipopo : # Instantiate the discovery
ipopo . add ( rs . FACTORY_DISCOVERY_MULTICAST , "pelix-discovery-multicast" ) |
def channel(layer, n_channel, batch=None):
    """Visualize a single channel"""
    if batch is None:
        # Average the channel activation over all batch elements.
        return lambda T: tf.reduce_mean(T(layer)[..., n_channel])
    # Restrict to a single batch element.
    return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
def cube(cls, center=[0, 0, 0], radius=[1, 1, 1]):
    """Construct an axis-aligned solid cuboid.

    Optional parameters are `center` and `radius`, which default to
    `[0, 0, 0]` and `[1, 1, 1]`. The radius can be specified using a
    single number or a list of three numbers, one for each axis.

    Example code::

        cube = CSG.cube(
            center=[0, 0, 0],
            radius=1
        )
    """
    c = Vector(0, 0, 0)
    r = [1, 1, 1]
    if isinstance(center, list):
        c = Vector(center)
    if isinstance(radius, list):
        r = radius
    else:
        r = [radius, radius, radius]
    # Each face pairs four corner indices with an outward normal; bit k
    # of a corner index selects the +/- extent along axis k.
    faces = [
        [[0, 4, 6, 2], [-1, 0, 0]],
        [[1, 3, 7, 5], [+1, 0, 0]],
        [[0, 1, 5, 4], [0, -1, 0]],
        [[2, 6, 7, 3], [0, +1, 0]],
        [[0, 2, 3, 1], [0, 0, -1]],
        [[4, 5, 7, 6], [0, 0, +1]],
    ]

    def corner(i):
        # Decode the corner index bits into a vertex position.
        return Vertex(Vector(
            c.x + r[0] * (2 * bool(i & 1) - 1),
            c.y + r[1] * (2 * bool(i & 2) - 1),
            c.z + r[2] * (2 * bool(i & 4) - 1)), None)

    polygons = [Polygon([corner(i) for i in face[0]]) for face in faces]
    return CSG.fromPolygons(polygons)
def kv(d):
    """Equivalent to dict.items().

    Usage::

        >>> for key, node in DictTree.kv(d):
        >>>     print(key, DictTree.getattr(node, "population"))
        MD 200000
        VA 100000
    """
    # Skip the internal metadata entry.
    return ((k, v) for k, v in iteritems(d) if k != _meta)
def shrink(self, amount):
    """Kill off worker threads (not below self.min)."""
    # Cull any dead threads from our list first; each one culled counts
    # toward the requested amount. Iterate over a copy: removing items
    # from the list being iterated would silently skip elements.
    for t in self._threads[:]:
        if not t.isAlive():
            self._threads.remove(t)
            amount -= 1
    # calculate the number of threads above the minimum
    n_extra = max(len(self._threads) - self.min, 0)
    # don't remove more than amount
    n_to_remove = min(amount, n_extra)
    # put shutdown requests on the queue equal to the number of threads
    # to remove. As each request is processed by a worker, that worker
    # will terminate and be culled from the list.
    for n in range(n_to_remove):
        self._queue.put(_SHUTDOWNREQUEST)
def deleteColumn(self, networkId, tableType, columnName, verbose=None):
    """Deletes the column specified by the `columnName` parameter from the
    table specified by the `tableType` and `networkId` parameters.

    :param networkId: SUID of the network containing the table from which
        to delete the column
    :param tableType: Table Type from which to delete the column
    :param columnName: Name of the column to delete
    :param verbose: print more
    :returns: default: successful operation
    """
    url = (self.___url + 'networks/' + str(networkId) + '/tables/'
           + str(tableType) + '/columns/' + str(columnName) + '')
    return api(url=url, method="DELETE", verbose=verbose)
def extractSurface(image, radius=0.5):
    """``vtkExtractSurface`` filter. Input is a ``vtkImageData``.

    Generate zero-crossing isosurface from truncated signed distance volume.
    """
    extractor = vtk.vtkExtractSurface()
    extractor.SetInputData(image)
    extractor.SetRadius(radius)
    extractor.Update()
    return Actor(extractor.GetOutput())
def copy_to_clipboard ( dat ) :
"""复制 ` ` dat ` ` 内容到 剪切板中
: return : None""" | p = subprocess . Popen ( [ 'pbcopy' ] , stdin = subprocess . PIPE )
p . stdin . write ( to_bytes ( dat ) )
p . stdin . close ( )
p . communicate ( ) |
def lstlec(string, n, lenvals, array):
    """Given a character string and an ordered array of character
    strings, find the index of the largest array element less than
    or equal to the given string.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlec_c.html

    :param string: Upper bound value to search against.
    :type string: str
    :param n: Number elements in array.
    :type n: int
    :param lenvals: String length.
    :type lenvals: int
    :param array: Array of possible lower bounds.
    :type array: list
    :return: index of the last element of array that is lexically less
        than or equal to string.
    :rtype: int
    """
    # Marshal the Python values into the ctypes shapes CSPICE expects.
    string_p = stypes.stringToCharP(string)
    array_p = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=n)
    return libspice.lstlec_c(string_p, ctypes.c_int(n),
                             ctypes.c_int(lenvals), array_p)
async def request(self, api_commands):
    """Make a request.

    A single command is executed directly; a list of commands is
    executed concurrently and the list of results is returned.
    """
    if not isinstance(api_commands, list):
        result = await self._execute(api_commands)
        return result
    commands = (self._execute(api_command) for api_command in api_commands)
    # asyncio.gather's ``loop`` argument was deprecated in Python 3.8 and
    # removed in 3.10; gather always uses the running event loop now.
    command_results = await asyncio.gather(*commands)
    return command_results
def crop(gens, seconds=5, cropper=None):
    '''Crop the generator to a finite number of frames

    Return a generator which outputs the provided generator limited
    to enough samples to produce seconds seconds of audio (default 5s)
    at the provided frame rate.
    '''
    # A single iterator is wrapped so it is treated as one source.
    # Python 3 iterators expose __next__; 'next' kept for Python 2
    # objects -- the original only checked 'next', so a py3 generator
    # was wrongly iterated element-by-element.
    if hasattr(gens, '__next__') or hasattr(gens, 'next'):
        gens = (gens,)
    if cropper is None:
        cropper = lambda gen: itertools.islice(gen, 0, seconds * sampler.FRAME_RATE)
    cropped = [cropper(gen) for gen in gens]
    return cropped[0] if len(cropped) == 1 else cropped
def verify(self):
    """Verify the completeness of the data.

    Raises:
        ValueError: When this chat is invalid.
    """
    if any(not i for i in (self.chat_uid, self.module_id)):
        raise ValueError("Chat data is incomplete.")
    if not isinstance(self.chat_type, ChatType):
        raise ValueError("Invalid chat type.")
    if self.chat_type == ChatType.Group:
        # Every member of a group must itself be a valid user chat.
        if any(not isinstance(i, EFBChat) or not i.chat_type == ChatType.User
               for i in self.members):
            raise ValueError("The group has an invalid member.")
    if self.group is not None and (not isinstance(self.group, EFBChat)
                                   or not self.group.chat_type == ChatType.Group):
        raise ValueError("The member is in an invalid group.")
def get_scene ( self ) :
"""- get _ scene ( ) : It return the x and y position , the smoothing length
of the particles and the index of the particles that are active in
the scene . In principle this is an internal function and you don ' t
need this data .""" | return self . _x , self . _y , self . _hsml , self . _m , self . _kview |
def proj_l1 ( v , gamma , axis = None , method = None ) :
r"""Projection operator of the : math : ` \ ell _ 1 ` norm .
Parameters
v : array _ like
Input array : math : ` \ mathbf { v } `
gamma : float
Parameter : math : ` \ gamma `
axis : None or int or tuple of ints , optional ( default None )
Axes of ` v ` over which to compute the : math : ` \ ell _ 1 ` norm . If
` None ` , an entire multi - dimensional array is treated as a
vector . If axes are specified , then distinct norm values are
computed over the indices of the remaining axes of input array
method : None or str , optional ( default None )
Solver method to use . If ` None ` , the most appropriate choice is
made based on the ` axis ` parameter . Valid methods are
- ' scalarroot '
The solution is computed via the method of Sec . 6.5.2 in
: cite : ` parikh - 2014 - proximal ` .
- ' sortcumsum '
The solution is computed via the method of
: cite : ` duchi - 2008 - efficient ` .
Returns
x : ndarray
Output array""" | if method is None :
if axis is None :
method = 'scalarroot'
else :
method = 'sortcumsum'
if method == 'scalarroot' :
if axis is not None :
raise ValueError ( 'Method scalarroot only supports axis=None' )
return _proj_l1_scalar_root ( v , gamma )
elif method == 'sortcumsum' :
if isinstance ( axis , tuple ) :
vtr , rsi = ndto2d ( v , axis )
xtr = _proj_l1_sortsum ( vtr , gamma , axis = 1 )
return ndfrom2d ( xtr , rsi )
else :
return _proj_l1_sortsum ( v , gamma , axis )
else :
raise ValueError ( 'Unknown solver method %s' % method ) |
def rowCount(self, parent):
    """Return the number of rows under the given parent.

    When the parent is valid return rowCount the number of children
    of parent.

    :param parent: the parent index
    :type parent: :class:`QtCore.QModelIndex`:
    :returns: the row count
    :rtype: int
    :raises: None
    """
    if parent.column() > 0:
        return 0
    # An invalid parent means the (hidden) root item.
    item = self._root if not parent.isValid() else parent.internalPointer()
    return item.child_count()
def _BuildHttpRoutingMap ( self , router_cls ) :
"""Builds a werkzeug routing map out of a given router class .""" | if not issubclass ( router_cls , api_call_router . ApiCallRouter ) :
raise ValueError ( "Router has to be an instance of ApiCallRouter." )
routing_map = routing . Map ( )
# Note : we traverse methods of the base class ( ApiCallRouter ) to avoid
# potential problems caused by child router classes using the @ Http
# annotation ( thus adding additional unforeseen HTTP paths / methods ) . We
# don ' t want the HTTP API to depend on a particular router implementation .
for _ , metadata in iteritems ( router_cls . GetAnnotatedMethods ( ) ) :
for http_method , path , unused_options in metadata . http_methods :
routing_map . add ( routing . Rule ( path , methods = [ http_method ] , endpoint = metadata ) )
# This adds support for the next version of the API that uses
# standartized JSON protobuf serialization .
routing_map . add ( routing . Rule ( path . replace ( "/api/" , "/api/v2/" ) , methods = [ http_method ] , endpoint = metadata ) )
return routing_map |
def preprocess(self, raw_inputs):
    """
    Args:
        raw_inputs (list of Images): a list of PIL Image objects

    Returns:
        array (float32): num images * height * width * num channels
    """
    # Resize to the VGG16 input size, force RGB, and convert to float32.
    batch = [
        np.array(img.resize(VGG16_DIM[:2], Image.ANTIALIAS).convert('RGB')).astype('float32')
        for img in raw_inputs
    ]
    return imagenet_utils.preprocess_input(np.array(batch))
def kind(units):
    """Find the kind of given units.

    Parameters
    ----------
    units : string
        The units of interest

    Returns
    -------
    string
        The kind of the given units. If no match is found, returns None.
    """
    # Scan each kind's unit table for a match; fall through to None.
    for kind_name, unit_map in dicts.items():
        if units in unit_map.keys():
            return kind_name
def base_variables ( self ) :
"""A mapping from the base _ variable names to the variables""" | if isinstance ( self . data , InteractiveList ) :
return dict ( chain ( * map ( lambda arr : six . iteritems ( arr . psy . base_variables ) , self . data ) ) )
else :
return self . data . psy . base_variables |
def html_visit ( self , node ) :
"""Visitor method for Need - node of builder ' html ' .
Does only wrap the Need - content into an extra < div > with class = need""" | self . body . append ( self . starttag ( node , 'div' , '' , CLASS = 'need' ) ) |
def _bootstrapped_fit_iter ( fitter_args_kargs , n = None , datalist = None , pdatalist = None , ** kargs ) :
"""Iterator that returns bootstrap copies of a fit .
Bootstrap iterator for | MultiFitter | fits analogous to
: meth : ` lsqfit . bootstrapped _ fit _ iter ` . The bootstrap uses the
same parameters as the last fit done by the fitter unless they
are overridden by ` ` kargs ` ` .
Args :
n ( int ) : Maximum number of iterations if ` ` n ` ` is not ` ` None ` ` ;
otherwise there is no maximum . Default is ` ` None ` ` .
datalist ( iter ) : Collection of bootstrap data sets for fitter .
pdatalist ( iter ) : Collection of bootstrap processed data sets for
fitter .
kargs ( dict ) : Overrides arguments in original fit .
Returns :
Iterator that returns an | nonlinear _ fit | object
containing results from the fit to the next data set in
` ` datalist ` ` .""" | fitter , args , okargs = fitter_args_kargs
for k in okargs :
if k not in kargs :
kargs [ k ] = okargs [ k ]
if 'p0' not in kargs :
kargs [ 'p0' ] = args [ 'p0' ]
if datalist is not None :
pdatalist = ( MultiFitter . process_data ( d , args [ 'models' ] ) for d in datalist )
elif pdatalist is None :
pdata = args [ 'pdata' ]
if pdata is None :
pdata = MultiFitter . process_data ( args [ 'data' ] , args [ 'models' ] )
pdatalist = gvar . bootstrap_iter ( pdata , n )
i = 0
for pdata in pdatalist :
i += 1
if n is not None and i > n :
break
fit = fitter ( pdata = pdata , prior = args [ 'prior' ] , ** kargs )
yield fit |
def delete_certificate_issuer(self, certificate_issuer_id, **kwargs):  # noqa: E501
    """Delete certificate issuer.  # noqa: E501

    Delete a certificate issuer by ID. <br> **Example usage:** ``` curl -X DELETE \\ -H 'authorization: <valid access token>' \\ https://api.us-east-1.mbedcloud.com/v3/certificate-issuers/0162155dc77d507b9d48a91b00000 ```  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_certificate_issuer(certificate_issuer_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str certificate_issuer_id: Certificate issuer ID. <br> The ID of the certificate issuer. An active certificate issuer may not be deleted. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always ask the lower-level call for just the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous mode: hand the request thread back to the caller.
        return self.delete_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs)  # noqa: E501
    result = self.delete_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs)  # noqa: E501
    return result
def zerosFactory(shape, dtype=float):
    """Create a new NumPy array via ``arrayFactory()`` and zero-fill it.

    Same signature as ``arrayFactory``; the returned array is guaranteed
    to contain only zeros.
    """
    zeroed = arrayFactory(shape=shape, dtype=dtype)
    zeroed.fill(0)
    return zeroed
def assign_interval(data):
    """Identify coverage based on percent of genome covered and relation to targets.

    Classifies coverage into 3 categories:
      - genome: Full genome coverage
      - regional: Regional coverage, like exome capture, with off-target reads
      - amplicon: Amplification based regional coverage without off-target reads
    """
    # Only classify when no interval was configured explicitly.
    if not dd.get_coverage_interval(data):
        vrs = dd.get_variant_regions_merged(data)
        callable_file = dd.get_sample_callable(data)
        # Prefer explicit variant regions; otherwise fall back to the
        # sample's callable regions.
        if vrs:
            callable_size = pybedtools.BedTool(vrs).total_coverage()
        else:
            callable_size = pybedtools.BedTool(callable_file).total_coverage()
        total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
        genome_cov_pct = callable_size / float(total_size)
        if genome_cov_pct > GENOME_COV_THRESH:
            cov_interval = "genome"
            offtarget_pct = 0.0
        elif not vrs:
            # No target regions available -- treat as regional capture.
            cov_interval = "regional"
            offtarget_pct = 0.0
        else:
            # Fraction of aligned reads outside the targets distinguishes
            # capture (has off-target reads) from amplicon data (does not).
            offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data),
                                             vrs or callable_file, "variant_regions")
            if offtarget_pct > OFFTARGET_THRESH:
                cov_interval = "regional"
            else:
                cov_interval = "amplicon"
        logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage"
                    % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0))
        data["config"]["algorithm"]["coverage_interval"] = cov_interval
    return data
def parse(self, sentence, params=None, headers=None):
    """Request a parse of *sentence* and return the response.

    Args:
        sentence (str): sentence to be parsed
        params (dict): a dictionary of request parameters
        headers (dict): a dictionary of additional request headers
    Returns:
        A ParseResponse containing the results, if the request was
        successful.
    Raises:
        requests.HTTPError: if the status code was not 200
    """
    if params is None:
        params = {}
    # NOTE: intentionally mutates a caller-supplied ``params`` dict, as the
    # original implementation did.
    params['input'] = sentence
    request_headers = {'Accept': 'application/json'}
    if headers is not None:
        request_headers.update(headers)
    response = requests.get(urljoin(self.server, 'parse'),
                            params=params, headers=request_headers)
    if response.status_code == 200:
        return _RestResponse(response.json())
    # Raises for 4xx/5xx; other non-200 codes fall through returning None.
    response.raise_for_status()
def get(self, name, default=None):
    '''Return the first part with that name or a default value (None).'''
    matches = (part for part in self if part.name == name)
    return next(matches, default)
def relative_humidity_wet_psychrometric(dry_bulb_temperature, web_bulb_temperature, pressure, **kwargs):
    r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.

    This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
    coefficients from [Fan1987]_.

    Parameters
    ----------
    dry_bulb_temperature : `pint.Quantity`
        Dry bulb temperature
    web_bulb_temperature : `pint.Quantity`
        Wet bulb temperature
    pressure : `pint.Quantity`
        Total atmospheric pressure

    Returns
    -------
    `pint.Quantity`
        Relative humidity

    Notes
    -----
    .. math:: RH = \frac{e}{e_s}

    * :math:`RH` is relative humidity as a unitless ratio
    * :math:`e` is vapor pressure from the wet psychrometric calculation
    * :math:`e_s` is the saturation vapor pressure

    See Also
    --------
    psychrometric_vapor_pressure_wet, saturation_vapor_pressure
    """
    vapor_pressure = psychrometric_vapor_pressure_wet(
        dry_bulb_temperature, web_bulb_temperature, pressure, **kwargs)
    saturation_pressure = saturation_vapor_pressure(dry_bulb_temperature)
    return vapor_pressure / saturation_pressure
def close_error_dlg(self):
    """Close the error dialog, remembering a dismissal request if checked."""
    dlg = self.error_dlg
    if dlg.dismiss_box.isChecked():
        # User asked not to be shown this error again.
        self.dismiss_error = True
    dlg.reject()
def staged_predict(self, X):
    """Predict hazard at each stage for X.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The input samples.

    Returns
    -------
    y : generator of array of shape = (n_samples,)
        The predicted value of the input samples.
    """
    check_is_fitted(self, 'estimators_')
    # Dropout training leaves a ``scale_`` attribute; pick the staged
    # decision function that accounts for the per-tree scaling factors.
    if hasattr(self, "scale_"):
        stages = self._dropout_staged_decision_function(X)
    else:
        stages = self._staged_decision_function(X)
    for raw in stages:
        yield self._scale_prediction(raw.ravel())
def mouseDragEvent(self, ev, axis=None):
    """Customized mouse dragging, where the right drag is bounding box zoom.

    :param ev: event object containing drag state info
    :type ev: :py:class:`MouseDragEvent<pyqtgraph:pyqtgraph.GraphicsScene.mouseEvents.MouseDragEvent>`
    """
    if self._customMouse and ev.button() == QtCore.Qt.RightButton:
        ev.accept()
        ## we accept all buttons
        # directly copy-pasted from ViewBox for ViewBox.RectMode
        if ev.isFinish():  ## This is the final move in the drag; change the view scale now
            # print "finish"
            pos = ev.pos()
            self.rbScaleBox.hide()
            # ax = QtCore.QRectF(Point(self.pressPos), Point(self.mousePos))
            ax = QtCore.QRectF(Point(ev.buttonDownPos(ev.button())), Point(pos))
            ax = self.childGroup.mapRectFromParent(ax)
            self.showAxRect(ax)
            # Record the new zoom rect, truncating any redo history beyond
            # the current pointer.
            self.axHistoryPointer += 1
            self.axHistory = self.axHistory[:self.axHistoryPointer] + [ax]
        else:  ## update shape of scale box
            self.updateScaleBox(ev.buttonDownPos(), ev.pos())
    else:
        state = None
        # ctrl reverses mouse operation axis
        if ev.modifiers() == QtCore.Qt.ControlModifier:
            state = self.mouseEnabled()
            self.setMouseEnabled(not state[0], not state[1])
        super(SpikeyViewBox, self).mouseDragEvent(ev, axis)
        if state is not None:
            # Restore the original axis-enable state after delegating.
            self.setMouseEnabled(*state)
def resolve(self, from_email, resolution=None):
    """Resolve an incident using a valid email address."""
    if from_email is None or not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    endpoint = '/'.join((self.endpoint, self.id,))
    payload = {'incident': {'type': 'incident', 'status': 'resolved'}}
    if resolution is not None:
        # NOTE(review): resolution text is attached at the top level of the
        # body, not inside 'incident' -- confirm this matches the API schema.
        payload['resolution'] = resolution
    return self.request('PUT', endpoint=endpoint,
                        add_headers={'from': from_email}, data=payload)
def release_parts(version):
    """Split RPM Release string into (numeric X.Y.Z part, milestone, rest).

    :returns: a three-element tuple (number, milestone, rest). If we cannot
              determine the "milestone" or "rest", those will be an empty
              string.
    """
    numver, tail = version_parts(version)
    if numver and not re.match(r'\d', numver):
        # Entire release is a macro a la %{release}; shift it into the tail.
        tail = numver
        numver = ''
    milestone, rest = '', tail
    match = re.match(r'(\.?(?:%\{\?milestone\}|[^%.]+))(.*)$', tail)
    if match:
        milestone, rest = match.group(1), match.group(2)
    return numver, milestone, rest
def groupby(xs, key_fn):
    """Group elements of the list `xs` by keys generated from calling `key_fn`.

    Returns a dictionary which maps keys to sub-lists of `xs`.
    """
    grouped = defaultdict(list)
    for item in xs:
        grouped[key_fn(item)].append(item)
    return grouped
def _chirp_mass ( self ) :
"""Chirp mass calculation""" | return ( self . m1 * self . m2 ) ** ( 3. / 5. ) / ( self . m1 + self . m2 ) ** ( 1. / 5. ) |
def _read_ipv6_opts_options(self, length):
    """Read IPv6_Opts options.

    Positional arguments:
        * length -- int, length of options

    Returns:
        * tuple -- extracted option type list
        * dict -- extracted IPv6_Opts options
    """
    counter = 0
    # length of read options
    optkind = list()
    # option type list
    options = dict()
    # dict of option data
    while counter < length:  # break when eol triggered
        code = self._read_unpack(1)
        if not code:
            break
        # extract parameter
        abbr, desc = _IPv6_Opts_OPT.get(code, ('None', 'Unassigned'))
        data = _IPv6_Opts_PROC(abbr)(self, code, desc=desc)
        enum = _OPT_TYPE.get(code)
        # record parameter data
        counter += data['length']
        if enum in optkind:
            # Repeated option type: collect all occurrences into a tuple.
            if isinstance(options[abbr], tuple):
                options[abbr] += (Info(data),)
            else:
                options[abbr] = (Info(options[abbr]), Info(data))
        else:
            optkind.append(enum)
            options[abbr] = data
    # check threshold: consumed bytes must match the declared option length
    if counter != length:
        raise ProtocolError(f'{self.alias}: invalid format')
    return tuple(optkind), options
def solid(self, x, y):
    """Determine whether the pixel x, y is nonzero.

    Parameters
    ----------
    x, y : int
        The pixel of interest.

    Returns
    -------
    solid : bool
        True if the pixel is within bounds and not zero.
    """
    in_bounds = (0 <= x < self.xsize) and (0 <= y < self.ysize)
    return bool(in_bounds and self.data[x, y] != 0)
def remove_condition(self, rank):
    """Remove a condition element using it's rank. You can find the
    rank and element for a match condition by iterating the match
    condition::

        >>> rule1 = rm.route_map_rules.get(0)
        >>> for condition in rule1.match_condition:
        ...   condition
        Condition(rank=1, element=ExternalBGPPeer(name=bgppeer))
        Condition(rank=2, element=IPAccessList(name=myacl))
        Condition(rank=3, element=Metric(value=20))

    Then delete by rank. Call update on the rule after making the
    modification.

    :param int rank: rank of the condition to remove
    :raises UpdateElementFailed: failed to update rule
    :return: None
    """
    # Filter out the matching rank; slice-assign so the existing list
    # object is mutated in place rather than rebound.
    kept = [cond for cond in self.conditions if cond.get('rank') != rank]
    self.conditions[:] = kept
def teardown_websocket(self, func: Callable) -> Callable:
    """Add a teardown websocket function to the Blueprint.

    This is designed to be used as a decorator, and has the same
    arguments as :meth:`~quart.Quart.teardown_websocket`. It
    applies only to requests that are routed to an endpoint in
    this blueprint. An example usage,

    .. code-block:: python

        blueprint = Blueprint(__name__)

        @blueprint.teardown_websocket
        def teardown():
            ...
    """
    def register(state):
        # Deferred registration: runs when the blueprint is bound to an app.
        state.app.teardown_websocket(func, self.name)

    self.record_once(register)
    return func
def sort_references_dict(refs):
    """Sorts a reference dictionary into a standard order.

    The keys of the references are also sorted, and the keys for the data
    for each reference are put in a more canonical order.
    """
    refs_sorted = OrderedDict() if _use_odict else dict()
    # Insert the schema entry first so it leads the ordered dict; the
    # sorted pass below overwrites its value without moving it.
    refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema']
    # Sort entries by reference key (author1985a, etc.)
    for ref_key in sorted(refs):
        refs_sorted[ref_key] = sort_single_reference(refs[ref_key])
    return refs_sorted
def p_Dictionary(p):
    """Dictionary : dictionary IDENTIFIER Inheritance "{" DictionaryMembers "}" ";" """
    # PLY production rule: the docstring above IS the grammar specification
    # read by yacc -- never restyle it. Build the model node from the
    # matched symbols: p[2]=name, p[3]=parent, p[5]=member list.
    p[0] = model.Dictionary(name=p[2], parent=p[3], members=p[5])
def run(self, context=None, options=None):
    """Run axe against the current page.

    :param context: which page part(s) to analyze and/or what to exclude.
    :param options: dictionary of aXe options.
    """
    template = ("var callback = arguments[arguments.length - 1];"
                + "axe.run(%s).then(results => callback(results))")
    # Assemble the axe.run() argument list; join inserts the comma only
    # when both context and options are present.
    pieces = []
    if context is not None:
        pieces.append("%r" % context)
    if options is not None:
        pieces.append("%s" % options)
    command = template % ",".join(pieces)
    return self.selenium.execute_async_script(command)
def repr_arg(d):
    """Formats a function argument prettily but as working code.

    Unicode encodable as ascii is formatted as str. (Python 2 code: relies
    on the builtin ``unicode`` type.)
    """
    if isinstance(d, dict):
        # express d in key: value syntax, recursing into keys and values
        body = ", ".join("%s: %s" % (repr_arg(key), repr_arg(value))
                         for key, value in d.items())
        return "{%s}" % body
    if isinstance(d, list):
        return "[%s]" % ", ".join(repr_arg(element) for element in d)
    if isinstance(d, unicode):
        try:
            # Prefer the plain-str repr when the text is pure ASCII.
            return repr(d.encode("ascii"))
        except UnicodeEncodeError:
            return repr(d)
    return repr(d)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.