signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get(self, index: int) -> TOption[T]:
    """Return the element at *index* wrapped in a ``TOption``.

    :param index: position to look up
    :return: ``TOption`` holding the element, or a ``TOption`` of ``None``
        when *index* is out of range.

    Usage:

        >>> TList([1, 2, 3, 4, 5]).get(3)
        Option --> 4
        >>> TList([1, 2, 3, 4, 5]).get(5)
        Option --> None
    """
    if index < len(self):
        return TOption(self[index])
    return TOption(None)
|
def contributors(self):
    """Return the list of contributors, loading and caching it on demand."""
    cached = self._contributors
    if not cached:
        # Populate the cache on first access (or while it is empty/None).
        cached = self.get_contributors()
        self._contributors = cached
    return cached
|
def transform(self, data):
    """Tokenize *data* and run the parent class transform on it.

    :param data: DataFrame with column to encode
    :return: encoded values reshaped to ``(len(data), sequence_length)``
    """
    with timer('transform %s' % self.name, logging.DEBUG):
        tokens = self.tokenize(data)
        encoded = super(Token, self).transform(tokens)
        return encoded.reshape((len(data), self.sequence_length))
|
def start(self):
    """:meth:`WStoppableTask.start` implementation that creates new thread.

    Does nothing if a thread object already exists (``self.__thread`` is
    not ``None``); otherwise resets the stop/ready/exception events and
    launches a new thread.
    """
    # Fetch the event objects once, up front; the closure below captures them.
    start_event = self.start_event()
    stop_event = self.stop_event()
    ready_event = self.ready_event()

    def thread_target():
        # Runs inside the new thread: flag the start, do the work, then
        # signal readiness; failures are recorded via the exception event.
        try:
            start_event.set()
            self.thread_started()
            if ready_event is not None:
                ready_event.set()
        except Exception as e:
            self.exception_event().set()
            self.thread_exception(e)

    if self.__thread is None:
        # Not started yet: clear all state events before launching the thread.
        if stop_event is not None:
            stop_event.clear()
        if ready_event is not None:
            ready_event.clear()
        self.exception_event().clear()
        self.__thread = Thread(target=thread_target, name=self.thread_name())
        self.__thread.start()
|
def _draw_arrow(self, x1, y1, x2, y2, Dx, Dy, label="", width=1.0,
                arrow_curvature=1.0, color="grey", patchA=None, patchB=None,
                shrinkA=0, shrinkB=0, arrow_label_size=None):
    """Draws a slightly curved arrow from (x1, y1) to (x2, y2).
    Will allow the given patches at start and end.

    Dx and Dy are used to normalise distances, so curvature looks similar
    regardless of the axis scale.
    """
    # set arrow properties
    dist = _sqrt(((x2 - x1) / float(Dx)) ** 2 + ((y2 - y1) / float(Dy)) ** 2)
    arrow_curvature *= 0.075  # standard scale
    # NOTE(review): dist == 0 (start == end point) would raise
    # ZeroDivisionError here -- confirm callers never pass coincident points.
    rad = arrow_curvature / (dist)
    tail_width = width
    head_width = max(0.5, 2 * width)
    head_length = head_width
    self.ax.annotate("", xy=(x2, y2), xycoords='data', xytext=(x1, y1), textcoords='data',
                     arrowprops=dict(arrowstyle='simple,head_length=%f,head_width=%f,tail_width=%f' % (head_length, head_width, tail_width),
                                     color=color, shrinkA=shrinkA, shrinkB=shrinkB,
                                     patchA=patchA, patchB=patchB,
                                     connectionstyle="arc3,rad=%f" % -rad),
                     zorder=0)
    # weighted center position
    center = _np.array([0.55 * x1 + 0.45 * x2, 0.55 * y1 + 0.45 * y2])
    v = _np.array([x2 - x1, y2 - y1])  # 1 -> 2 vector
    vabs = _np.abs(v)
    vnorm = _np.array([v[1], -v[0]])  # orthogonal vector
    vnorm = _np.divide(vnorm, _np.linalg.norm(vnorm))  # normalize
    # cross product to determine the direction into which vnorm points
    z = _np.cross(v, vnorm)
    if z < 0:
        vnorm *= -1
    # offset the label perpendicular to the arrow, scaled by the plot extent
    offset = 0.5 * arrow_curvature * ((vabs[0] / (vabs[0] + vabs[1])) * Dx + (vabs[1] / (vabs[0] + vabs[1])) * Dy)
    ptext = center + offset * vnorm
    self.ax.text(ptext[0], ptext[1], label, size=arrow_label_size,
                 horizontalalignment='center', verticalalignment='center', zorder=1)
|
def _parse_response ( self , result_page ) :
"""Takes a result page of sending the sms , returns an extracted tuple :
( ' numeric _ err _ code ' , ' < sent _ queued _ message _ id > ' , ' < smsglobalmsgid > ' )
Returns None if unable to extract info from result _ page , it should be
safe to assume that it was either a failed result or worse , the interface
contract has changed ."""
|
# Sample result _ page , single line - > " OK : 0 ; Sent queued message ID : 2063619577732703 SMSGlobalMsgID : 6171799108850954"
resultline = result_page . splitlines ( ) [ 0 ]
# get result line
if resultline . startswith ( 'ERROR:' ) :
raise Exception ( resultline . replace ( 'ERROR: ' , '' ) )
patt = re . compile ( r'^.+?:\s*(.+?)\s*;\s*Sent queued message ID:\s*(.+?)\s*SMSGlobalMsgID:(.+?)$' , re . IGNORECASE )
m = patt . match ( resultline )
if m :
return ( m . group ( 1 ) , m . group ( 2 ) , m . group ( 3 ) )
return None
|
def split_number_and_unit(s):
    """Parse a string consisting of an integer number and an optional unit.

    @param s: a non-empty string that starts with an int and is followed by
        some letters
    @return: a pair of the number (as int) and the unit (str)
    """
    if not s:
        raise ValueError('empty value')
    s = s.strip()
    # Scan backwards until the last digit of the leading number is found.
    split_at = len(s)
    while split_at and not s[split_at - 1].isdigit():
        split_at -= 1
    return (int(s[:split_at]), s[split_at:].strip())
|
def error(msg, delay=0.5, chevrons=True, verbose=True):
    """Log an error message in red to stderr, then pause for *delay* seconds.

    :param msg: message text
    :param delay: seconds to sleep after printing
    :param chevrons: prefix the message with a chevron marker
    :param verbose: if False, do nothing at all
    """
    if not verbose:
        return
    text = "\n❯❯ " + msg if chevrons else msg
    click.secho(text, err=True, fg="red")
    time.sleep(delay)
|
async def get_token(cls, host, **params):
    """POST /oauth/v2/token -- get a new token.

    :param host: host of the service
    :param params: will contain::

        params = {"grant_type": "password",
                  "client_id": "a string",
                  "client_secret": "a string",
                  "username": "a login",
                  "password": "a password"}

    :return: access token
    """
    params['grant_type'] = "password"
    url = host + "/oauth/v2/token"
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=params) as response:
            payload = await cls.handle_json_response(response)
    return payload.get("access_token")
|
def is_on_filesystem(value, **kwargs):
    """Indicate whether ``value`` is a file or directory that exists on the
    local filesystem.

    :param value: The value to evaluate.
    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`
    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
        or duplicates keyword parameters passed to the underlying validator
    """
    try:
        validators.path_exists(value, **kwargs)
    except SyntaxError:
        # Programming errors must propagate, not be masked as "invalid".
        raise
    except Exception:
        return False
    return True
|
def geometric_center(coords, periodic):
    '''Geometric center taking into account periodic boundaries.

    Maps each coordinate onto a circle, sums the resulting cos/sin
    components over axis 0, and converts the mean angle back into box
    coordinates (the standard circular-mean construction).
    '''
    box = periodic
    angles = 2 * np.pi * (coords / box)
    cos_part = (np.cos(angles) * box / (2 * np.pi)).sum(axis=0)
    sin_part = (np.sin(angles) * box / (2 * np.pi)).sum(axis=0)
    mean_angle = np.arctan2(-sin_part, -cos_part) + np.pi
    return mean_angle * box / (2 * np.pi)
|
def receive(self, protocolTreeNode):
    """Dispatch an incoming node.

    :type protocolTreeNode: ProtocolTreeNode
    """
    if self.processIqRegistry(protocolTreeNode):
        return
    tag = protocolTreeNode.tag
    if tag == "message":
        self.onMessage(protocolTreeNode)
    elif tag != "receipt":
        # receipts will be handled by send layer
        self.toUpper(protocolTreeNode)
|
def _check_operators(self, operators):
    """Check Inputs.

    This method checks that the input operators are correctly formatted:
    a non-empty list/tuple/ndarray whose elements each expose ``op`` and
    ``cost`` attributes (which are wrapped with ``check_callable``).

    Parameters
    ----------
    operators : list, tuple or np.ndarray
        List of linear operator class instances

    Returns
    -------
    np.ndarray of operators

    Raises
    ------
    TypeError
        For invalid input type
    ValueError
        For an empty list or an operator missing ``op``/``cost``
    """
    if not isinstance(operators, (list, tuple, np.ndarray)):
        raise TypeError('Invalid input type, operators must be a list, '
                        'tuple or numpy array.')
    operators = np.array(operators)
    if not operators.size:
        raise ValueError('Operator list is empty.')
    for operator in operators:
        for method in ('op', 'cost'):
            if not hasattr(operator, method):
                raise ValueError('Operators must contain "%s" method.' % method)
        operator.op = check_callable(operator.op)
        operator.cost = check_callable(operator.cost)
    return operators
|
def update(self, *args, **kw):
    '''Update the dictionary with items and names::

        (items, names, **kw)
        (dict, names, **kw)
        (MIDict, names, **kw)

    Optional positional argument ``names`` is only allowed when
    ``self.indices`` is empty (no indices are set yet).

    :raises ValueError: if ``names`` is passed while indices are already
        set, or if the update items' length does not match the existing one.
    '''
    if len(args) > 1 and self.indices:
        # Bug fix: the adjacent string literals used to concatenate without
        # a separating space ("...when theindex names...").
        raise ValueError('Only one positional argument is allowed when the '
                         'index names are already set.')
    if not self.indices:  # empty; init again
        _MI_init(self, *args, **kw)
        return
    d = MIMapping(*args, **kw)
    if not d.indices:
        return
    names = force_list(self.indices.keys())
    if len(d.indices) != len(names):
        raise ValueError('Length of update items (%s) does not match '
                         'length of original items (%s)'
                         % (len(d.indices), len(names)))
    for key in d:
        # use __setitem__() to handle duplicate keys
        self[key] = d[key]
|
def get(self, subscription_id=None, stream=None, historics_id=None, page=None,
        per_page=None, order_by=None, order_dir=None, include_finished=None):
    """Show details of the Subscriptions belonging to this user.

    Uses API documented at
    http://dev.datasift.com/docs/api/rest-api/endpoints/pushget

    :param subscription_id: optional id of an existing Push Subscription
    :type subscription_id: str
    :param stream: optional hash of a live stream
    :type stream: str
    :param historics_id: optional playback id of a Historics query
    :type historics_id: str
    :param page: optional page number for pagination
    :type page: int
    :param per_page: optional number of items per page, default 20
    :type per_page: int
    :param order_by: field to order by, default request_time
    :type order_by: str
    :param order_dir: direction to order by, asc or desc, default desc
    :type order_dir: str
    :param include_finished: boolean indicating if finished Subscriptions
        for Historics should be included
    :type include_finished: bool
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    params = {}
    optional = (('id', subscription_id), ('hash', stream),
                ('historics_id', historics_id), ('page', page),
                ('per_page', per_page), ('order_by', order_by),
                ('order_dir', order_dir))
    for name, value in optional:
        if value:
            params[name] = value
    # Bug fix: this used to be gated on truthiness, so an explicit False was
    # silently dropped and the `else 0` branch was dead code; the flag is
    # now sent as 0/1 whenever the caller supplies it.
    if include_finished is not None:
        params['include_finished'] = 1 if include_finished else 0
    return self.request.get('get', params=params)
|
def dusk_utc(self, date, latitude, longitude, depression=0, observer_elevation=0):
    """Calculate dusk time in the UTC timezone.

    :param date: Date to calculate for.
    :type date: :class:`datetime.date`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float
    :param depression: Override the depression used (0 means "use default")
    :type depression: float
    :param observer_elevation: Elevation in metres to calculate dusk for
    :type observer_elevation: int
    :return: The UTC date and time at which dusk occurs.
    :rtype: :class:`~datetime.datetime`
    """
    effective = (self._depression if depression == 0 else depression) + 90
    try:
        return self._calc_time(effective, SUN_SETTING, date,
                               latitude, longitude, observer_elevation)
    except ValueError as exc:
        if exc.args[0] == "math domain error":
            raise AstralError(
                ("Sun never reaches %d degrees below the horizon, "
                 "at this location.") % (effective - 90))
        raise
|
def getReadAlignmentId(self, gaAlignment):
    """Returns a string ID suitable for use in the specified GA
    ReadAlignment object in this ReadGroupSet."""
    return str(datamodel.ReadAlignmentCompoundId(
        self.getCompoundId(), gaAlignment.fragment_name))
|
def _call(self, x):
    """Return ``self(x)``: the shearlet decomposition of *x* with the
    coefficient axis moved to the front."""
    with self.mutex:
        coefficients = pyshearlab.SLsheardec2D(x, self.shearlet_system)
        return np.moveaxis(coefficients, -1, 0)
|
def forward(self, observations):
    """Calculate model outputs: the (action, value) pair for *observations*."""
    features = self.input_block(observations)
    action_output = self.action_head(self.policy_backbone(features))
    value_output = self.value_head(self.value_backbone(features))
    return action_output, value_output
|
def add_by_steps(self, entries_by_step, table=None, columns=None):
    """Add entries to the main table, one step at a time.

    *entries_by_step* should be an iterable yielding iterables; each inner
    iterable is forwarded unchanged to :meth:`add`.
    """
    for step_entries in entries_by_step:
        self.add(step_entries, table=table, columns=columns)
|
def format_request_email_title(increq, **ctx):
    """Format the email message title for inclusion request notification.

    :param increq: Inclusion request object for which the request is made.
    :type increq: `invenio_communities.models.InclusionRequest`
    :param ctx: Optional extra context parameters passed to formatter.
    :type ctx: dict.
    :returns: Email message title.
    :rtype: str
    """
    # Bug fix: a trailing comma after the config lookup turned `template`
    # into a 1-element tuple instead of the template string.
    template = current_app.config["COMMUNITIES_REQUEST_EMAIL_TITLE_TEMPLATE"]
    return format_request_email_templ(increq, template, **ctx)
|
def connections(self):
    """Returns all of the loaded connections names as a list."""
    # Section names look like "connection:<name>"; strip the prefix.
    return [str(section).replace('connection:', '')
            for section in self.sections()]
|
def nearest(self, idx):
    """Return datetime of record whose datetime is nearest *idx*.

    When only one neighbour exists it is returned; exact ties resolve to
    the earlier record.
    """
    upper = self.after(idx)
    lower = self.before(idx)
    if upper is None or lower is None:
        return lower if upper is None else upper
    return upper if abs(upper - idx) < abs(lower - idx) else lower
|
def to_docstring(kwargs, lpad=''):
    """Reconstruct a docstring snippet from keyword argument info.

    Basically reverses :func:`extract_kwargs`.

    Parameters
    ----------
    kwargs : list
        Output from the extract_kwargs function: (name, type, description
        lines) triples.
    lpad : str, optional
        Padding string (from the left).

    Returns
    -------
    str
        The docstring snippet documenting the keyword arguments.
    """
    lines = []
    for name, type_, description in kwargs:
        lines.append('%s%s: %s\n' % (lpad, name, type_))
        lines.extend('%s %s\n' % (lpad, desc_line) for desc_line in description)
    return ''.join(lines)
|
def tryCComment(self, block):
    """C comment checking. If the previous line begins with a "/*" or a "*",
    then return its leading white spaces + ' * ' + the white spaces after
    the *.

    return: filler string or null, if not in a C comment
    """
    indentation = None
    prevNonEmptyBlock = self._prevNonEmptyBlock(block)
    if not prevNonEmptyBlock.isValid():
        # No previous non-empty line: nothing to align with.
        return None
    prevNonEmptyBlockText = prevNonEmptyBlock.text()
    if prevNonEmptyBlockText.endswith('*/'):
        # Previous line closes a C comment: align with the line that opened it.
        try:
            foundBlock, notUsedColumn = self.findTextBackward(prevNonEmptyBlock, prevNonEmptyBlock.length(), '/*')
        except ValueError:
            foundBlock = None
        if foundBlock is not None:
            dbg("tryCComment: success (1) in line %d" % foundBlock.blockNumber())
            return self._lineIndent(foundBlock.text())
    if prevNonEmptyBlock != block.previous():
        # inbetween was an empty line, so do not copy the "*" character
        return None
    blockTextStripped = block.text().strip()
    prevBlockTextStripped = prevNonEmptyBlockText.strip()
    if prevBlockTextStripped.startswith('/*') and not '*/' in prevBlockTextStripped:
        # Inside an unclosed /* ... comment: continue it with " *".
        indentation = self._blockIndent(prevNonEmptyBlock)
        if CFG_AUTO_INSERT_STAR:
            # only add '*', if there is none yet.
            indentation += ' '
            if not blockTextStripped.endswith('*'):
                indentation += '*'
            secondCharIsSpace = len(blockTextStripped) > 1 and blockTextStripped[1].isspace()
            if not secondCharIsSpace and not blockTextStripped.endswith("*/"):
                indentation += ' '
        dbg("tryCComment: success (2) in line %d" % block.blockNumber())
        return indentation
    elif prevBlockTextStripped.startswith('*') and (len(prevBlockTextStripped) == 1 or prevBlockTextStripped[1].isspace()):
        # Previous line is a "*"-continued comment line.
        # in theory, we could search for opening /*, and use its indentation
        # and then one alignment character. Let's not do this for now, though.
        indentation = self._lineIndent(prevNonEmptyBlockText)
        # only add '*', if there is none yet.
        if CFG_AUTO_INSERT_STAR and not blockTextStripped.startswith('*'):
            indentation += '*'
            if len(blockTextStripped) < 2 or not blockTextStripped[1].isspace():
                indentation += ' '
        dbg("tryCComment: success (2) in line %d" % block.blockNumber())
        return indentation
    return None
|
def inception_v3(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8,
                 min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax,
                 spatial_squeeze=True, reuse=None, scope='InceptionV3'):
    """Inception model from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"
    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna.

    With the default arguments this method constructs the exact model defined
    in the paper. However, one can experiment with variations of the
    inception_v3 network by changing arguments dropout_keep_prob, min_depth
    and depth_multiplier.

    The default image size used to train this network is 299x299.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether is training or not.
      dropout_keep_prob: the percentage of activation values that are retained.
      min_depth: Minimum depth value (number of channels) for all convolution
        ops. Enforced when depth_multiplier < 1, and not an active constraint
        when depth_multiplier >= 1.
      depth_multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      prediction_fn: a function to get predictions out of logits.
      spatial_squeeze: if True, logits is of shape [B, C], if false logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of
        classes.
      reuse: whether or not the network and its variables should be reused.
        To be able to reuse 'scope' must be given.
      scope: Optional variable_scope.

    Returns:
      logits: the pre-softmax activations, a tensor of size
        [batch_size, num_classes]
      end_points: a dictionary from components of the network to the
        corresponding activation.

    Raises:
      ValueError: if 'depth_multiplier' is less than or equal to zero.
    """
    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')

    def depth(d):
        # Scale the channel count by depth_multiplier, never below min_depth.
        return max(int(d * depth_multiplier), min_depth)

    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            net, end_points = inception_v3_base(inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier)
            # Auxiliary Head logits
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
                aux_logits = end_points['Mixed_6e']
                with tf.variable_scope('AuxLogits'):
                    aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID', scope='AvgPool_1a_5x5')
                    aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
                    # Shape of feature map before the final layer.
                    kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
                    aux_logits = slim.conv2d(aux_logits, depth(768), kernel_size, weights_initializer=trunc_normal(0.01), padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
                    aux_logits = slim.conv2d(aux_logits, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, weights_initializer=trunc_normal(0.001), scope='Conv2d_2b_1x1')
                    if spatial_squeeze:
                        aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits
                # Final pooling and prediction
                with tf.variable_scope('Logits'):
                    kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
                    net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                    # 1 x 1 x 2048
                    net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                    end_points['PreLogits'] = net
                    # 2048
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1')
                    if spatial_squeeze:
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                    # 1000
                    end_points['Logits'] = logits
                    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points
|
def delete(zone):
    '''Delete the specified configuration from memory and stable storage.

    zone : string
        name of zone

    CLI Example:

    .. code-block:: bash

        salt '*' zonecfg.delete epyon
    '''
    # delete zone
    res = __salt__['cmd.run_all']('zonecfg -z {zone} delete -F'.format(zone=zone))
    ret = {'status': res['retcode'] == 0}
    message = res['stdout'] if ret['status'] else res['stderr']
    if message != '':
        ret['message'] = _clean_message(message)
    return ret
|
def download():
    """Download all files from an FTP share, updating a progress counter
    per file."""
    ftp = ftplib.FTP(SITE)
    ftp.set_debuglevel(DEBUG)
    ftp.login(USER, PASSWD)
    ftp.cwd(DIR)
    remote_files = ftp.nlst()
    progress = MANAGER.counter(total=len(remote_files), desc='Downloading', unit='files')
    for remote_name in remote_files:
        with Writer(remote_name, ftp.size(remote_name), DEST) as writer:
            ftp.retrbinary('RETR %s' % remote_name, writer.write)
        print(remote_name)
        progress.update()
    ftp.close()
|
def color_is_disabled(**envars):
    '''Look for clues in environment, e.g.:

    - https://bixense.com/clicolors/
    - http://no-color.org/

    Arguments:
        envars: Additional environment variables to check for
            equality, i.e. ``MYAPP_COLOR_DISABLED='1'``

    Returns:
        None, Bool: Disabled
    '''
    result = True if ('NO_COLOR' in env or env.CLICOLOR == '0') else None
    log.debug('%r (NO_COLOR=%s, CLICOLOR=%s)', result,
              env.NO_COLOR or '', env.CLICOLOR or '')
    for name, value in envars.items():
        if getattr(env, name).value == value:
            result = True
        log.debug('%s == %r: %r', name, value, result)
    return result
|
def explain_lifecycle(self, index=None, params=None):
    """`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html>`_

    :arg index: The name of the index to explain
    """
    path = _make_path(index, "_ilm", "explain")
    return self.transport.perform_request("GET", path, params=params)
|
def is_imagej(self):
    """Return ImageJ description if exists, else None."""
    for description in (self.description, self.description1):
        if not description:
            # An empty/missing description short-circuits the search.
            return None
        if description.startswith('ImageJ='):
            return description
    return None
|
def match_time_series(self, timeseries1, timeseries2):
    """Return the entries of the two input time series with matching dates.

    :param TimeSeries timeseries1: The first timeseries
    :param TimeSeries timeseries2: The second timeseries
    :return: Two two-dimensional lists containing the matched entries
    :rtype: tuple of two lists
    """
    # Bug fix: the original used map()/filter(), which return single-use
    # iterators on Python 3 -- the membership tests consumed them (yielding
    # wrong/empty matches) and filter objects, not lists, were returned.
    rows1 = timeseries1.to_twodim_list()
    rows2 = timeseries2.to_twodim_list()
    shared_dates = {row[0] for row in rows1} & {row[0] for row in rows2}
    listX = [row for row in rows1 if row[0] in shared_dates]
    listY = [row for row in rows2 if row[0] in shared_dates]
    return listX, listY
|
def get_next_types(self, n=None):
    """Gets the next set of ``Types`` in this list.

    The specified amount must be less than or equal to the return
    from ``available()``.

    arg:    n (cardinal): the number of ``Type`` elements requested which
            must be less than or equal to ``available()``
    return: (osid.type.Type) - an array of ``Type`` elements. The length
            of the array is less than or equal to the number specified.
    raise:  IllegalState - no more elements available in this list
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    from ..osid.osid_errors import IllegalState, OperationFailed
    # !!! This is not quite as specified (see method docs) !!!
    if n > self.available():
        raise IllegalState('not enough elements available in this list')
    next_list = []
    for _ in range(n):
        try:
            next_list.append(self.next())
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. (Narrow further once the
            # raised types are known.) Unused `import sys` removed.
            raise OperationFailed()
    return next_list
|
def stats(self):
    """Shortcut to pull out useful info for interactive use."""
    for label, items in (("Classes.....", self.all_classes),
                         ("Properties..", self.all_properties)):
        printDebug("%s: %d" % (label, len(items)))
|
def z_at_value(func, fval, unit, zmax=1000., **kwargs):
    r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.

    Getting a z for a cosmological quantity involves numerically inverting
    ``func``. The ``zmax`` argument sets how large of a z to guess (see
    :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger
    than ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If
    that still is not large enough, will just return ``numpy.inf``.

    Parameters
    ----------
    func : function or method
        A function that takes redshift as input.
    fval : float
        The value of ``func(z)``.
    unit : astropy.unit
        The unit of ``fval``.
    zmax : float, optional
        The initial maximum search limit for ``z``. Default is 1000.
    \**kwargs :
        All other keyword arguments are passed to
        :py:func:``astropy.cosmology.z_at_value``.

    Returns
    -------
    float
        The redshift at the requested values.
    """
    fval, input_is_array = ensurearray(fval)
    # make sure fval is atleast 1D
    if fval.size == 1 and fval.ndim == 0:
        fval = fval.reshape(1)
    zs = numpy.zeros(fval.shape, dtype=float)  # the output array
    for (ii, val) in enumerate(fval):
        try:
            zs[ii] = astropy.cosmology.z_at_value(func, val * unit, zmax=zmax, **kwargs)
        except CosmologyError:
            # we'll get this if the z was larger than zmax; in that case we'll
            # try bumping up zmax later to get a value
            zs[ii] = numpy.inf
    # check if there were any zs > zmax
    replacemask = numpy.isinf(zs)
    # try bumping up zmax to get a result
    if replacemask.any():
        # we'll keep bumping up the maxz until we can get a result
        counter = 0  # to prevent running forever
        while replacemask.any():
            kwargs['zmin'] = zmax
            zmax = 10 * zmax
            # NOTE(review): numpy.where(...) returns a *tuple* of index
            # arrays; iterating it yields the whole index array at once,
            # not scalar indices -- confirm this is the intended behaviour
            # (a [0] subscript may be missing here).
            idx = numpy.where(replacemask)
            for ii in idx:
                val = fval[ii]
                try:
                    zs[ii] = astropy.cosmology.z_at_value(func, val * unit, zmax=zmax, **kwargs)
                    replacemask[ii] = False
                except CosmologyError:
                    # didn't work, try on next loop
                    pass
            counter += 1
            if counter == 5:
                # give up and warn the user
                logging.warning("One or more values correspond to a "
                                "redshift > {0:.1e}. The redshift for these "
                                "have been set to inf. If you would like "
                                "better precision, call God.".format(zmax))
                break
    return formatreturn(zs, input_is_array)
|
def hkdf_expand(pseudo_random_key, info=b"", length=32, hash=hashlib.sha512):
    '''Expand `pseudo_random_key` and `info` into a key of `length` bytes
    using HKDF's expand function based on HMAC with the provided hash
    (default SHA-512). See RFC 5869 and the HKDF paper for usage notes.

    :raises Exception: if `length` exceeds 255 * digest_size (RFC 5869 cap).
    '''
    hash_len = hash().digest_size
    length = int(length)
    if length > 255 * hash_len:
        raise Exception("Cannot expand to more than 255 * %d = %d bytes using the specified hash function" % (hash_len, 255 * hash_len))
    blocks_needed = length // hash_len + (0 if length % hash_len == 0 else 1)  # ceil
    okm = b""
    output_block = b""
    for counter in range(blocks_needed):
        # T(i) = HMAC(PRK, T(i-1) | info | i); the counter byte is one-based
        # per RFC 5869. Bug fix: the Python-2-only builtin buffer() was
        # removed in Python 3; plain bytes concatenation works on both.
        output_block = hmac.new(pseudo_random_key,
                                output_block + info + bytes(bytearray((counter + 1,))),
                                hash).digest()
        okm += output_block
    return okm[:length]
|
def _clone(self, cid):
    """Create a temporary image snapshot from a given container *cid*.

    Temporary image snapshots are marked with a sentinel label so that they
    can be cleaned on unmount.
    """
    sentinel = {'Labels': {'io.projectatomic.Temporary': 'true'}}
    try:
        iid = self.client.commit(container=cid, conf=sentinel)['Id']
    except docker.errors.APIError as ex:
        raise MountError(str(ex))
    self.tmp_image = iid
    return self._create_temp_container(iid)
|
def _select_query_method ( cls , url ) :
"""Select the correct query method based on the URL
Works for ` DataQualityFlag ` and ` DataQualityDict `"""
|
if urlparse ( url ) . netloc . startswith ( 'geosegdb.' ) : # only DB2 server
return cls . query_segdb
return cls . query_dqsegdb
|
def query(self, filter_text, max_servers=10, timeout=30, **kw):
    r"""Query game servers.

    https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol

    .. note::
        When specifying ``filter_text`` use *raw strings*, otherwise python
        won't treat backslashes as literal characters
        (e.g. ``query(r'\appid\730\white\1')``)

    :param filter_text: filter for servers
    :type filter_text: str
    :param max_servers: (optional) number of servers to return
    :type max_servers: int
    :param timeout: (optional) timeout for request in seconds
    :type timeout: int
    :param app_id: (optional) app id
    :type app_id: int
    :param geo_location_ip: (optional) ip (e.g. '1.2.3.4')
    :type geo_location_ip: str
    :returns: list of servers (``None`` if steam doesn't respond)
    :rtype: :class:`list`, :class:`None`

    Sample response:

    .. code:: python

        [{'auth_players': 0, 'server_ip': '1.2.3.4', 'server_port': 27015},
         {'auth_players': 6, 'server_ip': '1.2.3.4', 'server_port': 27016}]
    """
    if 'geo_location_ip' in kw:
        kw['geo_location_ip'] = ip_to_int(kw['geo_location_ip'])
    kw['filter_text'] = filter_text
    kw['max_servers'] = max_servers
    resp = self._s.send_job_and_wait(MsgProto(EMsg.ClientGMSServerQuery), kw, timeout=timeout)
    if resp is None:
        return None
    servers = proto_to_dict(resp)['servers']
    # Integer-packed IPs come back from the wire; convert to dotted quads.
    for server in servers:
        server['server_ip'] = ip_from_int(server['server_ip'])
    return servers
|
def adjust_image(f, max_size=(800, 800), new_format=None, jpeg_quality=90, fill=False, stretch=False, return_new_image=False, force_jpeg_save=True):
    """Fit an image to the given parameters.

    max_size - maximum image size; one of the two dimensions may be None (auto)
    new_format - output file format (jpeg, png, gif); if None, the original format is kept
    jpeg_quality - JPEG quality
    fill - whether the image should fill the target box by cropping (otherwise it is fit inside)
    stretch - whether to upscale when the image is too small
    return_new_image - if True, a new StringIO object with the image is returned;
        otherwise a bool telling whether the file was modified
    force_jpeg_save - if True, a JPEG file will be re-saved in any case
    """
    assert isinstance(max_size, (list, tuple)) and len(max_size) == 2
    assert 0 < jpeg_quality <= 100
    if new_format:
        new_format = new_format.lower()
        if new_format not in ('jpeg', 'png', 'gif'):
            raise RuntimeError('Invalid new_format value.')
    f.seek(0)
    img = Image.open(f)
    # JPEG output requires RGB mode; convert when the source mode differs.
    if ((new_format == 'jpeg' and img.mode != 'RGB') or (new_format is None and img.format == 'JPEG' and img.mode != 'RGB')):
        do_convert = True
        if dju_settings.DJU_IMG_CONVERT_JPEG_TO_RGB:
            # NOTE(review): if get_image_as_rgb() can return None, ``img`` is
            # clobbered here and ``img.format`` below would fail — confirm the
            # helper never returns None for a JPEG source.
            img = get_image_as_rgb(f)
            if img is not None:
                do_convert = False
        if do_convert:
            current_format = img.format
            img = img.convert('RGB')
            # convert() drops the format attribute; restore it.
            img.format = current_format
    max_width, max_height = max_size
    img_width, img_height = img.size
    img_format = img.format.lower()
    ch_size = ch_format = False  # track whether size / format were changed
    # Auto-compute the missing dimension preserving the aspect ratio.
    if max_width is None:
        max_width = int(((img_width / float(img_height)) * max_height))
    elif max_height is None:
        max_height = int(((img_height / float(img_width)) * max_width))
    if (img_width, img_height) != (max_width, max_height):
        tasks = []  # queued (method_name, args, kwargs) operations on ``img``
        if fill:
            if (img_width < max_width or img_height < max_height) and not stretch:
                # Smaller than the box and upscaling forbidden: crop a
                # centered window with the target aspect ratio.
                k = max(max_width / float(img_width), max_height / float(img_height))
                w, h = max_width / k, max_height / k
                left, top = int((img_width - w) / 2.), int((img_height - h) / 2.)
                tasks.append(('crop', ((left, top, int(left + w), int(top + h)),), {}))
            else:
                # Scale so the image covers the box, then center-crop.
                k = min(img_width / float(max_width), img_height / float(max_height))
                w, h = img_width / k, img_height / k
                tasks.append(('resize', ((int(w), int(h)), Image.LANCZOS), {}))
                left, top = int((w - max_width) / 2.), int((h - max_height) / 2.)
                tasks.append(('crop', ((left, top, left + max_width, top + max_height),), {}))
        elif ((img_width > max_width or img_height > max_height) or (img_width < max_width and img_height < max_height and stretch)):
            # Fit inside the box (downscale, or upscale when stretch=True).
            k = max(img_width / float(max_width), img_height / float(max_height))
            w, h = int(img_width / k), int(img_height / k)
            tasks.append(('resize', ((w, h), Image.LANCZOS), {}))
        for img_method, method_args, method_kwargs in tasks:
            # Skip operations that would be no-ops.
            if ((img_method == 'resize' and method_args[0] == (img_width, img_height)) or (img_method == 'crop' and method_args[0] == (0, 0, img.size[0], img.size[1]))):
                continue
            img = getattr(img, img_method)(*method_args, **method_kwargs)
            ch_size = True
    if new_format and new_format != img_format:
        img_format = new_format
        ch_format = True
    if not ch_format and img_format == 'jpeg' and force_jpeg_save:
        # Re-save JPEGs unconditionally when requested.
        ch_format = True
    if return_new_image:
        t = StringIO()
        _save_img(img, t, img_format=img_format, quality=jpeg_quality, progressive=True, optimize=True)
        return t
    if ch_size or ch_format:
        img.load()
        truncate_file(f)
        _save_img(img, f, img_format=img_format, quality=jpeg_quality, progressive=True, optimize=True)
        if isinstance(f, UploadedFile):
            # Refresh the UploadedFile's size to match the rewritten content.
            f.seek(0, 2)
            f.size = f.tell()
            set_uploaded_file_content_type_and_file_ext(f, img_format)
    return ch_size or ch_format
|
def addSource(self, itemSource):
    """Add the given L{IBatchProcessor} as a source of input for this indexer."""
    # Persist the indexer<->source association in the store.
    _IndexerInputSource(store=self.store, indexer=self, source=itemSource)
    # Register ourselves as a reliable listener so the source feeds us items.
    itemSource.addReliableListener(self, style=iaxiom.REMOTE)
|
def __find_variant(self, value):
    """Find the messages.Variant type that describes this value.

    Args:
        value: The value whose variant type is being determined.

    Returns:
        The messages.Variant value that best describes value's type,
        or None if it's a type we don't know how to handle.
    """
    if isinstance(value, bool):
        return messages.Variant.BOOL
    elif isinstance(value, six.integer_types):
        return messages.Variant.INT64
    elif isinstance(value, float):
        return messages.Variant.DOUBLE
    elif isinstance(value, six.string_types):
        return messages.Variant.STRING
    elif isinstance(value, (list, tuple)):
        # Find the most specific variant that covers all elements.
        variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING]
        chosen_priority = 0
        for v in value:
            variant = self.__find_variant(v)
            try:
                priority = variant_priority.index(variant)
            except ValueError:
                # BUGFIX: list.index raises ValueError (not IndexError) for a
                # missing entry; unknown variants get the lowest priority.
                priority = -1
            if priority > chosen_priority:
                chosen_priority = priority
        return variant_priority[chosen_priority]
    # Unrecognized type.
    return None
|
def get_expr_id(self, search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments=None):
    """Return the expr_def_id for the row in the table whose values match the givens.

    If a matching row is not found, returns None.

    @search_group: string representing the search group (e.g., cbc)
    @search: string representing search (e.g., inspiral)
    @lars_id: string representing lars_id
    @instruments: the instruments; must be a python set
    @gps_start_time: string or int representing the gps_start_time of the experiment
    @gps_end_time: string or int representing the gps_end_time of the experiment
    """
    # canonicalize the instrument set into its string form before comparing
    target = (search_group, search, lars_id, ifos_from_instrument_set(instruments),
              gps_start_time, gps_end_time, comments)
    for row in self:
        candidate = (row.search_group, row.search, row.lars_id, row.instruments,
                     row.gps_start_time, row.gps_end_time, row.comments)
        if candidate == target:
            # found it
            return row.experiment_id
    # experiment not found in table
    return None
|
def dump_to_stream(self, cnf, stream, **opts):
    """Serialize configuration data to a stream via an element tree.

    :param cnf: Configuration data to dump
    :param stream: Config file or file like object write to
    :param opts: optional keyword parameters, forwarded to the tree builder
    """
    # Build the element tree from the container, then write it out.
    tree = container_to_etree(cnf, **opts)
    etree_write(tree, stream)
|
def _parseAtImports(self, src):
    """[ import [S|CDO|CDC]* ]*

    Consume consecutive @import rules from ``src``; returns the remaining
    source and the list of stylesheets produced by the builder.
    """
    stylesheets = []
    while isAtRuleIdent(src, 'import'):
        context_src = src
        src = stripAtRuleIdent(src)
        import_, src = self._getStringOrURI(src)
        if import_ is None:
            raise self.ParseError('Import expecting string or url', src, context_src)

        # collect the (optional) comma-separated media list
        media = []
        name, src = self._getIdent(src.lstrip())
        while name is not None:
            media.append(name)
            if src[:1] != ',':
                break
            src = src[1:].lstrip()
            name, src = self._getIdent(src)
        # XXX No medium inherits and then "all" is appropriate
        if not media:
            media = ["all"]

        if src[:1] != ';':
            raise self.ParseError('@import expected a terminating \';\'', src, context_src)
        src = src[1:].lstrip()

        stylesheet = self.cssBuilder.atImport(import_, media, self)
        if stylesheet is not None:
            stylesheets.append(stylesheet)
        src = self._parseSCDOCDC(src)
    return src, stylesheets
|
def add_nio(self, nio, port_number):
    """Adds a NIO as new port on Frame Relay switch.

    :param nio: NIO instance to add
    :param port_number: port to allocate for the NIO
    :raises DynamipsError: if the requested port is already bound
    """
    if port_number in self._nios:
        raise DynamipsError("Port {} isn't free".format(port_number))
    log.info('Frame Relay switch "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name, id=self._id, nio=nio, port=port_number))
    self._nios[port_number] = nio
    # Re-apply the existing mappings so the new port is wired in
    # (generator-based asyncio coroutine, hence ``yield from``).
    yield from self.set_mappings(self._mappings)
|
def load_markov(argv, stdin):
    """Load and return markov algorithm.

    Reads the rules from ``argv[3]`` when given, otherwise from ``stdin``.
    """
    if len(argv) <= 3:
        # no file argument: read the rules from standard input
        return Algorithm(stdin.readlines())
    with open(argv[3]) as input_file:
        return Algorithm(input_file.readlines())
|
def _run_cnvkit_shared_orig(inputs, backgrounds):
    """Original CNVkit implementation with full normalization and segmentation.

    Returns a list of dicts per input sample with "cnr" (copy number ratio)
    and "cns" (segmented calls) output file paths.
    """
    work_dir = _sv_workdir(inputs[0])
    raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
    # Background reference is named after the first background sample, or
    # "flat" when running without matched backgrounds.
    background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
    background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % (background_name))
    ckouts = []
    for cur_input in inputs:
        cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
        out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
        # Prefer pre-existing outputs produced under the old naming scheme.
        if utils.file_exists(out_base_old + ".cns"):
            out_base = out_base_old
        ckouts.append({"cnr": "%s.cnr" % out_base, "cns": "%s.cns" % out_base})
    if not utils.file_exists(ckouts[0]["cns"]):
        cov_interval = dd.get_coverage_interval(inputs[0])
        samples_to_run = list(zip(["background"] * len(backgrounds), backgrounds)) + list(zip(["evaluate"] * len(inputs), inputs))
        # New style shared SV bins
        if tz.get_in(["depth", "bins", "target"], inputs[0]):
            target_bed = tz.get_in(["depth", "bins", "target"], inputs[0])
            antitarget_bed = tz.get_in(["depth", "bins", "antitarget"], inputs[0])
            raw_coverage_cnns = reduce(operator.add, [_get_general_coverage(cdata, itype) for itype, cdata in samples_to_run])
        # Back compatible with pre-existing runs
        else:
            target_bed, antitarget_bed = _get_original_targets(inputs[0])
            raw_coverage_cnns = reduce(operator.add, [_get_original_coverage(cdata, itype) for itype, cdata in samples_to_run])
        # Currently metrics not calculated due to speed and needing re-evaluation
        # We could re-enable with larger truth sets to evaluate background noise
        # But want to reimplement in a more general fashion as part of normalization
        if False:  # deliberately disabled branch, kept for reference
            coverage_cnns = reduce(operator.add, [_cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, inputs + backgrounds) for cnns in tz.groupby("bam", raw_coverage_cnns).values()])
            background_cnn = cnvkit_background(_select_background_cnns(coverage_cnns), background_cnn, inputs, target_bed, antitarget_bed)
        else:
            coverage_cnns = raw_coverage_cnns
            background_cnn = cnvkit_background([x["file"] for x in coverage_cnns if x["itype"] == "background"], background_cnn, inputs, target_bed, antitarget_bed)
        parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
        # Fix each evaluation sample against the background reference, grouped by BAM.
        fixed_cnrs = run_multicore(_cnvkit_fix, [(cnns, background_cnn, inputs, ckouts) for cnns in tz.groupby("bam", [x for x in coverage_cnns if x["itype"] == "evaluate"]).values()], inputs[0]["config"], parallel)
        # Segment each fixed .cnr into .cns calls (run for side effects).
        [_cnvkit_segment(cnr, cov_interval, data, inputs + backgrounds) for cnr, data in fixed_cnrs]
    return ckouts
|
def char2range(d, is_bytes=False, invert=True):
    """Convert the characters in the dict to a range in string form.

    Each list value (sorted code points) is replaced in place by a compact
    "a-b"-style range string.  When ``invert`` is true, the complementary
    key ('^'-prefixed, or unprefixed for keys already starting with '^')
    is also filled with the inverse ranges.
    """
    fmt = bytesformat if is_bytes else uniformat
    maxrange = MAXASCII if is_bytes else MAXUNICODE
    for k1 in sorted(d.keys()):
        v1 = d[k1]
        if not isinstance(v1, list):
            # Nested dict of categories: recurse.
            char2range(v1, is_bytes=is_bytes, invert=invert)
        else:
            inverted = k1.startswith('^')
            v1.sort()
            last = None    # end of the current contiguous run
            first = None   # start of the current contiguous run
            ilast = None   # end of the current gap (inverse run)
            ifirst = None  # start of the current gap (inverse run)
            v2 = []        # formatted ranges
            iv2 = []       # formatted inverse ranges
            if v1 and v1[0] != 0:
                ifirst = 0
            for i in v1:
                if first is None:
                    first = i
                    last = i
                elif i == last + 1:
                    # Still contiguous: extend the current run.
                    last = i
                elif first is not None:
                    # Gap found: flush the finished run...
                    if first == last:
                        v2.append(fmt(first))
                    else:
                        v2.append("%s-%s" % (fmt(first), fmt(last)))
                    # ...and the inverse run that preceded it.
                    if invert and ifirst is not None:
                        ilast = first - 1
                        if ifirst == ilast:
                            iv2.append(fmt(ifirst))
                        else:
                            iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
                        ifirst = last + 1
                    first = i
                    last = i
            if not v1:
                # Empty list: the inverse covers the whole range.
                iv2 = ["%s-%s" % (fmt(0), fmt(maxrange))]
            elif first is not None:
                # Flush the trailing run.
                if first == last:
                    v2.append(fmt(first))
                else:
                    v2.append("%s-%s" % (fmt(first), fmt(last)))
                if invert and ifirst is not None:
                    ilast = first - 1
                    if ifirst == ilast:
                        iv2.append(fmt(ifirst))
                    else:
                        iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
                    ifirst = last + 1
            # NOTE(review): if v1 is one contiguous run starting at 0,
            # ``ifirst`` is still None here and the comparison below would
            # raise TypeError on Python 3 — presumably never hit with the
            # real data; confirm.
            if invert and ifirst <= maxrange:
                # Close the final inverse run up to the max code point.
                ilast = maxrange
                if ifirst == ilast:
                    iv2.append(fmt(ifirst))
                else:
                    iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
            d[k1] = ''.join(v2)
            if invert:
                d[k1[1:] if inverted else '^' + k1] = ''.join(iv2)
|
def system_requirements():
    """Check if all necessary packages are installed on system.

    :return: None or raise exception if some tooling is missing
    """
    # BUGFIX: the adjacent string literals previously concatenated without a
    # separator, producing e.g. "...your systemDo you have...".
    command_exists(
        "systemd-nspawn",
        ["systemd-nspawn", "--version"],
        "Command systemd-nspawn does not seem to be present on your system. "
        "Do you have a system with systemd?")
    command_exists(
        "machinectl",
        ["machinectl", "--no-pager", "--help"],
        "Command machinectl does not seem to be present on your system. "
        "Do you have a system with systemd?")
    # selinux in enforcing mode breaks several nspawn operations
    if "Enforcing" in run_cmd(["getenforce"], return_output=True, ignore_status=True):
        logger.error(
            "Please disable selinux (setenforce 0), selinux blocks some nspawn operations. "
            "This may lead to strange behaviour")
|
def get_tag_values(self, tags, columns):
    """Return the tag value for each column name, preserving column order.

    :param tags: mapping of tag key -> value
    :param columns: iterable of column (tag key) names
    :return: list of values; columns absent from ``tags`` map to ``None``
    """
    # dict.get already yields None for missing keys, so the explicit
    # membership test of the old while-loop is unnecessary.
    return [tags.get(column) for column in columns]
|
def basic_auth(f):
    """Decorator that injects auth into the wrapped requests call over a route.

    :param f: route callable whose first positional argument is the router
    :return: wrapped route
    """
    import functools

    # functools.wraps preserves the wrapped route's name/docstring for
    # debugging and introspection (was missing before).
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        self = args[0]
        if 'auth' in kwargs:
            raise AttributeError("don't set auth token explicitly")
        assert self.is_connected, "not connected, call router.connect(email, password) first"
        # Prefer the JWT token; fall back to basic auth credentials.
        if self._jwt_auth:
            kwargs['auth'] = self._jwt_auth
        elif self._auth:
            kwargs['auth'] = self._auth
        else:
            assert False, "no basic token, no JWT, but connected o_O"
        return f(*args, **kwargs)
    return wrapper
|
def is_name_in_grace_period(self, name, block_number):
    """Given a name and block number, determine if it is in the renewal grace period at that block.

    * names in revealed but not ready namespaces are never expired, unless the namespace itself is expired;
    * names in ready namespaces enter the grace period once max(ready_block, renew_block) + lifetime - grace_period blocks passes

    Return True if so
    Return False if not, or if the name does not exist.
    """
    cur = self.db.cursor()
    name_rec = namedb_get_name(cur, name, block_number, include_expired=False)
    if name_rec is None:
        # expired already or doesn't exist
        return False
    namespace_id = get_namespace_from_name(name)
    namespace_rec = namedb_get_namespace(cur, namespace_id, block_number, include_history=False)
    if namespace_rec is None:
        # namespace not found at this block
        return False
    grace_info = BlockstackDB.get_name_deadlines(name_rec, namespace_rec, block_number)
    if grace_info is None:
        # namespace isn't ready yet
        return False
    # In the grace period iff at/past expiration but before the renewal deadline.
    return (block_number >= grace_info['expire_block'] and block_number < grace_info['renewal_deadline'])
|
def sum_string(amount, gender, items=None):
    """Get sum in words

    @param amount: amount of objects
    @type amount: C{integer types}
    @param gender: gender of object (MALE, FEMALE or NEUTER)
    @type gender: C{int}
    @param items: variants of object in three forms:
        for one object, for two objects and for five objects
    @type items: 3-element C{sequence} of C{unicode} or
        just C{unicode} (three variants with delimeter ',')
    @return: in-words representation objects' amount
    @rtype: C{unicode}
    @raise ValueError: items isn't 3-element C{sequence} or C{unicode}
    @raise ValueError: amount bigger than 10**11
    @raise ValueError: amount is negative
    """
    if isinstance(items, six.text_type):
        items = split_values(items)
    if items is None:
        items = (u"", u"", u"")
    try:
        one_item, two_items, five_items = items
    except ValueError:
        raise ValueError("Items must be 3-element sequence")
    check_positive(amount)
    if amount == 0:
        # "ноль" == Russian for "zero"
        if five_items:
            return u"ноль %s" % five_items
        else:
            return u"ноль"
    into = u''
    tmp_val = amount
    # ones
    into, tmp_val = _sum_string_fn(into, tmp_val, gender, items)
    # thousands
    into, tmp_val = _sum_string_fn(into, tmp_val, FEMALE, (u"тысяча", u"тысячи", u"тысяч"))
    # millions
    into, tmp_val = _sum_string_fn(into, tmp_val, MALE, (u"миллион", u"миллиона", u"миллионов"))
    # billions
    into, tmp_val = _sum_string_fn(into, tmp_val, MALE, (u"миллиард", u"миллиарда", u"миллиардов"))
    if tmp_val == 0:
        return into
    else:
        raise ValueError("Cannot operand with numbers bigger than 10**11")
|
def btc_tx_is_segwit(tx_serialized):
    """Is this serialized (hex-encoded) transaction a segwit transaction?

    Per BIP144 a segwit transaction carries a 0x00 marker byte right after
    the 4-byte version field, followed by a non-zero flag byte.
    """
    marker_offset = 4
    # 5th byte is the marker byte
    flag_offset = 5
    # 6th byte is the flag byte
    # two hex characters per byte in the serialized string
    marker_byte_string = tx_serialized[2 * marker_offset: 2 * (marker_offset + 1)]
    flag_byte_string = tx_serialized[2 * flag_offset: 2 * (flag_offset + 1)]
    # segwit (per BIP144); return the boolean directly instead of
    # the verbose if/else True/False form
    return marker_byte_string == '00' and flag_byte_string != '00'
|
def _Rforce ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ Rforce
PURPOSE :
evaluate the radial force for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
the radial force
HISTORY :
2010-07-10 - Written - Bovy ( NYU )"""
|
return - R / ( R ** 2. + z ** 2. ) ** ( self . alpha / 2. )
|
def plot_inducing(self, visible_dims=None, projection='2d', label='inducing', legend=True, **plot_kwargs):
    """Plot the inducing inputs of a sparse gp model

    :param array-like visible_dims: an array specifying the input dimensions to plot (maximum two)
    :param kwargs plot_kwargs: keyword arguments for the plotting library
    """
    canvas, extra_kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
    artists = _plot_inducing(self, canvas, visible_dims, projection, label, **extra_kwargs)
    return pl().add_to_canvas(canvas, artists, legend=legend)
|
def generate_bond_subgraphs_from_break(bond_graph, atom1, atom2):
    """Splits the bond graph between two atoms to producing subgraphs.

    Notes
    -----
    This will not work if there are cycles in the bond graph.

    Parameters
    ----------
    bond_graph: networkx.Graph
        Graph of covalent bond network
    atom1: isambard.ampal.Atom
        First atom in the bond.
    atom2: isambard.ampal.Atom
        Second atom in the bond.

    Returns
    -------
    subgraphs: [networkx.Graph]
        A list of subgraphs generated when a bond is broken in the covalent
        bond network.
    """
    bond_graph.remove_edge(atom1, atom2)
    try:
        # networkx removed connected_component_subgraphs in 2.4; fall back to
        # the documented replacement (subgraph views over the components).
        if hasattr(networkx, 'connected_component_subgraphs'):
            subgraphs = list(networkx.connected_component_subgraphs(bond_graph, copy=False))
        else:
            subgraphs = [bond_graph.subgraph(c) for c in networkx.connected_components(bond_graph)]
    finally:
        # Add edge back so the caller's graph is left unchanged.
        bond_graph.add_edge(atom1, atom2)
    return subgraphs
|
def calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, mapping):
    """Take the various integrals calculated by get_moments and convert
    them into a metric for the appropriate parameter space.

    Parameters
    ----------
    Js : Dictionary
        The list of (log^0 x) * x**(-i/3) integrals computed by get_moments();
        the index is Js[i].
    logJs : Dictionary
        As above with (log^1 x); index is logJs[i].
    loglogJs : Dictionary
        As above with (log^2 x); index is loglogJs[i].
    logloglogJs : Dictionary
        As above with (log^3 x); index is logloglogJs[i].
    loglogloglogJs : Dictionary
        As above with (log^4 x); index is loglogloglogJs[i].
    mapping : dictionary
        Used to identify which Lambda components are active in this parameter
        space and map these to entries in the metric matrix.

    Returns
    -------
    metric : numpy.matrix
        The resulting metric.
    unmax_metric : numpy.matrix
        The metric before maximization (one extra row/column).
    """
    # How many dimensions in the parameter space?
    maxLen = len(mapping)  # len(dict) is equivalent to len(dict.keys())
    metric = numpy.matrix(numpy.zeros(shape=(maxLen, maxLen), dtype=float))
    unmax_metric = numpy.matrix(numpy.zeros(shape=(maxLen + 1, maxLen + 1), dtype=float))
    # Fill each metric component from the moment integrals; entries not
    # present in ``mapping`` are left untouched by the helper.
    for i in range(16):
        for j in range(16):
            calculate_metric_comp(metric, unmax_metric, i, j, Js, logJs, loglogJs, logloglogJs, loglogloglogJs, mapping)
    return metric, unmax_metric
|
def dump(self, backend, node):
    '''High-level function to call a `backend' on a `node' to generate
    code for module `module_name'.

    Raises TypeError when `backend' is not a Backend subclass.'''
    # Validate explicitly instead of ``assert`` (asserts vanish under -O).
    if not (isinstance(backend, type) and issubclass(backend, Backend)):
        raise TypeError("backend must be a Backend subclass")
    b = backend()
    b.attach(self)
    return b.run(node)
|
def run_check(self, data):
    """Check for uncommon words and difficult words in file."""
    if not data:
        # nothing to analyse: abort with a failure exit code
        sys.exit(1)
    data, sentences, chars, num_words = self.pre_check(data)
    word_counts = Counter(data)
    uniq_len, uncommon, uncom_len = self.gsl(word_counts)
    # words outside the Dale-Chall familiar-word list count as "difficult"
    difficult = Counter({word: count for word, count in word_counts.items()
                         if word and word not in self.dale_chall_words})
    diff_count = sum(difficult.values())
    dc_score = round(self.dale_chall(diff_count, num_words, sentences), 1)
    cli_score = round(self.coleman_liau(chars, num_words, sentences), 1)
    return uncommon, uncom_len, uniq_len, dc_score, cli_score
|
def filter(args):
    """%prog filter paired.fastq

    Filter to get high qv reads. Use interleaved format (one file) or paired
    format (two files) to filter on paired reads.
    """
    p = OptionParser(filter.__doc__)
    p.add_option("-q", dest="qv", default=20, type="int",
                 help="Minimum quality score to keep [default: %default]")
    p.add_option("-p", dest="pct", default=95, type="int",
                 help="Minimum percent of bases that have [-q] quality "
                      "[default: %default]")
    opts, args = p.parse_args(args)
    if len(args) not in (1, 2):
        sys.exit(not p.print_help())
    if len(args) == 1:
        # interleaved format: both mates come from the same file
        r1 = r2 = args[0]
    else:
        r1, r2 = args
    qv = opts.qv
    pct = opts.pct
    offset = guessoffset([r1])
    qvchar = chr(offset + qv)
    logging.debug("Call base qv >= {0} as good.".format(qvchar))
    outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
    fw = open(outfile, "w")
    p1fp, p2fp = FastqPairedIterator(r1, r2)
    while True:
        # FASTQ records are 4 lines each
        a = list(islice(p1fp, 4))
        if not a:
            break
        b = list(islice(p2fp, 4))
        q1 = a[-1].rstrip()
        q2 = b[-1].rstrip()
        # keep the pair only when BOTH mates pass the quality threshold
        if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
            fw.writelines(a)
            fw.writelines(b)
    # BUGFIX: the output handle was previously never closed, risking
    # truncated/unflushed output.
    fw.close()
|
def add_entry(self, date, entry):
    """Add the given entry to the textual representation.

    The entry is inserted after the last existing Entry under the matching
    DateLine (or right after the DateLine itself when that date has no
    entries yet, in which case a blank separator line is also inserted).
    """
    in_date = False
    insert_at = 0
    for (lineno, line) in enumerate(self.lines):
        # Search for the date of the entry
        if isinstance(line, DateLine) and line.date == date:
            in_date = True
            # Insert here if there is no existing Entry for this date
            insert_at = lineno
            continue
        if in_date:
            if isinstance(line, Entry):
                # Track the last Entry seen inside this date block.
                insert_at = lineno
            elif isinstance(line, DateLine):
                # Reached the next date: stop scanning.
                break
    self.lines.insert(insert_at + 1, entry)
    # If there's no other Entry in the current date, add a blank line
    # between the date and the entry
    if not isinstance(self.lines[insert_at], Entry):
        self.lines.insert(insert_at + 1, TextLine(''))
|
def preprocess_cell(self, cell, resources, cell_index):
    """Also extracts attachments.

    Each cell attachment of an extractable MIME type is moved into a
    ``display_data`` output so the base extractor writes it to a file; the
    ``attachment:`` links in the cell source are then rewritten to point at
    the extracted output names.
    """
    from nbformat.notebooknode import NotebookNode
    attach_names = []
    # Just move the attachment into an output
    for k, attach in cell.get('attachments', {}).items():
        for mime_type in self.extract_output_types:
            if mime_type in attach:
                if not 'outputs' in cell:
                    cell['outputs'] = []
                o = NotebookNode({'data': NotebookNode({mime_type: attach[mime_type]}),
                                  'metadata': NotebookNode({'filenames': {mime_type: k}  # Will get re-written
                                                            }),
                                  'output_type': 'display_data'})
                cell['outputs'].append(o)
                attach_names.append((mime_type, k))
    nb, resources = super().preprocess_cell(cell, resources, cell_index)
    output_names = list(resources.get('outputs', {}).keys())
    if attach_names:
        # We're going to assume that attachments are only on Markdown cells, and Markdown cells
        # can't generate output, so all of the outputs we added.
        # reverse + zip matches the last len(attach_names) elements from output_names
        for output_name, (mimetype, an) in zip(reversed(output_names), reversed(attach_names)):
            # We'll post process to set the final output directory
            cell.source = re.sub('\(attachment:{}\)'.format(an), '(__IMGDIR__/{})'.format(output_name), cell.source)
    return nb, resources
|
def get_resource_attribute(collection, key, attribute):
    """Return the appropriate *Response* for retrieving an attribute of
    a single resource.

    :param string collection: a :class:`sandman.model.Model` endpoint
    :param string key: the primary key for the :class:`sandman.model.Model`
    :rtype: :class:`flask.Response`
    """
    resource = retrieve_resource(collection, key)
    _validate(endpoint_class(collection), request.method, resource)
    value = getattr(resource, attribute)
    # Plain values get an attribute response; related models get a full
    # resource response of their own.
    if not isinstance(value, Model):
        return attribute_response(resource, attribute, value)
    return resource_response(value)
|
def set_default_host(cls, value):
    """Set the default host prefix for generated request urls.

    Default: "http://127.0.0.1:80". Passing ``None`` restores the default;
    any other value is normalised to "scheme://host:port" form and will be
    automatically included at the beginning of the url generated for doing
    each http request.
    """
    if value is None:
        cls.DEFAULT_HOST = "http://127.0.0.1:80"
        return
    scheme, host, port = get_hostname_parameters_from_url(value)
    cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port)
|
def fast_hash(self):
    """Get a CRC32 or xxhash.xxh64 reflecting the DataStore.

    Returns
    -------
    hashed : int, checksum of data
    """
    # Sum the per-item hashes of every stored value.
    total = 0
    for item in self.data.values():
        total += item.fast_hash()
    return total
|
def _updateJobWhenDone(self):
    """Asynchronously update the status of the job on the disk, first waiting
    until the writing threads have finished and the input blockFn has
    stopped blocking."""
    def asyncUpdate():
        try:
            # Wait till all file writes have completed
            # (one sentinel per worker drains the queue)
            for i in range(len(self.workers)):
                self.queue.put(None)
            for thread in self.workers:
                thread.join()
            # Wait till input block-fn returns - in the event of an exception
            # this will eventually terminate
            self.inputBlockFn()
            # Check the terminate event, if set we can not guarantee
            # that the workers ended correctly, therefore we exit without
            # completing the update
            # NOTE(review): Event.isSet() is the legacy alias of is_set().
            if self._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set, exiting before update")
            # Indicate any files that should be deleted once the update of
            # the job wrapper is completed.
            self.jobGraph.filesToDelete = list(self.filesToDelete)
            # Complete the job
            self.jobStore.update(self.jobGraph)
            # Delete any remnant jobs
            list(map(self.jobStore.delete, self.jobsToDelete))
            # Delete any remnant files
            list(map(self.jobStore.deleteFile, self.filesToDelete))
            # Remove the files to delete list, having successfully removed the files
            if len(self.filesToDelete) > 0:
                self.jobGraph.filesToDelete = []
                # Update, removing emptying files to delete
                self.jobStore.update(self.jobGraph)
        except:
            # Bare except is deliberate: flag termination, then re-raise.
            self._terminateEvent.set()
            raise
        finally:
            # Indicate that _blockFn can return
            # This code will always run
            self.updateSemaphore.release()
    # The update semaphore is held while the job is written to the job store
    try:
        self.updateSemaphore.acquire()
        t = Thread(target=asyncUpdate)
        t.start()
    except:
        # This is to ensure that the semaphore is released in a crash to stop a deadlock
        # scenario
        self.updateSemaphore.release()
        raise
|
def _infer_xy_labels ( darray , x , y , imshow = False , rgb = None ) :
"""Determine x and y labels . For use in _ plot2d
darray must be a 2 dimensional data array , or 3d for imshow only ."""
|
assert x is None or x != y
if imshow and darray . ndim == 3 :
return _infer_xy_labels_3d ( darray , x , y , rgb )
if x is None and y is None :
if darray . ndim != 2 :
raise ValueError ( 'DataArray must be 2d' )
y , x = darray . dims
elif x is None :
if y not in darray . dims and y not in darray . coords :
raise ValueError ( 'y must be a dimension name if x is not supplied' )
x = darray . dims [ 0 ] if y == darray . dims [ 1 ] else darray . dims [ 1 ]
elif y is None :
if x not in darray . dims and x not in darray . coords :
raise ValueError ( 'x must be a dimension name if y is not supplied' )
y = darray . dims [ 0 ] if x == darray . dims [ 1 ] else darray . dims [ 1 ]
elif any ( k not in darray . coords and k not in darray . dims for k in ( x , y ) ) :
raise ValueError ( 'x and y must be coordinate variables' )
return x , y
|
def _generate_provenance(self):
    """Function to generate provenance at the end of the IF."""
    # noinspection PyTypeChecker
    hazard = definition(self._provenance['hazard_keywords']['hazard'])
    exposures = [definition(layer.keywords['exposure']) for layer in self.exposures]
    # InaSAFE
    set_provenance(self._provenance, provenance_impact_function_name, self.name)
    set_provenance(self._provenance, provenance_analysis_extent, self._analysis_extent.asWkt())
    set_provenance(self._provenance, provenance_analysis_question, get_multi_exposure_analysis_question(hazard, exposures))
    set_provenance(self._provenance, provenance_data_store_uri, self.datastore.uri_path)
    # Map title
    set_provenance(self._provenance, provenance_map_title, self.name)
    # CRS
    set_provenance(self._provenance, provenance_crs, self._crs.authid())
    # Debug mode
    set_provenance(self._provenance, provenance_debug_mode, self.debug_mode)
    # Flag that the provenance record is now complete.
    self._provenance_ready = True
|
def remove(self, order, cells):
    """Remove cells at a given order from the MOC."""
    # Any structural change invalidates the normalized flag.
    self._normalized = False
    order = self._validate_order(order)
    for cell in cells:
        cell = self._validate_cell(order, cell)
        # Delegate the actual removal to the shared compare operation.
        self._compare_operation(order, cell, True, 'remove')
|
def _get_port_profile_id(self, request):
    """Get the port profile ID from the request path."""
    # Note(alexcoman): The port profile ID can be found as suffix
    # in request path.
    candidate = request.path.rsplit("/", 1)[-1].strip()
    if not uuidutils.is_uuid_like(candidate):
        LOG.debug("Failed to get the instance id from the request.")
        return None
    LOG.debug("The instance id was found in request path.")
    return candidate
|
def replace_su(network, su_to_replace):
    """Replace the storage unit ``su_to_replace`` with explicit components.

    The storage unit is expanded into: a bus for its energy carrier, two
    links converting between the carrier and electricity, a store that
    tracks the carrier's depletion (and its CO2 emissions), and a
    variable generator for the storage inflow.  Because a storage unit
    couples energy size to power size via ``max_hours``, an
    ``extra_functionality`` callback is returned to enforce that
    coupling in the LOPF when the unit is extendable.

    Returns a tuple ``(bus_name, link_1_name, link_2_name, store_name,
    gen_name, extra_functionality)`` where ``extra_functionality`` is
    ``None`` for non-extendable units.
    """
    # Fix: removed a leftover debug ``print(inflow_pu, ...)`` statement
    # that polluted stdout on every call.
    su = network.storage_units.loc[su_to_replace]

    bus_name = "{} {}".format(su["bus"], su["carrier"])
    link_1_name = "{} converter {} to AC".format(su_to_replace, su["carrier"])
    link_2_name = "{} converter AC to {}".format(su_to_replace, su["carrier"])
    store_name = "{} store {}".format(su_to_replace, su["carrier"])
    gen_name = "{} inflow".format(su_to_replace)

    network.add("Bus", bus_name, carrier=su["carrier"])

    # dispatch link (carrier -> AC); costs and limits are rescaled by the
    # dispatch efficiency so p_nom refers to the carrier side of the link
    network.add("Link", link_1_name, bus0=bus_name, bus1=su["bus"],
                capital_cost=su["capital_cost"] * su["efficiency_dispatch"],
                p_nom=su["p_nom"] / su["efficiency_dispatch"],
                p_nom_extendable=su["p_nom_extendable"],
                p_nom_max=su["p_nom_max"] / su["efficiency_dispatch"],
                p_nom_min=su["p_nom_min"] / su["efficiency_dispatch"],
                p_max_pu=su["p_max_pu"],
                marginal_cost=su["marginal_cost"] * su["efficiency_dispatch"],
                efficiency=su["efficiency_dispatch"])

    # store link (AC -> carrier)
    network.add("Link", link_2_name, bus1=bus_name, bus0=su["bus"],
                p_nom=su["p_nom"], p_nom_extendable=su["p_nom_extendable"],
                p_nom_max=su["p_nom_max"], p_nom_min=su["p_nom_min"],
                p_max_pu=-su["p_min_pu"], efficiency=su["efficiency_store"])

    # Honour any state_of_charge_set constraints by pinning both the lower
    # and upper per-unit energy bounds to the prescribed values.
    if (su_to_replace in network.storage_units_t.state_of_charge_set.columns
            and (~pd.isnull(
                network.storage_units_t.state_of_charge_set[su_to_replace])).any()):
        e_max_pu = pd.Series(data=1., index=network.snapshots)
        e_min_pu = pd.Series(data=0., index=network.snapshots)
        non_null = ~pd.isnull(
            network.storage_units_t.state_of_charge_set[su_to_replace])
        e_max_pu[non_null] = network.storage_units_t.state_of_charge_set[
            su_to_replace][non_null]
        e_min_pu[non_null] = network.storage_units_t.state_of_charge_set[
            su_to_replace][non_null]
    else:
        e_max_pu = 1.
        e_min_pu = 0.

    network.add("Store", store_name, bus=bus_name,
                e_nom=su["p_nom"] * su["max_hours"],
                e_nom_min=su["p_nom_min"] / su["efficiency_dispatch"] * su["max_hours"],
                e_nom_max=su["p_nom_max"] / su["efficiency_dispatch"] * su["max_hours"],
                e_nom_extendable=su["p_nom_extendable"],
                e_max_pu=e_max_pu, e_min_pu=e_min_pu,
                standing_loss=su["standing_loss"],
                e_cyclic=su['cyclic_state_of_charge'],
                e_initial=su['state_of_charge_initial'])

    network.add("Carrier", "rain", co2_emissions=0.)

    # inflow from a variable generator, which can be curtailed (i.e. spilled)
    inflow_max = network.storage_units_t.inflow[su_to_replace].max()
    if inflow_max == 0.:
        inflow_pu = 0.
    else:
        inflow_pu = network.storage_units_t.inflow[su_to_replace] / inflow_max

    network.add("Generator", gen_name, bus=bus_name, carrier="rain",
                p_nom=inflow_max, p_max_pu=inflow_pu)

    if su["p_nom_extendable"]:
        ratio2 = su["max_hours"]
        ratio1 = ratio2 * su["efficiency_dispatch"]

        def extra_functionality(network, snapshots):
            # Couple the store's energy size to both links' power sizes.
            model = network.model
            model.store_fix_1 = Constraint(
                rule=lambda model: model.store_e_nom[store_name]
                == model.link_p_nom[link_1_name] * ratio1)
            model.store_fix_2 = Constraint(
                rule=lambda model: model.store_e_nom[store_name]
                == model.link_p_nom[link_2_name] * ratio2)
    else:
        extra_functionality = None

    network.remove("StorageUnit", su_to_replace)

    return (bus_name, link_1_name, link_2_name, store_name, gen_name,
            extra_functionality)
|
def apply_transformer_types(network):
    """Calculate transformer electrical parameters x, r, b, g from
    standard types.

    Transformers with an empty ``type`` are left untouched; for the rest,
    per-unit values are derived from ``network.transformer_types`` and
    written back onto ``network.transformers``.
    """
    trafos_with_types_b = network.transformers.type != ""
    # NOTE(review): ``zsum`` is presumably the nan-safe Series sum helper
    # that this project patches onto pandas -- confirm it exists here.
    if trafos_with_types_b.zsum() == 0:
        return

    missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique()).difference(network.transformer_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types".format(", ".join(missing_types)))

    # Get a copy of the transformers data
    # (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType)
    t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]].join(network.transformer_types, on='type'))

    # Resistance/reactance in per unit (vscr/vsc are percentages).
    t["r"] = t["vscr"] / 100.
    t["x"] = np.sqrt((t["vsc"] / 100.) ** 2 - t["r"] ** 2)

    # NB: b and g are per unit of s_nom
    t["g"] = t["pfe"] / (1000. * t["s_nom"])

    # for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2
    t["b"] = -np.sqrt(((t["i0"] / 100.) ** 2 - t["g"] ** 2).clip(lower=0))

    # Parallel units scale series impedance down and shunt admittance up.
    for attr in ["r", "x"]:
        t[attr] /= t["num_parallel"]
    for attr in ["b", "g"]:
        t[attr] *= t["num_parallel"]

    # deal with tap positions
    t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"] / 100.)

    # now set calculated values on live transformers
    for attr in ["r", "x", "g", "b", "phase_shift", "s_nom", "tap_side", "tap_ratio"]:
        network.transformers.loc[trafos_with_types_b, attr] = t[attr]
|
def get_align(text):
    "Return (halign, valign, angle) of the <text>."
    # Only the alignment/angle fields of the measurement matter here;
    # the two extent values are discarded.
    _, _, halign, valign, angle = unaligned_get_dimension(text)
    return (halign, valign, angle)
|
def DeriveReportKey(cls, root_key, report_id, sent_timestamp):
    """Derive a standard one time use report signing key.

    The key is HMAC-SHA256(root_key, MAGIC_NUMBER || report_id ||
    sent_timestamp), where MAGIC_NUMBER is 0x000002 and every integer is
    packed little endian.
    """
    message = struct.pack("<LLL", AuthProvider.ReportKeyMagic,
                          report_id, sent_timestamp)
    digest = hmac.new(root_key, message, hashlib.sha256).digest()
    return bytearray(digest)
|
def url_to_filename(url):
    """Safely translate url to relative filename.

    Args:
        url (str): A target url string
    Returns:
        str
    """
    # Drop a single leading and a single trailing slash, if present.
    if url[:1] == '/':
        url = url[1:]
    if url[-1:] == '/':
        url = url[:-1]
    # remove pardir symbols to prevent unwilling filesystem access
    url = remove_pardir_symbols(url)
    # replace dots to underscore in filename part
    return replace_dots_to_underscores_at_last(url)
|
def getHelpAsString(docstring=False, show_ver=True):
    """Return useful help from a file in the script directory called
    ``__taskname__.help``
    """
    install_dir = os.path.dirname(__file__)
    taskname = util.base_taskname(__taskname__, __package__)
    htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
    helpfile = os.path.join(install_dir, taskname + '.help')

    if not docstring and os.path.exists(htmlfile):
        # Point the caller at the rendered HTML help instead of text.
        return 'file://' + htmlfile

    helpString = ''
    if show_ver:
        helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format(
            __taskname__, __version__, __version_date__)
    if os.path.exists(helpfile):
        helpString += teal.getHelpFileAsString(taskname, __file__)
    elif __doc__ is not None:
        helpString += __doc__ + os.linesep
    return helpString
|
def geom_iter(self, g_nums):
    """Iterate over a subset of geometries.

    `g_nums` is an iterable of |int| indices selecting the geometries
    to yield.  As with :meth:`geom_single`, each geometry is returned
    as a length-3N |npfloat_| with each atom's x/y/z coordinates
    grouped together::

        [A1x, A1y, A1z, A2x, A2y, A2z, ...]

    In order to use NumPy slicing or advanced indexing, :data:`geoms`
    must first be explicitly converted to |nparray|, e.g.::

        >>> x = opan.xyz.OpanXYZ(path='...')
        >>> np.array(x.geoms)[[2,6,9]]

    Yields
    ------
    geom
        length-3N |npfloat_| --
        Vector of atomic coordinates for each geometry indicated in
        `g_nums`

    Raises
    ------
    ~exceptions.IndexError
        If an item in `g_nums` is invalid (out of range)
    """
    # pack_tups tolerates both scalar and iterable input, so we do not
    # have to care which one the caller provided.
    from .utils import pack_tups
    for packed in pack_tups(g_nums):
        yield self.geom_single(packed[0])
|
def render_to_template(self):
    """Render this menu instance through its template and return a string."""
    data = self.get_context_data()
    menu_template = self.get_template()
    # Expose the template's own name in the context for nested rendering.
    data['current_template'] = menu_template.template.name
    return menu_template.render(data)
|
def generate_cont(self, max_length, state_size, reply_to, backward, dataset):
    """Endlessly generate texts continuing from a start/end string.

    Parameters
    ----------
    max_length : `int` or `None`
        Maximum sentence length.
    state_size : `int`
        State size.
    reply_to : `str` or `None`
        Input string.
    backward : `bool`
        `True` to generate text start.
    dataset : `str`
        Dataset key prefix.

    Returns
    -------
    `generator` of `str`
        Generated texts.
    """
    state = self.get_cont_state(reply_to, backward)
    while True:
        pieces = self.generate(state_size, state, dataset, backward)
        if reply_to is not None:
            if backward:
                # Generated words lead up to the input string.
                pieces = chain(reversed(list(pieces)), (reply_to,))
            else:
                pieces = chain((reply_to,), pieces)
        yield self.format(islice(pieces, 0, max_length))
|
def pid(name):
    '''
    Returns the PID of a container

    name
        Container name

    CLI Example:

    .. code-block:: bash

        salt myminion nspawn.pid arch1
    '''
    try:
        # info() returns a mapping; a missing or malformed 'PID' entry
        # surfaces as TypeError/ValueError from int().
        return int(info(name).get('PID'))
    except (TypeError, ValueError) as err:
        raise CommandExecutionError(
            'Unable to get PID for container \'{0}\': {1}'.format(name, err)
        )
|
def get_queryset(self, *args, **kwargs):
    """Ensures that this manager always returns nodes in tree order."""
    ordered = super(TreeManager, self).get_queryset(
        *args, **kwargs).order_by(self.tree_id_attr, self.left_attr)
    # Restrict operations to pages on the current site if needed
    if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID:
        ordered = ordered.filter(sites=settings.SITE_ID)
    return ordered
|
def _copy(self):
    """Copy this instance; its IonEvent (if any) is not preserved.

    Keeping this protected until/unless we decide there's use for it
    publicly.
    """
    args, kwargs = self._to_constructor_args(self)
    clone = self.__class__(*args, **kwargs)
    # The event is deliberately dropped; type and annotations carry over.
    clone.ion_event = None
    clone.ion_type = self.ion_type
    clone.ion_annotations = self.ion_annotations
    return clone
|
def xml_path_completion(xml_path):
    """Takes in a local xml path and returns a full path.

    If @xml_path is absolute it is returned unchanged; otherwise it is
    resolved against the assets shipped with the package.
    """
    if xml_path.startswith("/"):
        return xml_path
    return os.path.join(robosuite.models.assets_root, xml_path)
|
def from_xdr_object(cls, op_xdr_object):
    """Creates a :class:`ChangeTrust` object from an XDR Operation
    object.
    """
    source = None
    if op_xdr_object.sourceAccount:
        source = encode_check(
            'account', op_xdr_object.sourceAccount[0].ed25519).decode()
    change_trust_op = op_xdr_object.body.changeTrustOp
    line = Asset.from_xdr_object(change_trust_op.line)
    limit = Operation.from_xdr_amount(change_trust_op.limit)
    return cls(source=source, asset=line, limit=limit)
|
def get_name(self, language):
    """Return the (possibly translated) name of this course."""
    if not self._name:
        return ""
    return self.gettext(language, self._name)
|
def idxmin(self, axis=0, skipna=True):
    """Return index of first occurrence of minimum over requested axis.

    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the
        result will be NA.

    Returns
    -------
    Series
        Indexes of minima along the specified axis.

    Raises
    ------
    ValueError
        * If the row/column is empty

    See Also
    --------
    Series.idxmin

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmin``.
    """
    axis_num = self._get_axis_number(axis)
    positions = nanops.nanargmin(self.values, axis=axis_num, skipna=skipna)
    labels = self._get_axis(axis_num)
    # nanargmin signals an all-NA slice with -1; map that to NaN.
    values = []
    for pos in positions:
        values.append(labels[pos] if pos >= 0 else np.nan)
    return Series(values, index=self._get_agg_axis(axis_num))
|
def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False,
              embed_deployments=False, embed_readiness=False,
              embed_last_task_failure=False, embed_failures=False,
              embed_task_stats=False, app_id=None, label=None, **kwargs):
    """List all apps.

    :param str cmd: if passed, only show apps with a matching `cmd`
    :param bool embed_tasks: embed tasks in result
    :param bool embed_counts: embed all task counts
    :param bool embed_deployments: embed all deployment identifier
    :param bool embed_readiness: embed all readiness check results
    :param bool embed_last_task_failure: embeds the last task failure
    :param bool embed_failures: shorthand for embed_last_task_failure
    :param bool embed_task_stats: embed task stats in result
    :param str app_id: if passed, only show apps with an 'id' that matches
        or contains this value
    :param str label: if passed, only show apps with the selected labels
    :param kwargs: arbitrary search filters

    :returns: list of applications
    :rtype: list[:class:`marathon.models.app.MarathonApp`]
    """
    params = {}
    # Only forward the simple filters that were actually supplied.
    for key, value in (('cmd', cmd), ('id', app_id), ('label', label)):
        if value:
            params[key] = value

    embed_flags = {
        'app.tasks': embed_tasks,
        'app.counts': embed_counts,
        'app.deployments': embed_deployments,
        'app.readiness': embed_readiness,
        'app.lastTaskFailure': embed_last_task_failure,
        'app.failures': embed_failures,
        'app.taskStats': embed_task_stats,
    }
    requested_embeds = [flag for flag, wanted in embed_flags.items() if wanted]
    if requested_embeds:
        params['embed'] = requested_embeds

    response = self._do_request('GET', '/v2/apps', params=params)
    apps = self._parse_response(
        response, MarathonApp, is_list=True, resource_name='apps')
    # Apply any remaining attribute filters client-side.
    for attr, wanted in kwargs.items():
        apps = [app for app in apps if getattr(app, attr) == wanted]
    return apps
|
def __remove_trailing_zeros(self, collection):
    """Return ``collection`` without its trailing zero entries."""
    # Walk backwards to find the position just past the last non-zero
    # element; if every element is zero this stays at 0.
    cutoff = 0
    for position in range(len(collection), 0, -1):
        if collection[position - 1] != 0:
            cutoff = position
            break
    return collection[:cutoff]
|
def endpoint_is_activated(endpoint_id, until, absolute_time):
    """Executor for `globus endpoint is-activated`.

    Exits the CLI process with status 0 when the endpoint is (or will
    remain) activated and status 1 otherwise; nothing is returned.
    """
    client = get_client()
    res = client.endpoint_get_activation_requirements(endpoint_id)

    def fail(deadline=None):
        # Print why activation is insufficient, then exit with status 1.
        exp_string = ""
        if deadline is not None:
            exp_string = " or will expire within {} seconds".format(deadline)
        message = "The endpoint is not activated{}.\n\n".format(exp_string) + activation_requirements_help_text(res, endpoint_id)
        formatted_print(res, simple_text=message)
        click.get_current_context().exit(1)

    def success(msg, *format_params):
        # Print the success message (endpoint_id fills the first slot),
        # then exit with status 0.
        formatted_print(res, simple_text=(msg.format(endpoint_id, *format_params)))
        click.get_current_context().exit(0)

    # NOTE: success()/fail() terminate the command via Context.exit(), so
    # control never continues past a call to either of them.
    # eternally active endpoints have a special expires_in value
    if res["expires_in"] == -1:
        success("{} does not require activation")
    # autoactivation is not supported and --until was not passed
    if until is None:
        # and we are active right now (0s in the future)...
        if res.active_until(0):
            success("{} is activated")
        # or we are not active
        fail()
    # autoactivation is not supported and --until was passed
    if res.active_until(until, relative_time=not absolute_time):
        success("{} will be active for at least {} seconds", until)
    else:
        fail(deadline=until)
|
def initialize_state(self):
    """Call this to initialize the state of the UI after everything has been connected."""
    if self.__scan_hardware_source:
        # Wire hardware-source events to the private update handlers; the
        # listener objects are kept on self so they stay alive.
        self.__profile_changed_event_listener = self.__scan_hardware_source.profile_changed_event.listen(self.__update_profile_index)
        self.__frame_parameters_changed_event_listener = self.__scan_hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters)
        self.__data_item_states_changed_event_listener = self.__scan_hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
        self.__acquisition_state_changed_event_listener = self.__scan_hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
        self.__probe_state_changed_event_listener = self.__scan_hardware_source.probe_state_changed_event.listen(self.__probe_state_changed)
        self.__channel_state_changed_event_listener = self.__scan_hardware_source.channel_state_changed_event.listen(self.__channel_state_changed)
        subscan_state_model = self.__scan_hardware_source.subscan_state_model

        def subscan_state_changed(name):
            # Forward subscan model changes to the optional UI callback.
            if callable(self.on_subscan_state_changed):
                self.on_subscan_state_changed(subscan_state_model.value)

        self.__subscan_state_changed_listener = subscan_state_model.property_changed_event.listen(subscan_state_changed)
        # Push the current subscan value immediately.
        subscan_state_changed("value")
        if self.on_display_name_changed:
            self.on_display_name_changed(self.display_name)
        if self.on_subscan_state_changed:
            self.on_subscan_state_changed(self.__scan_hardware_source.subscan_state)
        channel_count = self.__scan_hardware_source.channel_count
        if self.on_channel_count_changed:
            self.on_channel_count_changed(channel_count)
        # Seed per-channel enabled flags from the hardware source state.
        self.__channel_enabled = [False] * channel_count
        for channel_index in range(channel_count):
            channel_id, name, enabled = self.__scan_hardware_source.get_channel_state(channel_index)
            self.__channel_state_changed(channel_index, channel_id, name, enabled)
            self.__channel_enabled[channel_index] = enabled
        self.__update_buttons()
        if self.on_profiles_changed:
            profile_items = list(ScanControlStateController.profiles.items())
            profile_items.sort(key=lambda k_v: k_v[1])
            # NOTE(review): ``map`` yields a lazy iterator; the callback
            # presumably consumes it exactly once -- confirm it does not
            # need a list.
            profiles = map(lambda k_v: k_v[0], profile_items)
            self.on_profiles_changed(profiles)
            self.__update_profile_index(self.__scan_hardware_source.selected_profile_index)
        if self.on_linked_changed:
            self.on_linked_changed(self.__linked)
        if self.on_simulate_button_state_changed:
            use_simulator = self.__scan_hardware_source.use_hardware_simulator
            self.on_simulate_button_state_changed(use_simulator, use_simulator)
        if self.on_data_item_states_changed:
            self.on_data_item_states_changed(list())
        probe_state = self.__scan_hardware_source.probe_state
        probe_position = self.__scan_hardware_source.probe_position
        self.__probe_state_changed(probe_state, probe_position)
|
def p_command(p):
    """command : IDENTIFIER arguments ';'
               | IDENTIFIER arguments block"""
    # PLY grammar action: the docstring above IS the production rule and
    # must not be reworded.
    tests = p[2].get('tests')
    # A command either ends with ';' or carries a nested block.
    block = None
    if p[3] != ';':
        block = p[3]
    handler = sifter.handler.get('command', p[1])
    if handler is None:
        print("No handler registered for command '%s' on line %d" % (p[1], p.lineno(1)))
        raise SyntaxError
    # Instantiate the registered handler as this production's value.
    p[0] = handler(arguments=p[2]['args'], tests=tests, block=block)
|
def to_native_units(self, motor):
    """Return the native speed measurement required to achieve desired rotations-per-minute"""
    rpm = self.rotations_per_minute
    assert abs(rpm) <= motor.max_rpm, "invalid rotations-per-minute: {} max RPM is {}, {} was requested".format(motor, motor.max_rpm, rpm)
    # Scale the requested RPM into the motor's native speed range.
    return rpm / motor.max_rpm * motor.max_speed
|
def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type, budget):
    """Add a Campaign to the client account.

    Args:
      client_customer_id: str Client Customer Id to use when creating Campaign.
      campaign_name: str Name of the campaign to be added.
      ad_channel_type: str Primary serving target the campaign's ads.
      budget: str a budget amount (in micros) to use.
    """
    self.client.SetClientCustomerId(client_customer_id)
    campaign_service = self.client.GetService('CampaignService')
    # A campaign requires an existing budget, so create one first.
    budget_id = self.AddBudget(client_customer_id, budget)
    campaign = {
        'name': campaign_name,
        'status': 'PAUSED',
        'biddingStrategyConfiguration': {
            'biddingStrategyType': 'MANUAL_CPC',
            'biddingScheme': {
                'xsi_type': 'ManualCpcBiddingScheme',
                'enhancedCpcEnabled': 'false',
            },
        },
        'budget': {'budgetId': budget_id},
        'advertisingChannelType': ad_channel_type,
    }
    campaign_service.mutate([{'operator': 'ADD', 'operand': campaign}])
|
def identify_and_tag_DOI(line):
    """takes a single citation line and attempts to locate any DOI references.

    DOI references are recognised in both http (url) format and also the
    standard DOI notation (DOI:...)

    @param line: (string) the reference line in which to search for DOI's.
    @return: the tagged line and a list of DOI strings (if any)
    """
    # Used to hold the DOI strings found in the citation line.
    doi_strings = []
    # Substitute right-to-left so earlier match offsets remain valid.
    for match in reversed(list(re_doi.finditer(line))):
        doi_phrase = match.group('doi')
        # Undo URL-encoding of the '/' separator when present.
        if '%2f' in doi_phrase.lower():
            doi_phrase = unquote(doi_phrase)
        # Replace the entire matched doi with a tag.
        line = line[:match.start()] + "<cds.DOI />" + line[match.end():]
        doi_strings.append(doi_phrase)
    # Restore left-to-right ordering of the collected DOIs.
    doi_strings.reverse()
    return line, doi_strings
|
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
    """Delete a deployed REST API Gateway.

    If ``domain_name`` is given, its base path mapping is removed first
    on a best-effort basis; failures there are ignored because the
    domain may never have been set up.  Falls back to deleting the REST
    APIs directly when the CloudFormation stack delete fails.
    """
    print("Deleting API Gateway..")

    # NOTE(review): api_id is never used below; retained in case
    # get_api_id has a validating side effect -- consider removing.
    api_id = self.get_api_id(lambda_name)

    if domain_name:
        # XXX - Remove Route53 smartly here?
        # XXX - This doesn't raise, but doesn't work either.
        try:
            self.apigateway_client.delete_base_path_mapping(
                domainName=domain_name,
                basePath='(none)' if base_path is None else base_path
            )
        except Exception:
            # Fix: dropped the unused ``as e`` binding.  Deliberate
            # best-effort swallow: we may not have set up the domain.
            pass

    was_deleted = self.delete_stack(lambda_name, wait=True)
    if not was_deleted:
        # try erasing it with the older method
        for api in self.get_rest_apis(lambda_name):
            self.apigateway_client.delete_rest_api(restApiId=api['id'])
|
def push_theme_to_ckan(catalog, portal_url, apikey, identifier=None, label=None):
    """Write a theme's metadata into the given CKAN portal.

    Args:
        catalog (DataJson): Source catalog that contains the theme.
        portal_url (str): URL of the destination CKAN portal.
        apikey (str): API key of a user with permission to create or
            update the dataset.
        identifier (str): Identifier used to look the theme up in the
            taxonomy.
        label (str): Label used to look the theme up in the taxonomy.

    Returns:
        str: Name of the theme in the destination catalog.
    """
    portal = RemoteCKAN(portal_url, apikey=apikey)
    theme = catalog.get_theme(identifier=identifier, label=label)
    created = portal.call_action(
        'group_create', data_dict=map_theme_to_group(theme))
    return created['name']
|
def get_file(self, user, handle):
    """Retrieve a file for a user.

    :returns: a :class:`pathlib.Path` instance to this file,
        or None if no file can be found for this handle.
    """
    user_dir = self.user_dir(user)
    if not user_dir.exists():
        return None
    if not is_valid_handle(handle):
        return None
    file_path = user_dir / handle
    # Bug fix: the original condition ``not exists() and not is_file()``
    # let existing non-regular paths (e.g. directories) through.
    # ``is_file()`` alone suffices -- it returns False for paths that do
    # not exist as well as for non-regular files.
    if not file_path.is_file():
        return None
    return file_path
|
def next(self):
    """Next point in iteration"""
    while True:
        x, y = next(self.scan)
        self.index += 1
        # Skip points before the configured start offset.
        if self.index < self.start:
            continue
        if self.index > self.stop:
            raise StopIteration("skip stopping")
        # Only yield every ``step``-th point within the window.
        if (self.index - self.start) % self.step == 0:
            return x, y
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.