signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def apply_T4(word):  # OPTIMIZE
    """Apply syllabification rule T4.

    An agglutination diphthong that ends in /u, y/ usually contains a
    syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
    [va.ka.ut.taa].

    :param word: word to syllabify
    :returns: the word with '.' syllable boundaries inserted where T4 applies
    """
    WORD = _split_consonants_and_vowels(word)
    # BUG FIX: dict.iteritems() is Python 2 only; this file uses f-strings
    # elsewhere, so it runs under Python 3 where only .items() exists.
    # Only values are rewritten in the loop, never keys, so iterating the
    # live view is safe.
    for k, v in WORD.items():
        if len(v) == 2 and v.endswith(('u', 'y')):
            if WORD.get(k + 2, 0):
                if not WORD.get(k + 3, 0):
                    # -C#: a single word-final consonant follows.
                    if len(WORD[k + 2]) == 1 and is_consonant(WORD[k + 2]):
                        WORD[k] = v[0] + '.' + v[1]
                elif len(WORD[k + 1]) == 1 and WORD.get(k + 3, 0):
                    # -CCV: a consonant cluster followed by more material.
                    if is_consonant(WORD[k + 3][0]):
                        WORD[k] = v[0] + '.' + v[1]
                elif len(WORD[k + 2]) == 2:
                    WORD[k] = v[0] + '.' + v[1]
    word = _compile_dict_into_word(WORD)
    return word
|
def copy(self):
    """Return a new Project instance, deep-copying all the attributes."""
    new_project = Project()
    new_project.name = self.name
    new_project.path = self.path
    new_project._plugin = self._plugin

    # The stage keeps a back-reference to its owning project.
    new_project.stage = self.stage.copy()
    new_project.stage.project = new_project

    for sprite in self.sprites:
        copied_sprite = sprite.copy()
        copied_sprite.project = new_project
        new_project.sprites.append(copied_sprite)

    for actor in self.actors:
        if isinstance(actor, Sprite):
            # Sprites were already copied above; reuse the copy by name so
            # the actor list and sprite list share the same objects.
            new_project.actors.append(new_project.get_sprite(actor.name))
            continue
        copied_actor = actor.copy()
        if isinstance(copied_actor, Watcher):
            # Re-point the watcher's target at the corresponding copy.
            if isinstance(copied_actor.target, Project):
                copied_actor.target = new_project
            elif isinstance(copied_actor.target, Stage):
                copied_actor.target = new_project.stage
            else:
                copied_actor.target = new_project.get_sprite(copied_actor.target.name)
        new_project.actors.append(copied_actor)

    new_project.variables = {n: v.copy() for (n, v) in self.variables.items()}
    new_project.lists = {n: l.copy() for (n, l) in self.lists.items()}
    new_project.thumbnail = self.thumbnail
    new_project.tempo = self.tempo
    new_project.notes = self.notes
    new_project.author = self.author
    return new_project
|
def get_result(db, result_id):
    """Look up the job that produced a given output.

    :param db:
        a :class:`openquake.server.dbapi.Db` instance
    :param result_id:
        a result ID
    :returns:
        a tuple ``(job_id, job_status, user_name, datadir, datastore_key)``
    """
    # Join job and output on the owning job id; ?x is the dbapi placeholder.
    job = db('SELECT job.*, ds_key FROM job, output WHERE '
             'oq_job_id=job.id AND output.id=?x', result_id, one=True)
    # datadir is the directory containing the job's datastore file.
    return (job.id, job.status, job.user_name,
            os.path.dirname(job.ds_calc_dir), job.ds_key)
|
def order(order_book_id, quantity, price=None, style=None):
    """Smart position-adjustment order that works for all instrument types.

    If ``price`` is not given, this behaves like a MarketOrder.
    If ``order_book_id`` is a stock, this is equivalent to ``order_shares``.
    If ``order_book_id`` is a future, a smart order is placed:

    * ``quantity`` is the adjustment amount;
    * a positive ``quantity`` first closes Sell-side positions, then opens
      Buy-side positions;
    * a negative ``quantity`` first closes Buy-side positions, then opens
      Sell-side positions.

    :param order_book_id: instrument to trade
    :type order_book_id: :class:`~Instrument` object | `str`
    :param int quantity: adjustment amount
    :param float price: order price
    :param style: order type, market order by default; supported types are
        :class:`~LimitOrder` and :class:`~MarketOrder`
    :type style: `OrderStyle` object
    :return: list[:class:`~Order`]
    """
    resolved_style = cal_style(price, style)
    portfolio = Environment.get_instance().portfolio
    result = portfolio.order(order_book_id, quantity, resolved_style)
    # Normalise the return value: a single Order becomes a one-element list.
    return [result] if isinstance(result, Order) else result
|
def help(file=None):
    """Print out syntax help for running astrodrizzle.

    Parameters
    ----------
    file : str (Default = None)
        If given, write out help to the filename specified by this parameter.
        Any previously existing file with this name will be deleted before
        writing out the help.
    """
    helpstr = getHelpAsString(docstring=True, show_ver=True)
    if file is None:
        print(helpstr)
    else:
        if os.path.exists(file):
            os.remove(file)
        # BUG FIX: the original left the file handle open if write() raised;
        # a context manager guarantees it is closed.
        with open(file, mode='w') as f:
            f.write(helpstr)
|
def del_view_menu(self, name):
    """Deletes a ViewMenu from the backend.

    :param name: name of the ViewMenu
    """
    view_menu = self.find_view_menu(name)
    if not view_menu:
        return
    try:
        view_menu.delete()
    except Exception as e:
        # Deletion failures are logged, not propagated.
        log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
|
def match_replace(cls, ops, kwargs):
    """Match and replace a full operand specification to a function that
    provides a replacement for the whole expression
    or raises a :exc:`.CannotSimplify` exception.

    E.g.

    First define an operation::

        >>> class Invert(Operation):
        ...     _rules = OrderedDict()
        ...     simplifications = [match_replace, ]

    Then some _rules::

        >>> A = wc("A")
        >>> A_float = wc("A", head=float)
        >>> Invert_A = pattern(Invert, A)
        >>> Invert._rules.update([
        ...     ('r1', (pattern_head(Invert_A), lambda A: A)),
        ...     ('r2', (pattern_head(A_float), lambda A: 1./A)),

    Check rule application::

        >>> print(srepr(Invert.create("hallo")))  # matches no rule
        Invert('hallo')
        >>> Invert.create(Invert("hallo"))  # matches first rule
        'hallo'
        >>> Invert.create(.2)  # matches second rule
        5.0

    A pattern can also have the same wildcard appear twice::

        >>> class X(Operation):
        ...     _rules = {
        ...         'r1': (pattern_head(A, A), lambda A: A),
        ...     simplifications = [match_replace, ]
        >>> X.create(1,2)
        X(1, 2)
        >>> X.create(1,1)
    """
    expr = ProtoExpr(ops, kwargs)
    if LOG:
        logger = logging.getLogger('QNET.create')
    # Try the class's rules in insertion order; the first rule whose pattern
    # matches AND whose replacement does not raise CannotSimplify wins.
    for key, rule in cls._rules.items():
        pat, replacement = rule
        match_dict = match_pattern(pat, expr)
        if match_dict:
            try:
                replaced = replacement(**match_dict)
                if LOG:
                    logger.debug(
                        "%sRule %s.%s: (%s, %s) -> %s", (" " * (LEVEL)),
                        cls.__name__, key, expr.args, expr.kwargs, replaced)
                return replaced
            except CannotSimplify:
                # Rule matched but declined; fall through to the next rule.
                # NOTE(review): `logger` is only bound when LOG is truthy but
                # is used whenever LOG_NO_MATCH is — assumes LOG_NO_MATCH
                # implies LOG; confirm against the module globals.
                if LOG_NO_MATCH:
                    logger.debug(
                        "%sRule %s.%s: no match: CannotSimplify",
                        (" " * (LEVEL)), cls.__name__, key)
                continue
        else:
            if LOG_NO_MATCH:
                # match_dict is falsy here; .reason explains the failure.
                logger.debug(
                    "%sRule %s.%s: no match: %s", (" " * (LEVEL)),
                    cls.__name__, key, match_dict.reason)
    # No matching rules: hand the unmodified operands back to the caller.
    return ops, kwargs
|
def parse_factor_line(line):
    """Parse a single factor-file line. Used by fac2real().

    Parameters
    ----------
    line : str
        a factor line from a factor file

    Returns
    -------
    inode : int
        the inode of the grid node
    itrans : int
        flag for transformation of the grid node
    fac_data : dict
        mapping of zero-based point number -> interpolation factor
    """
    raw = line.strip().split()
    inode, itrans, nfac = [int(i) for i in raw[:3]]
    # raw[3] is unused; the (point, factor) pairs start at index 4.
    # Point numbers are shifted to zero-based to sync with pandas indexing.
    fac_data = {int(raw[ifac]) - 1: float(raw[ifac + 1])
                for ifac in range(4, 4 + nfac * 2, 2)}
    return inode, itrans, fac_data
|
def getStateCode(self, state):
    """Calculates the state code for a specific state or set of states.

    The states are shifted so every component is nonnegative, then an inner
    product with ``self.statecode`` is taken. The result is unique because
    ``statecode`` encodes a numeral system with a large enough base.
    """
    shifted = state - self.minvalues
    return np.dot(shifted, self.statecode)
|
def data(self):
    """Values in request body."""
    if self._data is not None:
        return self._data
    self._data = self.arg_container()
    if isinstance(self.fieldstorage.value, list):
        for key in self.fieldstorage.keys():
            filename = self.fieldstorage[key].filename
            if filename:
                # File upload: store (filename, file object).
                self._data[key] = (filename, self.fieldstorage[key].file)
            else:
                self._data[key] = self.fieldstorage.getfirst(key)
    return self._data
|
def msg_curse(self, args=None, max_width=None):
    """Return the dict to display in the curse interface."""
    ret = []
    # Nothing to display when there are no stats or the plugin is disabled.
    if not self.stats or self.is_disable():
        return ret

    # Header: title, trend arrow and percent usage.
    ret.append(self.curse_add_line('{}'.format('SWAP'), "TITLE"))
    trend = self.trend_msg(self.get_trend('percent'))
    ret.append(self.curse_add_line(' {:3}'.format(trend)))
    ret.append(self.curse_add_line('{:>6.1%}'.format(self.stats['percent'] / 100)))

    # Total memory usage.
    ret.append(self.curse_new_line())
    ret.append(self.curse_add_line('{:8}'.format('total:')))
    ret.append(self.curse_add_line('{:>6}'.format(self.auto_unit(self.stats['total']))))

    # Used memory usage ('used' carries a view-dependent decoration).
    ret.append(self.curse_new_line())
    ret.append(self.curse_add_line('{:8}'.format('used:')))
    ret.append(self.curse_add_line(
        '{:>6}'.format(self.auto_unit(self.stats['used'])),
        self.get_views(key='used', option='decoration')))

    # Free memory usage.
    ret.append(self.curse_new_line())
    ret.append(self.curse_add_line('{:8}'.format('free:')))
    ret.append(self.curse_add_line('{:>6}'.format(self.auto_unit(self.stats['free']))))

    return ret
|
def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
    """Creates a co-routine for retrieving data up to a requested size.

    Args:
        length (int): The minimum length requested.
        whence (Coroutine): The co-routine to return to after the data is satisfied.
        ctx (_HandlerContext): The context for the read.
        skip (Optional[bool]): Whether the requested number of bytes should be skipped.
        stream_event (Optional[IonEvent]): The stream event to return if no bytes are read
            or available.
    """
    trans = None
    queue = ctx.queue
    if length > ctx.remaining:
        raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))
    # Make sure to check the queue first.
    queue_len = len(queue)
    if queue_len > 0:
        # Any data available means we can only be incomplete.
        stream_event = ION_STREAM_INCOMPLETE_EVENT
    length -= queue_len
    if skip:
        # For skipping we need to consume any remnant in the buffer queue.
        if length >= 0:
            queue.skip(queue_len)
        else:
            # Only part of the buffered data is skipped; the rest stays queued.
            queue.skip(queue_len + length)
    while True:
        # Hand the pending transition to the driver and wait for more data.
        data_event, self = (yield trans)
        if data_event is not None and data_event.data is not None:
            data = data_event.data
            data_len = len(data)
            if data_len > 0:
                # We got something so we can only be incomplete.
                stream_event = ION_STREAM_INCOMPLETE_EVENT
            length -= data_len
            if not skip:
                queue.extend(data)
            else:
                pos_adjustment = data_len
                if length < 0:
                    pos_adjustment += length
                    # More data than we need to skip, so make sure to accumulate that remnant.
                    queue.extend(data[length:])
                queue.position += pos_adjustment
        if length <= 0:
            # We got all the data we need, go back immediately.
            yield Transition(None, whence)
        trans = Transition(stream_event, self)
|
def eval(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=-1):
    """Compiles and runs program, returning the values on the stack.

    To return the machine instead, see execute().

    Args:
        optimize: Whether to optimize the code after parsing it.
        output: Stream which program can write output to.
        input: Stream which program can read input from.
        steps: An optional maximum number of instructions to execute on the
            virtual machine. Set to -1 for no limit.

    Returns:
        None: If the stack is empty
        obj: If the stack contains a single value
        [obj, obj, ...]: If the stack contains many values
    """
    machine = execute(source, optimize=optimize, output=output,
                      input=input, steps=steps)
    stack = machine.stack
    if not stack:
        return None
    if len(stack) == 1:
        return stack[-1]
    return stack
|
def from_json(cls, data):
    """Create a location from a dictionary.

    Args:
        data: dict with any of the keys 'city', 'state', 'country',
            'latitude', 'longitude', 'time_zone', 'elevation',
            'station_id', 'source'. Missing keys default to None.
    """
    optional_keys = ('city', 'state', 'country', 'latitude', 'longitude',
                     'time_zone', 'elevation', 'station_id', 'source')
    # BUG FIX: read with .get() so the caller's dict is not mutated (the
    # previous implementation inserted None entries into the input mapping).
    values = {key: data.get(key) for key in optional_keys}
    return cls(values['city'], values['state'], values['country'],
               values['latitude'], values['longitude'], values['time_zone'],
               values['elevation'], values['station_id'], values['source'])
|
def set_stats_params(self, address=None, enable_http=None, minify=None,
                     no_cores=None, no_metrics=None, push_interval=None):
    """Enables stats server on the specified address.

    * http://uwsgi.readthedocs.io/en/latest/StatsServer.html

    :param str|unicode address: Address/socket to make stats available on.
        Examples:
            * 127.0.0.1:1717
            * /tmp/statsock
            * :5050
    :param bool enable_http: Server stats over HTTP.
        Prefixes stats server json output with http headers.
    :param bool minify: Minify statistics json output.
    :param bool no_cores: Disable generation of cores-related stats.
    :param bool no_metrics: Do not include metrics in stats output.
    :param int push_interval: Set the default frequency of stats pushers
        in seconds.
    """
    self._set('stats-server', address)
    # Boolean toggles share the same casting path.
    for option, value in (
            ('stats-http', enable_http),
            ('stats-minified', minify),
            ('stats-no-cores', no_cores),
            ('stats-no-metrics', no_metrics)):
        self._set(option, value, cast=bool)
    self._set('stats-pusher-default-freq', push_interval)
    return self._section
|
def load(self, *modules):
    """Load one or more modules.

    Args:
        modules: Either a string full path to a module or an actual module
            object.
    """
    for module in modules:
        if isinstance(module, six.string_types):
            try:
                # Resolve the dotted path to the actual module object.
                module = get_object(module)
            except Exception as e:
                # Keyed by the original string path, since resolution failed.
                self.errors[module] = e
                continue
        self.modules[module.__package__] = module
        # Recursively discover submodules of this package.
        # NOTE(review): assumes `module` is a package (has __path__) — a
        # plain module would raise AttributeError here; confirm callers.
        for (loader, module_name, is_pkg) in pkgutil.walk_packages(module.__path__):
            full_name = "{}.{}".format(_package(module), module_name)
            try:
                self.modules[full_name] = get_object(full_name)
                if is_pkg:
                    # Descend into subpackages.
                    self.load(self.modules[full_name])
            except Exception as e:
                self.errors[full_name] = e
|
def extend(self, itemseq):
    """Add sequence of elements to end of ParseResults list of elements.

    Example::
        patt = OneOrMore(Word(alphas))
        # use a parse action to append the reverse of the matched strings,
        # to make a palindrome
        def make_palindrome(tokens):
            tokens.extend(reversed([t[::-1] for t in tokens]))
            return ''.join(tokens)
        print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd"))
        # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
    """
    if not isinstance(itemseq, ParseResults):
        # Plain iterable: extend the underlying token list directly.
        self.__toklist.extend(itemseq)
        return
    # Another ParseResults goes through __iadd__ so names are preserved.
    self += itemseq
|
def diskwarp(src_ds, res=None, extent=None, t_srs=None, r='cubic', outdir=None,
             dst_fn=None, dst_ndv=None, verbose=True):
    """Helper function that calls warp for a single input Dataset with
    output to disk (GDAL GeoTiff driver)."""
    if dst_fn is None:
        src_fn = src_ds.GetFileList()[0]
        dst_fn = os.path.splitext(src_fn)[0] + '_warp.tif'
    if outdir is not None:
        dst_fn = os.path.join(outdir, os.path.basename(dst_fn))
    dst_ds = warp(src_ds, res, extent, t_srs, r, iolib.gtif_drv, dst_fn,
                  dst_ndv=dst_ndv, verbose=verbose)
    # Drop the reference so GDAL flushes the dataset to disk, then reopen
    # it from the written file.
    dst_ds = None
    return gdal.Open(dst_fn)
|
def stop(self):
    """Stops the external measurement program and returns the measurement
    result, if the measurement was running."""
    if not self.is_running():
        return None
    # cpu-energy-meter expects SIGINT to stop and report its result
    self._measurement_process.send_signal(signal.SIGINT)
    (out, err) = self._measurement_process.communicate()
    assert self._measurement_process.returncode is not None
    if self._measurement_process.returncode:
        logging.debug("Energy measurement terminated with return code %s",
                      self._measurement_process.returncode)
    self._measurement_process = None

    for line in err.splitlines():
        logging.debug("energy measurement stderr: %s", line)

    # {cpu_index: {domain_name: energy_in_joules}}
    consumed_energy = collections.defaultdict(dict)
    for raw_line in out.splitlines():
        text = raw_line.decode('ASCII')
        logging.debug("energy measurement output: %s", text)
        match = re.match(r'cpu(\d+)_([a-z]+)_joules=(\d+\.?\d*)', text)
        if match is None:
            continue
        cpu, domain, energy = match.groups()
        consumed_energy[int(cpu)][domain] = Decimal(energy)
    return consumed_energy
|
def _sane_version_list ( version ) :
"""Ensure the major and minor are int .
Parameters
version : list
Version components
Returns
version : list
List of components where first two components has been sanitised"""
|
v0 = str ( version [ 0 ] )
if v0 : # Test if the major is a number .
try :
v0 = v0 . lstrip ( "v" ) . lstrip ( "V" )
# Handle the common case where tags have v before major .
v0 = int ( v0 )
except ValueError :
v0 = None
if v0 is None :
version = [ 0 , 0 ] + version
else :
version [ 0 ] = v0
try : # Test if the minor is a number .
version [ 1 ] = int ( version [ 1 ] )
except ValueError : # Insert Minor 0.
version = [ version [ 0 ] , 0 ] + version [ 1 : ]
return version
|
def match_filtered_identities(self, fa, fb):
    """Determine if two filtered identities are the same.

    The method compares the email addresses of each filtered identity
    to check if they are the same. When the given filtered identities
    are the same object or share the same UUID, this will also
    produce a positive match.

    Identities whose email addresses are in the blacklist will be
    ignored and the result of the comparison will be false.

    :param fa: filtered identity to match
    :param fb: filtered identity to match
    :returns: True when both filtered identities are likely to be the same.
        Otherwise, returns False.
    :raises ValueError: when any of the given filtered identities is not
        an instance of EmailIdentity class.
    """
    # BUG FIX: the error messages previously named the wrong classes
    # ("UniqueIdentity" / "EmailNameIdentity"); the documented contract
    # requires EmailIdentity.
    if not isinstance(fa, EmailIdentity):
        raise ValueError("<fa> is not an instance of EmailIdentity")
    if not isinstance(fb, EmailIdentity):
        raise ValueError("<fb> is not an instance of EmailIdentity")
    if fa.uuid and fb.uuid and fa.uuid == fb.uuid:
        return True
    if fa.email in self.blacklist:
        # fb needs no separate check: a positive match below requires
        # fa.email == fb.email, so a blacklisted fb implies a blacklisted fa.
        return False
    # Compare email addresses.
    if fa.email and fa.email == fb.email:
        return True
    return False
|
def plot_survival(self, on, how="os", survival_units="Days", strata=None,
                  ax=None, ci_show=False, with_condition_color="#B38600",
                  no_condition_color="#A941AC", with_condition_label=None,
                  no_condition_label=None, color_map=None, label_map=None,
                  color_palette="Set2", threshold=None, **kwargs):
    """Plot a Kaplan-Meier survival curve by splitting the cohort into two groups.

    Parameters
    ----------
    on : str or function or list or dict
        See `cohort.load.as_dataframe`
    how : {"os", "pfs"}, optional
        Whether to plot OS (overall survival) or PFS (progression-free survival)
    survival_units : str
        Unit of time for the survival measure, i.e. Days or Months
    strata : str
        (optional) column name of stratifying variable
    ci_show : bool
        Display the confidence interval around the survival curve
    threshold : int, "median", "median-per-strata" or None (optional)
        Threshold of `col` on which to split the cohort
    """
    assert how in ["os", "pfs"], "Invalid choice of survival plot type %s" % how
    cols, df = self.as_dataframe(on, return_cols=True, **kwargs)
    plot_col = self.plot_col_from_cols(cols=cols, only_allow_one=True)
    df = filter_not_null(df, plot_col)
    # Labels and censoring column depend on the survival measure chosen.
    if how == "os":
        ylabel = "Overall Survival (%)"
        censor_col = "deceased"
    else:
        ylabel = "Progression-Free Survival (%)"
        censor_col = "progressed_or_deceased"
    return plot_kmf(
        df=df,
        condition_col=plot_col,
        xlabel=survival_units,
        ylabel=ylabel,
        censor_col=censor_col,
        survival_col=how,
        strata_col=strata,
        threshold=threshold,
        ax=ax,
        ci_show=ci_show,
        with_condition_color=with_condition_color,
        no_condition_color=no_condition_color,
        with_condition_label=with_condition_label,
        no_condition_label=no_condition_label,
        color_palette=color_palette,
        label_map=label_map,
        color_map=color_map,
    )
|
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Still inside a hunk: count every line present in the new file
            # (anything that is not a removal).
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            # Missing counts default to 1 per unified-diff conventions.
            (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
            # NOTE(review): assumes every '@@' header follows a '+++' header
            # (true for well-formed diffs); otherwise rv[path] raises KeyError.
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            # New-file header: start a fresh row set for this path.
            path = line[4:].split('\t', 1)[0]
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    # Keep only files that have rows to consider and match the patterns.
    return dict([(os.path.join(parent, path), rows)
                 for (path, rows) in rv.items()
                 if rows and filename_match(path, patterns)])
|
def setCurrentInspectorRegItem(self, regItem):
    """Sets the current inspector given an InspectorRegItem.

    :param regItem: registry item to select; None is explicitly allowed.
    """
    # Fail fast on wrong types before delegating to the tab widget.
    check_class(regItem, InspectorRegItem, allow_none=True)
    self.inspectorTab.setCurrentRegItem(regItem)
|
def validate(self, path, schema, value, results):
    """Validates a given value against this rule.

    At least one of this rule's properties must be present (non-None) on
    the value; otherwise a VALUE_NULL error is appended to ``results``.

    :param path: a dot notation path to the value.
    :param schema: a schema this rule is called from.
    :param value: a value to be validated.
    :param results: a list with validation results to add new results.
    """
    # IDIOM FIX: compare against None with `is not` (PEP 8); `!= None` can
    # misbehave for objects overriding __eq__.
    name = path if path is not None else "value"
    found = [prop for prop in self._properties
             if ObjectReader.get_property(value, prop) is not None]
    if not found:
        results.append(ValidationResult(
            path,
            ValidationResultType.Error,
            "VALUE_NULL",
            name + " must have at least one property from " + str(self._properties),
            self._properties,
            None
        ))
|
def pad_funcs(self, high):
    '''Turns dot delimited function refs into function strings'''
    for name in high:
        if not isinstance(high[name], dict):
            if isinstance(high[name], six.string_types):
                # Is this is a short state? It needs to be padded!
                if '.' in high[name]:
                    comps = high[name].split('.')
                    if len(comps) >= 2:
                        # Merge the comps: everything past the first dot is
                        # the function reference.
                        comps[1] = '.'.join(comps[1:len(comps)])
                    high[name] = {
                        # '__sls__': template,
                        # '__env__': None,
                        comps[0]: [comps[1]]}
                    continue
                continue
        skeys = set()
        for key in sorted(high[name]):
            if key.startswith('_'):
                continue
            if not isinstance(high[name][key], list):
                continue
            if '.' in key:
                comps = key.split('.')
                if len(comps) >= 2:
                    # Merge the comps
                    comps[1] = '.'.join(comps[1:len(comps)])
                # Salt doesn't support state files such as:
                #
                # /etc/redis/redis.conf:
                #   file.managed:
                #     - user: redis
                #     - group: redis
                #     - mode: 644
                #   file.comment:
                #     - regex: ^requirepass
                if comps[0] in skeys:
                    # A second dotted ref with the same module: skip it.
                    continue
                high[name][comps[0]] = high[name].pop(key)
                high[name][comps[0]].append(comps[1])
                skeys.add(comps[0])
                continue
            skeys.add(key)
    return high
|
def _partition_query ( cls , table_name , limit = 0 , order_by = None , filters = None ) :
"""Returns a partition query
: param table _ name : the name of the table to get partitions from
: type table _ name : str
: param limit : the number of partitions to be returned
: type limit : int
: param order _ by : a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
: type order _ by : list of ( str , bool ) tuples
: param filters : dict of field name and filter value combinations"""
|
limit_clause = 'LIMIT {}' . format ( limit ) if limit else ''
order_by_clause = ''
if order_by :
l = [ ]
# noqa : E741
for field , desc in order_by :
l . append ( field + ' DESC' if desc else '' )
order_by_clause = 'ORDER BY ' + ', ' . join ( l )
where_clause = ''
if filters :
l = [ ]
# noqa : E741
for field , value in filters . items ( ) :
l . append ( f"{field} = '{value}'" )
where_clause = 'WHERE ' + ' AND ' . join ( l )
sql = textwrap . dedent ( f"""\
SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
""" )
return sql
|
def _shape_repr ( shape ) :
"""Return a platform independent reprensentation of an array shape
Under Python 2 , the ` long ` type introduces an ' L ' suffix when using the
default % r format for tuples of integers ( typically used to store the shape
of an array ) .
Under Windows 64 bit ( and Python 2 ) , the ` long ` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit .
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable .
Under Python 3 , there is no more ` long ` type so the ` L ` suffix is never
introduced in string representation .
> > > _ shape _ repr ( ( 1 , 2 ) )
' ( 1 , 2 ) '
> > > one = 2 * * 64 / 2 * * 64 # force an upcast to ` long ` under Python 2
> > > _ shape _ repr ( ( one , 2 * one ) )
' ( 1 , 2 ) '
> > > _ shape _ repr ( ( 1 , ) )
> > > _ shape _ repr ( ( ) )"""
|
if len ( shape ) == 0 :
return "()"
joined = ", " . join ( "%d" % e for e in shape )
if len ( shape ) == 1 : # special notation for singleton tuples
joined += ','
return "(%s)" % joined
|
def get_asset_admin_session(self, *args, **kwargs):
    """Gets an asset administration session for creating, updating and
    deleting assets.

    return: (osid.repository.AssetAdminSession) - an
        AssetAdminSession
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_asset_admin() is false
    compliance: optional - This method must be implemented if
        supports_asset_admin() is true.
    """
    if not self.supports_asset_admin():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # NOTE(review): the docstring promises OperationFailed, but the
        # original ImportError is re-raised instead — confirm intent.
        raise
        # OperationFailed()
    try:
        session = sessions.AssetAdminSession(proxy=self._proxy,
                                             runtime=self._runtime, **kwargs)
    except AttributeError:
        # NOTE(review): likewise re-raises instead of OperationFailed().
        raise
        # OperationFailed()
    return session
|
def calc_hexversion(major=0, minor=0, micro=0, releaselevel='dev', serial=0):
    """Calculate the hexadecimal version number from the tuple version_info.

    :param major: integer
    :param minor: integer
    :param micro: integer
    :param releaselevel: integer or string release level
    :param serial: integer
    :return: integer, always increasing with revision numbers
    """
    try:
        level = int(releaselevel)
    except ValueError:
        # Named release levels map to numeric codes; unknown names become 0.
        level = RELEASE_LEVEL_VALUE.get(releaselevel, 0)
    # Pack components into one integer, most to least significant:
    # major . minor . micro . level . serial
    return (int(serial)
            | (level << 4)
            | (int(micro) << 8)
            | (int(minor) << 16)
            | (int(major) << 24))
|
def uninstall(self, tool: Tool, force: bool = False, noprune: bool = False) -> None:
    """Uninstalls all Docker images associated with this tool.

    See: `BuildManager.uninstall`

    :param tool: tool whose image should be uninstalled.
    :param force: passed through to the build manager.
    :param noprune: passed through to the build manager.
    """
    # Delegate to the installation's build manager with the tool's image name.
    self.__installation.build.uninstall(tool.image, force=force, noprune=noprune)
|
def dotter():
    """Write a "." to stdout, wrapping to a new line once more than 80 dots
    have been written on the current line."""
    # globalcount tracks the number of dots on the current output line,
    # shared across calls.
    global globalcount
    if globalcount > 80:
        # Start a new line; the dot beginning it counts as the first.
        sys.stdout.write('\n.')
        globalcount = 1
    else:
        sys.stdout.write('.')
        globalcount += 1
|
def command_getkeys(self, command, *args, encoding='utf-8'):
    """Extract keys given a full Redis command.

    Issues ``COMMAND GETKEYS <command> <args...>`` and returns the reply
    decoded with *encoding*.
    """
    return self.execute(b'COMMAND', b'GETKEYS', command, *args, encoding=encoding)
|
def get_containers(self):
    """Return available containers.

    Only containers whose name passes ``settings.container_permitted`` are
    included.
    """
    # IDIOM FIX: inline the permission check instead of assigning a lambda
    # to a name (PEP 8 E731).
    return [c for c in self._get_containers()
            if settings.container_permitted(c.name)]
|
def put_zonefiles(hostport, zonefile_data_list, timeout=30, my_hostport=None, proxy=None):
    """Push one or more zonefiles to the given server.

    Each zone file in the list must be base64-encoded.

    Return {'status': True, 'saved': [...]} on success
    Return {'error': ...} on error
    """
    # NOTE(review): `timeout` and `my_hostport` are accepted but unused in
    # this body — presumably kept for API symmetry with sibling RPC helpers;
    # confirm.
    assert hostport or proxy, 'need either hostport or proxy'
    # Response schema: one 0/1 saved-flag per pushed zonefile.
    saved_schema = {'type': 'object', 'properties': {'saved': {'type': 'array', 'items': {'type': 'integer', 'minimum': 0, 'maximum': 1, }, 'minItems': len(zonefile_data_list), 'maxItems': len(zonefile_data_list)}, }, 'required': ['saved']}
    schema = json_response_schema(saved_schema)
    if proxy is None:
        proxy = connect_hostport(hostport)
    push_info = None
    try:
        push_info = proxy.put_zonefiles(zonefile_data_list)
        push_info = json_validate(schema, push_info)
        if json_is_error(push_info):
            # Server-side error payloads are returned verbatim.
            return push_info
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except ValidationError as e:
        # Server replied, but not with the expected shape.
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except Exception as ee:
        # Catch-all boundary: report any other failure as a 500.
        if BLOCKSTACK_DEBUG:
            log.exception(ee)
        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
        return resp
    return push_info
|
def ordered(seq, keys=None, default=True, warn=False):
    """Return an iterator of the seq where keys are used to break ties in
    a conservative fashion: if, after applying a key, there are no ties
    then no other keys will be computed.

    Two default keys will be applied if 1) keys are not provided or 2) the
    given keys don't resolve all ties (but only if ``default`` is True). The
    two keys are ``_nodes`` (which places smaller expressions before large)
    and ``default_sort_key`` which (if the ``sort_key`` for an object is
    defined properly) should resolve any ties.

    If ``warn`` is True then an error will be raised if there were no
    keys remaining to break ties. This can be used if it was expected that
    there should be no ties between items that are not identical.

    Examples
    ========

    >>> from sympy.utilities.iterables import ordered
    >>> from sympy import count_ops
    >>> from sympy.abc import x, y

    The count_ops is not sufficient to break ties in this list and the first
    two items appear in their original order (i.e. the sorting is stable):

    >>> list(ordered([y + 2, x + 2, x**2 + y + 3],
    ...     count_ops, default=False, warn=False))
    [y + 2, x + 2, x**2 + y + 3]

    The default_sort_key allows the tie to be broken:

    >>> list(ordered([y + 2, x + 2, x**2 + y + 3]))
    [x + 2, y + 2, x**2 + y + 3]

    Here, sequences are sorted by length, then sum:

    >>> seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]], [
    ...     lambda x: len(x),
    ...     lambda x: sum(x)]]
    >>> list(ordered(seq, keys, default=False, warn=False))
    [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]

    If ``warn`` is True, an error will be raised if there were not
    enough keys to break ties:

    >>> list(ordered(seq, keys, default=False, warn=True))
    Traceback (most recent call last):
    ValueError: not enough keys to break ties

    Notes
    =====

    The decorated sort is one of the fastest ways to sort a sequence for
    which special item comparison is desired: the sequence is decorated,
    sorted on the basis of the decoration (e.g. making all letters lower
    case) and then undecorated. If one wants to break ties for items that
    have the same decorated value, a second key can be used. But if the
    second key is expensive to compute then it is inefficient to decorate
    all items with both keys: only those items having identical first key
    values need to be decorated. This function applies keys successively
    only when needed to break ties. By yielding an iterator, use of the
    tie-breaker is delayed as long as possible.

    This function is best used in cases when use of the first key is
    expected to be a good hashing function; if there are no unique hashes
    from application of a key then that key should not have been used. The
    exception, however, is that even if there are many collisions, if the
    first group is small and one does not need to process all items in the
    list then time will not be wasted sorting what one was not interested
    in. For example, if one were looking for the minimum in a list and
    there were several criteria used to define the sort order, then this
    function would be good at returning that quickly if the first group
    of candidates is small relative to the number of items being processed.
    """
    # Group items by the value of the first key; remaining keys are applied
    # lazily, only to groups that still contain ties.
    d = defaultdict(list)
    if keys:
        if not isinstance(keys, (list, tuple)):
            keys = [keys]
        keys = list(keys)
        f = keys.pop(0)
        for a in seq:
            d[f(a)].append(a)
    else:
        if not default:
            raise ValueError('if default=False then keys must be provided')
        # No keys at all: put everything in one group and rely on defaults.
        d[None].extend(seq)
    for k in sorted(d.keys()):
        if len(d[k]) > 1:
            if keys:
                # Recurse with the remaining keys to break this tie.
                d[k] = ordered(d[k], keys, default, warn)
            elif default:
                # Fall back to the two default keys, with default=False so
                # the recursion terminates.
                d[k] = ordered(d[k], (_nodes, default_sort_key,),
                               default=False, warn=warn)
            elif warn:
                from sympy.utilities.iterables import uniq
                u = list(uniq(d[k]))
                if len(u) > 1:
                    raise ValueError('not enough keys to break ties: %s' % u)
        for v in d[k]:
            yield v
        # Free the group as soon as it has been yielded.
        d.pop(k)
|
def removeChild(self, position):
    """Remove the child item at index *position*.

    The child's ``finalize()`` is called first so it can close its
    resources before it is dropped from ``self.childItems``.

    :param position: index of the child to remove; must satisfy
        ``0 <= position < len(self.childItems)``.
    """
    # Upper bound must be exclusive: childItems.pop(len(childItems))
    # would raise IndexError, so position == len(childItems) is invalid.
    assert 0 <= position < len(self.childItems), \
        "position should be 0 <= {} < {}".format(position, len(self.childItems))
    self.childItems[position].finalize()
    self.childItems.pop(position)
|
def normal_prior(value, mean, sigma):
    """Log of a normal prior density evaluated at *value*.

    NOTE(review): the quadratic term divides by ``2.0 * sigma`` (not
    ``sigma ** 2``), so ``sigma`` is treated as the *variance* here —
    TODO confirm against callers.
    """
    # Normalization constant is -0.5 * log(2*pi*var); the original
    # omitted the log, which made the constant term wrong.
    return -0.5 * np.log(2 * np.pi * sigma) - (value - mean) ** 2 / (2.0 * sigma)
|
def reset_time(self, time, true_anom, elongan, eincl):
    """Reset cached state for a new timestamp.

    Stores the supplied time and orientation parameters (names suggest
    true anomaly, longitude of ascending node, inclination — confirm),
    clears the per-time population list, and delegates to :meth:`reset`.
    """
    self.time = time
    self.true_anom = true_anom
    self.elongan = elongan
    self.eincl = eincl
    self.populated_at_time = []
    self.reset()
|
def load(value: Any, type_: Type[T], **kwargs) -> T:
    """Convenience wrapper that loads *value* into *type_*.

    Builds a throwaway :class:`dataloader.Loader` configured with
    ``**kwargs`` so callers using only default parameters do not have
    to manage a Loader instance themselves.
    """
    from . import dataloader
    return dataloader.Loader(**kwargs).load(value, type_)
|
def list_returner_functions(*args, **kwargs):  # pylint: disable=unused-argument
    '''List the functions for all returner modules. Optionally, specify a returner
    module or modules from which to list.

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' sys.list_returner_functions
        salt '*' sys.list_returner_functions mysql
        salt '*' sys.list_returner_functions mysql etcd

    Returner names can be specified as globs.

    .. versionadded:: 2015.5.0

    .. code-block:: bash

        salt '*' sys.list_returner_functions 'sqlite3.get_*'
    '''
    # **kwargs only swallows garbage arguments tacked onto the CLI call.
    returners_ = salt.loader.returners(__opts__, [])
    if not args:
        # No filter given: list every loaded returner function.
        return sorted(returners_)
    matches = set()
    for pattern in args:
        if '*' in pattern or '.' in pattern:
            # Glob (or fully qualified name): match against full names.
            matches.update(fnmatch.filter(returners_, pattern))
        else:
            # Bare module name: require the "module." prefix so that
            # e.g. "sys" does not also match "sysctl".
            prefix = pattern + '.'
            matches.update(f for f in returners_ if f.startswith(prefix))
    return sorted(matches)
|
def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution.

    Runs the wrapped layer once on *inputs* with its activation
    temporarily disabled, then rescales the weight-norm gain ``g`` and
    bias so the observed pre-activation outputs have zero mean and unit
    variance over ``self.norm_axes``.
    """
    with tf.variable_scope("data_dep_init"):  # Generate data dependent init values
        # Temporarily strip the activation so we observe raw
        # pre-activation statistics.
        activation = self.layer.activation
        self.layer.activation = None
        x_init = self.layer.call(inputs)
        m_init, v_init = tf.moments(x_init, self.norm_axes)
        # 1e-10 guards against division by zero for degenerate variance.
        scale_init = 1. / tf.sqrt(v_init + 1e-10)
        # Assign data dependent init values
        self.layer.g = self.layer.g * scale_init
        self.layer.bias = (-m_init * scale_init)
        self.layer.activation = activation
        self.initialized = True
|
def locus(args):
    """%prog locus bamfile

    Extract selected locus from a list of TREDs for validation, and run lobSTR.
    """
    from jcvi.formats.sam import get_minibam
    # See `Format-lobSTR-database.ipynb` for a list of TREDs for validation
    INCLUDE = ["HD", "SBMA", "SCA1", "SCA2", "SCA8", "SCA17", "DM1", "DM2", "FXTAS"]
    db_choices = ("hg38", "hg19")
    p = OptionParser(locus.__doc__)
    p.add_option("--tred", choices=INCLUDE, help="TRED name")
    p.add_option("--ref", choices=db_choices, default="hg38", help="Reference genome")
    p.set_home("lobstr")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    bamfile, = args
    ref = opts.ref
    lhome = opts.lobstr_home
    tred = opts.tred
    tredsfile = datafile("TREDs.meta.csv")
    tf = pd.read_csv(tredsfile, index_col=0)
    # .loc replaces the long-deprecated .ix indexer (removed in pandas 1.0).
    row = tf.loc[tred]
    tag = "repeat_location"
    ldb = "TREDs"
    if ref == "hg19":
        tag += "." + ref
        ldb += "-" + ref
    seqid, start_end = row[tag].split(":")
    # Pad the repeat region on both sides for alignment context.
    PAD = 1000
    start, end = start_end.split('-')
    start, end = int(start) - PAD, int(end) + PAD
    region = "{}:{}-{}".format(seqid, start, end)
    minibamfile = get_minibam(bamfile, region)
    c = seqid.replace("chr", "")
    cmd, vcf = allelotype_on_chr(minibamfile, c, lhome, ldb)
    sh(cmd)
    parser = LobSTRvcf(columnidsfile=None)
    parser.parse(vcf, filtered=False)
    items = parser.items()
    if not items:
        print("No entry found!", file=sys.stderr)
        return
    # Reuse the list we already fetched instead of calling items() again.
    k, v = items[0]
    print("{} => {}".format(tred, v.replace(',', '/')), file=sys.stderr)
|
def resizeEvent(self, event):
    """Resize handler that keeps the rich text label in sync.

    After the default QPushButton handling, the embedded rich text
    label (if any) is stretched to the button's new size.

    :param event: <QResizeEvent>
    """
    super(XPushButton, self).resizeEvent(event)
    label = self._richTextLabel
    if label:
        label.resize(event.size())
|
def _get_dbid2goids ( associations ) :
"""Return gene2go data for user - specified taxids ."""
|
id2gos = cx . defaultdict ( set )
for ntd in associations :
id2gos [ ntd . DB_ID ] . add ( ntd . GO_ID )
return dict ( id2gos )
|
def results(self, times='all', t_precision=12, **kwargs):
    r"""Fetches the calculated quantity from the algorithm and returns it as
    an array.

    Parameters
    ----------
    times : scalar or list
        Time steps to be returned. The default value is 'all' which results
        in returning all time steps. If a scalar is given, only the
        corresponding time step is returned. If a range is given
        (e.g., 'range(0, 1, 1e-3)'), time steps in this range are returned.
    t_precision : integer
        The time precision (number of decimal places). Default value is 12.

    Notes
    -----
    The keyword steps is interpreted in the same way as times.
    """
    # 'steps' is accepted as an alias for 'times'.
    if 'steps' in kwargs.keys():
        times = kwargs['steps']
    t_pre = t_precision
    quantity = self.settings['quantity']
    # All stored keys holding this quantity (keys look like 'quantity@time').
    q = [k for k in list(self.keys()) if quantity in k]
    if times == 'all':
        t = q
    elif type(times) in [float, int]:
        # Number of decimal digits of the rounded time; the multiplier
        # (round(...) != int(...)) forces n = 0 for integral times.
        n = int(-dc(str(round(times, t_pre))).as_tuple().exponent * (round(times, t_pre) != int(times)))
        # Re-encode the time as '<mantissa>e-<n>' to match the key suffix.
        t_str = (str(int(round(times, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0))
        t = [k for k in q if t_str == k.split('@')[-1]]
    elif 'range' in times:
        # Parse a literal "range(start, stop, step)" string.
        t = times.replace(' ', '')
        t = t[6:-1]
        t = t.split(',')
        out = np.arange(float(t[0]), float(t[1]), float(t[2]))
        out = np.append(out, float(t[1]))  # include the stop value itself
        out = np.unique(out)
        out = np.around(out, decimals=t_pre)
        t = []
        for i in out:
            # Same '<mantissa>e-<n>' encoding as the scalar branch above.
            n = int(-dc(str(round(i, t_pre))).as_tuple().exponent * (round(i, t_pre) != int(i)))
            j = (str(int(round(i, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0))
            t_str = [k for k in q if j == k.split('@')[-1]]
            t += (t_str)
    d = {k: self[k] for k in t}
    return d
|
def BIC(self, data=None):
    '''Bayesian information criterion.

    If *data* is None (default), the BIC is computed over the model's
    assigned states (``self.states_list``); otherwise it is computed
    on the passed data array.

    NOTE: in principle this method computes the BIC only after finding
    the maximum likelihood parameters (or, of course, an EM fixed-point
    as an approximation!).
    '''
    # Either explicit data or assigned states must be available.  The
    # original asserted `data is None and ...`, which wrongly rejected
    # every call that actually passed data.
    assert data is not None or len(self.states_list) > 0, \
        'Must have data to get BIC'
    if data is None:
        return -2 * sum(self.log_likelihood(s.data).sum()
                        for s in self.states_list) \
            + self.num_parameters() * np.log(
                sum(s.data.shape[0] for s in self.states_list))
    else:
        return -2 * self.log_likelihood(data) \
            + self.num_parameters() * np.log(data.shape[0])
|
def processResponse(self, arg, replytype, **kw):
    """Chain the response handlers onto a deferred.

    Parameters:
        arg -- deferred
        replytype -- typecode
    """
    if self.debug:
        log.msg('--->PROCESS REQUEST\n%s' % arg, debug=1)
    # Each registered handler gets a crack at the response first ...
    for handler in self.handlers:
        arg.addCallback(handler.processResponse, **kw)
    # ... then the reply is parsed against the expected typecode.
    arg.addCallback(self.parseResponse, replytype)
|
def p_substr_assignment(p):
    """statement : LET ID arg_list EQ expr"""
    # Bail out if either the index list or the RHS expression failed to
    # parse (errors were already reported upstream).
    if p[3] is None or p[5] is None:
        return
    # There were errors
    p[0] = None
    entry = SYMBOL_TABLE.access_call(p[2], p.lineno(2))
    if entry is None:
        return
    if entry.class_ == CLASS.unknown:
        entry.class_ = CLASS.var
    # Substring assignment only makes sense on a string variable.
    assert entry.class_ == CLASS.var and entry.type_ == TYPE.string
    if p[5].type_ != TYPE.string:
        api.errmsg.syntax_error_expected_string(p.lineno(4), p[5].type_)
    if len(p[3]) > 1:
        syntax_error(p.lineno(2), "Accessing string with too many indexes. Expected only one." )
        return
    if len(p[3]) == 1:
        # Single index: the slice covers exactly that one position
        # (same value used for both bounds).
        substr = (make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2)), make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3][0].value, p.lineno(2)))
    else:
        # No index: the slice spans the full MIN..MAX slice range.
        substr = (make_typecast(_TYPE(gl.STR_INDEX_TYPE), make_number(gl.MIN_STRSLICE_IDX, lineno=p.lineno(2)), p.lineno(2)), make_typecast(_TYPE(gl.STR_INDEX_TYPE), make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(2)), p.lineno(2)))
    lineno = p.lineno(2)
    # Slice bounds are rebased against the configured string base index.
    base = make_number(OPTIONS.string_base.value, lineno, _TYPE(gl.STR_INDEX_TYPE))
    p[0] = make_sentence('LETSUBSTR', entry, make_binary(lineno, 'MINUS', substr[0], base, func=lambda x, y: x - y), make_binary(lineno, 'MINUS', substr[1], base, func=lambda x, y: x - y), p[5])
|
def absent(name, service_name, auth=None, **kwargs):
    '''Ensure an endpoint does not exists

    name
        Interface name

    url
        URL of the endpoint

    service_name
        Service name or ID

    region
        The region name to assign the endpoint
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    __salt__['keystoneng.setup_clouds'](auth)
    # _common() returns (success flag, payload); on failure the payload
    # is an error dict to return as-is, on success it is the endpoint.
    success, val = _common(ret, name, service_name, kwargs)
    if not success:
        return val
    endpoint = val
    if endpoint:
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': endpoint.id}
            ret['comment'] = 'Endpoint will be deleted.'
            return ret
        __salt__['keystoneng.endpoint_delete'](id=endpoint.id)
        ret['changes']['id'] = endpoint.id
        ret['comment'] = 'Deleted endpoint'
    return ret
|
def cov(x, y, keepdims=0):
    """Estimated covariance of the paired values in x and y (N-1
    normalization).

    The ``keepdims`` argument is accepted for interface compatibility
    but is not used by this implementation.

    Usage:   lcov(x, y, keepdims=0)
    """
    n = len(x)
    xmean = mean(x)
    ymean = mean(y)
    # Sum of products of paired deviations; indexing (rather than zip)
    # preserves the original IndexError behavior if y is shorter than x.
    total = 0.0
    for i in range(n):
        total += (x[i] - xmean) * (y[i] - ymean)
    return total / float(n - 1)
|
def remove(self, element, multiplicity=None):
    """Removes an element from the multiset.

    If no multiplicity is specified, the element is completely removed from the multiset:

    >>> ms = Multiset('aabbbc')
    >>> ms.remove('a')
    >>> sorted(ms)
    ['b', 'b', 'b', 'c']

    If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:

    >>> ms.remove('b', 2)
    >>> sorted(ms)
    ['b', 'c']

    It is not an error to remove more elements than are in the set:

    >>> ms.remove('b', 2)
    >>> sorted(ms)
    []

    This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.

    Args:
        element:
            The element to remove from the multiset.
        multiplicity:
            An optional multiplicity i.e. count of elements to remove.

    Returns:
        The multiplicity of the element in the multiset before
        the removal.

    Raises:
        KeyError: if the element is not contained in the set. Use :meth:`discard` if
            you do not want an exception to be raised.
    """
    _elements = self._elements
    if element not in _elements:
        raise KeyError
    old_multiplicity = _elements.get(element, 0)
    if multiplicity is None or multiplicity >= old_multiplicity:
        # Remove the element entirely.
        del _elements[element]
        self._total -= old_multiplicity
    elif multiplicity < 0:
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        _elements[element] -= multiplicity
        self._total -= multiplicity
    # multiplicity == 0 falls through: nothing is removed.
    return old_multiplicity
|
def get_database_users(self):
    """Return the list of users for the current database."""
    endpoint = "db/{0}/users".format(self._database)
    reply = self.request(url=endpoint, method='GET', expected_response_code=200)
    return reply.json()
|
def reset(all=False, vms=False, switches=False):
    '''Reset the running state of VMM or a subsystem.

    all:
        Reset the running state.

    switches:
        Reset the configured switches.

    vms:
        Reset and terminate all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.reset all=True
    '''
    # Only the first truthy flag wins, mirroring the original
    # precedence: all > vms > switches.
    cmd = ['vmctl', 'reset']
    if all:
        cmd.append('all')
    elif vms:
        cmd.append('vms')
    elif switches:
        cmd.append('switches')
    outcome = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    if outcome['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [outcome['stderr']], 'changes': False})
    return True
|
def confirmMapIdentity(self, subject, vendorSpecific=None):
    """Confirm a map-identity request and return the boolean result.

    See Also: confirmMapIdentityResponse()

    Args:
        subject:
        vendorSpecific:

    Returns:
        bool parsed from the service response.
    """
    return self._read_boolean_response(
        self.confirmMapIdentityResponse(subject, vendorSpecific))
|
def _equation_of_time(t):
    """Find the difference between apparent and mean solar time

    Parameters
    ----------
    t : `~astropy.time.Time`
        times (array)

    Returns
    -------
    ret1 : `~astropy.units.Quantity`
        the equation of time
    """
    # NOTE(review): polyval is called as polyval(x, coeffs), which
    # matches numpy.polynomial.polynomial.polyval (NOT np.polyval) —
    # confirm the module-level import.
    # Julian centuries since J2000.0
    T = (t - Time("J2000")).to(u.year).value / 100
    # obliquity of ecliptic (Meeus 1998, eq 22.2)
    poly_pars = (84381.448, 46.8150, 0.00059, 0.001813)
    eps = u.Quantity(polyval(T, poly_pars), u.arcsec)
    y = np.tan(eps / 2) ** 2
    # Sun's mean longitude (Meeus 1998, eq 25.2)
    poly_pars = (280.46646, 36000.76983, 0.0003032)
    L0 = u.Quantity(polyval(T, poly_pars), u.deg)
    # Sun's mean anomaly (Meeus 1998, eq 25.3)
    poly_pars = (357.52911, 35999.05029, 0.0001537)
    M = u.Quantity(polyval(T, poly_pars), u.deg)
    # eccentricity of Earth's orbit (Meeus 1998, eq 25.4)
    poly_pars = (0.016708634, -0.000042037, -0.0000001267)
    e = polyval(T, poly_pars)
    # equation of time, radians (Meeus 1998, eq 28.3)
    eot = (y * np.sin(2 * L0) - 2 * e * np.sin(M) + 4 * e * y * np.sin(M) * np.cos(2 * L0) - 0.5 * y ** 2 * np.sin(4 * L0) - 5 * e ** 2 * np.sin(2 * M) / 4) * u.rad
    return eot.to(u.hourangle)
|
def get_current_context_id():
    """Identifies which context it is (greenlet, stackless, or thread).

    On first call this probes which concurrency frameworks are
    available and then *rebinds the module-level name*
    ``get_current_context_id`` to the cheapest matching callable, so
    subsequent calls skip the probing entirely.  When both greenlet and
    stackless are present no rebinding happens, since a pair of ids is
    needed each time.

    :returns: the identifier of the current context.
    """
    global get_current_context_id
    if greenlet is not None:
        if stackless is None:
            # Only greenlets: the current greenlet object is the id.
            get_current_context_id = greenlet.getcurrent
            return greenlet.getcurrent()
        # Both greenlet and stackless: need the pair to disambiguate.
        return greenlet.getcurrent(), stackless.getcurrent()
    elif stackless is not None:
        get_current_context_id = stackless.getcurrent
        return stackless.getcurrent()
    # Plain threads.
    get_current_context_id = _thread.get_ident
    return _thread.get_ident()
|
def get_email_content(file_path):
    """Read an email body from a file and HTML-format paragraph breaks.

    :param file_path: Path to file with email text
    :return: Email text with blank lines ("\\n\\n") turned into <br> tags
    """
    with open(file_path, "r") as handle:
        raw = str(handle.read())
    return raw.replace("\n\n", "<br>")
|
def search_records(self, domain, record_type, name=None, data=None):
    """Return all records of *record_type* for *domain* matching the
    supplied search criteria.

    Results are fetched page by page until the service stops returning
    a ``next_uri``, then wrapped in CloudDNSRecord objects.
    """
    criteria = []
    if name:
        criteria.append("name=%s" % name)
    if data:
        criteria.append("data=%s" % data)
    dom_id = utils.get_id(domain)
    uri = "/domains/%s/records?type=%s" % (dom_id, record_type)
    extra = "&".join(criteria)
    if extra:
        uri = "%s&%s" % (uri, extra)
    resp, body = self._retry_get(uri)
    records = body.get("records", [])
    self._reset_paging("record", body)
    # Follow pagination links until exhausted.  The paging dict is
    # fetched once and assumed to be updated in place by _reset_paging.
    rec_paging = self._paging.get("record", {})
    while rec_paging.get("next_uri"):
        resp, body = self._retry_get(rec_paging.get("next_uri"))
        self._reset_paging("record", body)
        records.extend(body.get("records", []))
    for record in records:
        record["domain_id"] = dom_id
    return [CloudDNSRecord(self, record, loaded=False)
            for record in records if record]
|
def get_job_list(self, project_name):
    """Get the list of pending, running and finished jobs of some project.

    :param project_name: the project name
    :return: a dictionary that list include job name and status
        example:
        {"status": "ok",
         "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}],
         "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2",
                      "start_time": "2012-09-12 10:14:03.594664"}],
         "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3",
                       "start_time": "2012-09-12 10:14:03.594664",
                       "end_time": "2012-09-12 10:24:03.594664"}]}
    """
    url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1]
    data = {'project': project_name}
    response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON)
    if response is None:
        # sys._getframe().f_code.co_name is this function's own name,
        # so the log message survives a rename.
        logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name)
        # Fall back to an empty JobList-shaped dict so callers always
        # get the same structure.
        response = JobList().__dict__
    return response
|
def plots_from_files(imspaths, figsize=(10, 5), rows=1, titles=None, maintitle=None):
    """Plots images given image files.

    Arguments:
        imspaths (list): list of paths
        figsize (tuple): figure size
        rows (int): number of rows
        titles (list): list of titles
        maintitle (string): main title
    """
    fig = plt.figure(figsize=figsize)
    if maintitle is not None:
        plt.suptitle(maintitle, fontsize=16)
    # Grid is rows x ceil(n / rows); hoisted out of the loop.
    cols = ceildiv(len(imspaths), rows)
    for idx, path in enumerate(imspaths):
        axes = fig.add_subplot(rows, cols, idx + 1)
        axes.axis('Off')
        if titles is not None:
            axes.set_title(titles[idx], fontsize=16)
        plt.imshow(plt.imread(path))
|
def setRect(self, rect):
    """Set this node's rect, clamping width and height to the node's
    configured minimums before delegating to the base class.

    :param rect: <QRectF>
    """
    min_w = self.minimumWidth()
    min_h = self.minimumHeight()
    if rect.width() < min_w:
        rect.setWidth(min_w)
    if rect.height() < min_h:
        rect.setHeight(min_h)
    return super(XNode, self).setRect(rect)
|
def checkgrad(f, fprime, x, *args, **kw_args):
    """Analytical gradient check using central (3-point) differences.

    Compares ``fprime`` against numerical gradients of ``f`` at ``x``:
    first along a random perturbation direction, then coordinate by
    coordinate.  Results are only logged via ``LG.debug``; nothing is
    returned.
    """
    LG.debug("Checking gradient ...")
    import numpy as np
    # using machine precision to choose h
    eps = np.finfo(float).eps
    step = np.sqrt(eps) * (x.min())
    # shake things up a bit by taking random steps for each x dimension
    h = step * np.sign(np.random.uniform(-1, 1, x.size))
    f_ph = f(x + h, *args, **kw_args)
    f_mh = f(x - h, *args, **kw_args)
    numerical_gradient = (f_ph - f_mh) / (2 * h)
    analytical_gradient = fprime(x, *args, **kw_args)
    ratio = (f_ph - f_mh) / (2 * np.dot(h, analytical_gradient))
    # Per-coordinate check.  (A leftover pdb.set_trace() debugging
    # breakpoint was removed from this loop.)
    h = np.zeros_like(x)
    for i in range(len(x)):
        h[i] = step
        f_ph = f(x + h, *args, **kw_args)
        f_mh = f(x - h, *args, **kw_args)
        numerical_gradient = (f_ph - f_mh) / (2 * step)
        analytical_gradient = fprime(x, *args, **kw_args)[i]
        ratio = (f_ph - f_mh) / (2 * step * analytical_gradient)
        h[i] = 0
        LG.debug("[%d] numerical: %f, analytical: %f, ratio: %f" %
                 (i, numerical_gradient, analytical_gradient, ratio))
|
def update(self):
    '''Draws directly to the terminal any UI elements in the tree that are
    marked as having been updated. UI elements may have marked themselves
    as updated if, for example, notable attributes have been altered, or
    the :attr:`updated` element may be set to ``True`` explicitly by your
    program. The drawing and layout logic are exactly the same as for
    :meth:`draw`.
    '''
    # Delegate to the base implementation, supplying this root's
    # default format, terminal and style bundle.
    super().update(self.default_format, terminal=self.terminal, styles=self.style)
|
def filter_stack(graph, head, filters):
    """Perform a walk in a depth-first order starting
    at *head*.

    Returns (visited, removes, orphans).

    * visited: the set of visited nodes
    * removes: the list of nodes where the node
      data does not all *filters*
    * orphans: tuples of (last_good, node),
      where node is not in removes, is directly
      reachable from a node in *removes* and
      *last_good* is the closest upstream node that is not
      in *removes*.
    """
    visited, removes, orphans = set([head]), set(), set()
    # Each stack entry is (closest non-removed ancestor, node to visit).
    stack = deque([(head, head)])
    get_data = graph.node_data
    get_edges = graph.out_edges
    get_tail = graph.tail
    while stack:
        last_good, node = stack.pop()
        data = get_data(node)
        if data is not None:
            for filtfunc in filters:
                if not filtfunc(data):
                    removes.add(node)
                    break
            else:
                # for/else: runs only when no filter rejected the node,
                # making this node the new "last good" ancestor.
                last_good = node
        for edge in get_edges(node):
            tail = get_tail(edge)
            if last_good is not node:
                # node was removed, so tail is (provisionally) orphaned.
                orphans.add((last_good, tail))
            if tail not in visited:
                visited.add(tail)
                stack.append((last_good, tail))
    # Keep only orphans that were not themselves removed.
    orphans = [(last_good, tail) for (last_good, tail) in orphans if tail not in removes]
    # orphans.sort()
    return visited, removes, orphans
|
def _sort_to_str ( self ) :
"""Before exec query , this method transforms sort dict string
from
{ " name " : " asc " , " timestamp " : " desc " }
to
" name asc , timestamp desc " """
|
params_list = [ ]
timestamp = ""
for k , v in self . _solr_params [ 'sort' ] . items ( ) :
if k != "timestamp" :
params_list . append ( " " . join ( [ k , v ] ) )
else :
timestamp = v
params_list . append ( " " . join ( [ 'timestamp' , timestamp ] ) )
self . _solr_params [ 'sort' ] = ", " . join ( params_list )
|
def _actor_from_game_image(self, name, game_image):
    """Return an actor object matching the one in the game image.

    Note:
        Health and mana are based on measured percentage of a fixed maximum
        rather than the actual maximum in the game.

    Arguments:
        name: must be 'player' or 'opponent'
        game_image: opencv image of the main game area

    Returns:
        An Actor, or None when any tank measurement fails.
    """
    # Fixed scales the measured fill percentages are projected onto.
    HEALTH_MAX = 100
    MANA_MAX = 40
    # get the set of tools for investigating this actor
    tools = {'player': self._player_tools, 'opponent': self._oppnt_tools}[name]
    # setup the arguments to be set:
    args = [name]
    # health:
    t, l, b, r = tools['health_region'].region_in(game_image)
    health_image = game_image[t:b, l:r]
    health_image = numpy.rot90(health_image)
    # upright for the TankLevel
    how_full = tools['health_tank'].how_full(health_image)
    if how_full is None:
        return None
    # failure
    health = int(round(HEALTH_MAX * how_full))
    args.append((health, HEALTH_MAX))
    # mana: one tank per color channel tool
    for color in ('r', 'g', 'b', 'y'):
        t, l, b, r = tools[color + '_region'].region_in(game_image)
        mana_image = game_image[t:b, l:r]
        how_full = tools[color + '_tank'].how_full(mana_image)
        if how_full is None:
            return None
        # failure
        mana = int(round(MANA_MAX * how_full))
        args.append((mana, MANA_MAX))
    # experience and coins simply start at zero
    x_m = (0, 1000), (0, 1000)
    args.extend(x_m)
    # hammer and scroll are unused
    h_c = (0, 0), (0, 0)
    args.extend(h_c)
    # build the actor and return it
    return Actor(*args)
|
def scan_sequence(self, frame, direction):
    """Search one reading frame for ORFs.

    Walks the codons of *frame*, opening an ORF at the first start (or
    any non-stop codon when no start codons are configured) and closing
    it at each stop codon; an ORF still open at the end of the sequence
    is also recorded.
    """
    start_index = None
    for codon, pos in self.codons(frame):
        is_stop = codon in self.stop
        if start_index is None and not is_stop and (codon in self.start or not self.start):
            start_index = pos
        elif is_stop and start_index is not None:
            # Close the ORF including this stop codon (+3).
            self._update_longest(start_index, pos + 3, direction, frame)
            start_index = None
    if start_index is not None:
        # Sequence ended mid-ORF: record it up to the final codon.
        self._update_longest(start_index, pos + 3, direction, frame)
|
def _package_conf_ordering(conf, clean=True, keep_backup=False):
    '''Move entries in the correct file.

    Walks every file under the package.* config directory for *conf*,
    moving atoms that do not belong in their current file into the
    correct one via append_to_package_conf().  Each touched file is
    backed up first; backups are deleted unless *keep_backup* is True,
    and emptied directories are pruned when *clean* is True.
    '''
    if conf in SUPPORTED_CONFS:
        rearrange = []
        path = BASE_PATH.format(conf)
        backup_files = []
        for triplet in salt.utils.path.os_walk(path):
            for file_name in triplet[2]:
                file_path = '{0}/{1}'.format(triplet[0], file_name)
                # cp: category/package path of the file relative to the
                # config root.
                cp = triplet[0][len(path) + 1:] + '/' + file_name
                shutil.copy(file_path, file_path + '.bak')
                backup_files.append(file_path + '.bak')
                if cp[0] == '/' or len(cp.split('/')) > 2:
                    # File is not at a valid category/package location:
                    # everything in it must be re-filed.
                    with salt.utils.files.fopen(file_path) as fp_:
                        rearrange.extend(salt.utils.data.decode(fp_.readlines()))
                    os.remove(file_path)
                else:
                    new_contents = ''
                    with salt.utils.files.fopen(file_path, 'r+') as file_handler:
                        for line in file_handler:
                            line = salt.utils.stringutils.to_unicode(line)
                            try:
                                atom = line.strip().split()[0]
                            except IndexError:
                                # Blank line: keep as-is.
                                new_contents += line
                            else:
                                # Keep comments and atoms that belong to
                                # this file; re-file everything else.
                                if atom[0] == '#' or portage.dep_getkey(atom) == cp:
                                    new_contents += line
                                else:
                                    rearrange.append(line.strip())
                        if new_contents:
                            # Rewrite the file in place with only the
                            # lines that belong here.
                            file_handler.seek(0)
                            file_handler.truncate(len(new_contents))
                            file_handler.write(new_contents)
                    if not new_contents:
                        os.remove(file_path)
        # Re-file every collected atom into its correct location.
        for line in rearrange:
            append_to_package_conf(conf, string=line)
        if not keep_backup:
            for bfile in backup_files:
                try:
                    os.remove(bfile)
                except OSError:
                    pass
        if clean:
            # Prune directories that ended up empty (but never the root).
            for triplet in salt.utils.path.os_walk(path):
                if not triplet[1] and not triplet[2] and triplet[0] != path:
                    shutil.rmtree(triplet[0])
|
def skip_status(*skipped):
    """Decorator factory: skip the wrapped call (returning None) when
    ``self.status`` is one of *skipped*."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.status in skipped:
                return None
            return func(self, *args, **kwargs)
        return wrapper
    return decorator
|
def topics(self):
    """:return: the list of topics we interfaced with (not the list of all available topics)"""
    # Merge publishers and subscribers; subscribers win on key clashes,
    # exactly as the original copy()+update() did.
    merged = self.interface.publishers.copy()
    merged.update(self.interface.subscribers)
    return merged
|
def raise_ssl_error(code, nested=None):
    """Raise an SSLError for *code*, appending *nested* detail if given."""
    message = "{0}: {1}".format(code, _ssl_errors[code])
    if nested:
        raise SSLError(code, message + str(nested))
    raise SSLError(code, message)
|
def calculate_fuzzy_chi(alpha, square_map, right_eigenvectors):
    """Calculate the membership matrix (chi) from parameters alpha.

    Parameters
    ----------
    alpha : ndarray
        Parameters of objective function (e.g. flattened A)
    square_map : ndarray
        Mapping from square indices (i, j) to flat indices (k).
    right_eigenvectors : ndarray
        The right eigenvectors.

    Returns
    -------
    A : ndarray
        The transformation matrix A
    chi_fuzzy : ndarray
        The (fuzzy) membership matrix.
    mapping : ndarray
        The mapping from microstates to macrostates.
    """
    # Reshape the flat parameter vector into the square matrix A, then
    # project it back into the feasible set.
    A = fill_A(to_square(alpha, square_map), right_eigenvectors)
    # Fuzzy memberships are the eigenvector projections through A; each
    # microstate is assigned to its highest-membership macrostate.
    chi_fuzzy = np.dot(right_eigenvectors, A)
    mapping = np.argmax(chi_fuzzy, 1)
    return A, chi_fuzzy, mapping
|
def ajax_delete_analysis_attachment(self):
    """Endpoint for attachment delete in WS"""
    uid = self.request.form.get("attachment_uid", None)
    if not uid:
        return "error"
    attachment = api.get_object_by_uid(uid, None)
    if attachment is None:
        return "Could not resolve attachment UID {}".format(uid)
    # Delegate the actual deletion to the shared AttachmentsView.
    view = self.context.restrictedTraverse("@@attachments_view")
    view.delete_attachment(attachment)
    return "success"
|
def parse_time(value: Union[time, str]) -> time:
    """Parse a time (or time string) into a datetime.time.

    Timezone offsets are not supported: a well-formatted string that
    carries an offset fails the regex and raises.

    Raise ValueError if the input is well formatted but not a valid time.
    Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
    """
    if isinstance(value, time):
        return value
    match = time_re.match(value)
    if not match:
        raise errors.TimeError()
    parts = match.groupdict()
    if parts['microsecond']:
        # Right-pad so e.g. ".5" means 500000 microseconds, not 5.
        parts['microsecond'] = parts['microsecond'].ljust(6, '0')
    numeric = {name: int(val) for name, val in parts.items() if val is not None}
    with change_exception(errors.TimeError, ValueError):
        return time(**numeric)
|
def Run(self, args):
    """Reads a buffer on the client and sends it to the server."""
    # Make sure we limit the size of our output
    if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
        raise RuntimeError("Can not read buffers this large.")
    try:
        fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
        fd.Seek(args.offset)
        # NOTE(review): offset is re-read via Tell() rather than taken
        # from args.offset — presumably Seek may clamp; confirm.
        offset = fd.Tell()
        data = fd.Read(args.length)
    except (IOError, OSError) as e:
        # Report I/O failures as a status instead of crashing the action.
        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e)
        return
    # Now return the data to the server
    self.SendReply(rdf_client.BufferReference(offset=offset, data=data, length=len(data), pathspec=fd.pathspec))
|
def server_add(s_name, s_ip, s_state=None, **connection_args):
    '''Add a server

    Note: The default server state is ENABLED

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.server_add 'serverName' 'serverIpAddress'
        salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState'
    '''
    # Refuse to add a duplicate server.
    if server_exists(s_name, **connection_args):
        return False
    nitro = _connect(**connection_args)
    if nitro is None:
        return False
    server = NSServer()
    server.set_name(s_name)
    server.set_ipaddress(s_ip)
    if s_state is not None:
        server.set_state(s_state)
    added = True
    try:
        NSServer.add(nitro, server)
    except NSNitroError as error:
        log.debug('netscaler module error - NSServer.add() failed: %s', error)
        added = False
    _disconnect(nitro)
    return added
|
def get_abstracts(self, refresh=True):
    """Fetch a ScopusAbstract for every document EID found via ScopusSearch.

    :param refresh: whether to refresh cached search and abstract data.
    :returns: list of ScopusAbstract objects.
    """
    eids = self.get_document_eids(refresh=refresh)
    return [ScopusAbstract(eid, refresh=refresh) for eid in eids]
|
def get_iam_policy(self, client=None):
    """Fetch the IAM policy for the bucket via the ``getIamPolicy`` API.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: :class:`google.api_core.iam.Policy`
    :returns: the policy instance, based on the resource returned from
              the ``getIamPolicy`` API request.
    """
    client = self._require_client(client)
    # Bill the request to the user project when one is configured.
    if self.user_project is not None:
        query_params = {"userProject": self.user_project}
    else:
        query_params = {}
    resource = client._connection.api_request(
        method="GET",
        path="%s/iam" % (self.path,),
        query_params=query_params,
        _target_object=None,
    )
    return Policy.from_api_repr(resource)
|
def _validate_inputs ( actual_inputs , required_inputs , keypath = None ) :
"""Validate inputs . Raise exception if something is missing .
args :
actual _ inputs : the object / dictionary passed to a subclass of
PublishablePayload
required _ inputs : the object / dictionary containing keys ( and subkeys )
for required fields . ( See get _ common _ payload _ template . )
keypath : used internally in recursive function calls .
return :
Nothing . An exception will be raised if a problem is encountered ."""
|
actual_keys = set ( actual_inputs . keys ( ) )
required_keys = set ( required_inputs . keys ( ) )
if actual_keys . intersection ( required_keys ) != required_keys :
prefix = '%s.' if keypath else ''
output_keys = { '%s%s' % ( prefix , key ) for key in required_keys }
raise Exception ( "Missing input fields. Expected %s." % ', ' . join ( output_keys ) )
for key in required_keys : # TODO : review the following usage of isinstance .
# Will this always be appropriate , given duck typing ?
if isinstance ( required_inputs [ key ] , dict ) :
new_keypath = key if not keypath else '%s.%s' % ( keypath , key )
_validate_inputs ( actual_inputs = actual_inputs [ key ] , required_inputs = required_inputs [ key ] , keypath = new_keypath )
|
def export(target_folder, source_folders=None, class_type='all', raise_errors=False):
    """Export the existing scripts/instruments (future: probes) into
    ``target_folder`` as .b26 files.

    Args:
        target_folder: target location of created .b26 script files
        source_folders: single path or list of paths that contain the
            python script files; can also be just the name of a module.
            If None, defaults to the package directory containing this file.
        class_type: string, one of the 4 following options
            - probes (exports probes) -- not implemented yet --
            - scripts (exports scripts)
            - instruments (exports instruments)
            - all (exports instruments, scripts and probes)
        raise_errors: if True, propagate errors raised during export.

    Returns:
        None
    """
    if class_type not in ('all', 'scripts', 'instruments', 'probes'):
        print('unknown type to export')
        return
    if not os.path.isdir(target_folder):
        try:
            os.mkdir(target_folder)
        except OSError:
            # BUG FIX: was a bare ``except`` that printed a tuple, e.g.
            # ('/bad/path', ' is invalid target folder').
            print('%s is invalid target folder' % target_folder)
            target_folder = None
    if target_folder is None:
        return
    if source_folders is None:
        # Default to the package directory containing this module.
        module_list = [os.path.dirname(os.path.dirname(inspect.getfile(inspect.currentframe())))]
    elif isinstance(source_folders, str):
        module_list = [source_folders]
    elif isinstance(source_folders, list):
        module_list = source_folders
    else:
        raise TypeError('unknown type for source_folders')
    for path_to_module in module_list:
        if class_type in ('all', 'scripts'):
            export_default_scripts(target_folder, source_folder=path_to_module, raise_errors=raise_errors)
        if class_type in ('all', 'instruments'):
            export_default_instruments(target_folder, path_to_module, raise_errors=raise_errors)
        if class_type in ('all', 'probes'):
            print('WARNING: probes currently not supported')
|
def _handle_job_set(function):
    """Decorator that reports progress to a `taskhandle.JobSet`.

    Wraps the `do` and `undo` methods of `Change` objects so the job set
    is notified when the job starts and when it finishes.
    """
    def wrapper(self, job_set=taskhandle.NullJobSet()):
        # Announce the job, run it, then mark it done.
        job_set.started_job(str(self))
        function(self)
        job_set.finished_job()
    return wrapper
|
def _GetSupportedFilesInDir(self, fileDir, fileList, supportedFormatList, ignoreDirList):
    """Recursively collect all supported files under a root directory.

    Files whose extension matches supportedFormatList (and whose show
    details can be parsed) are appended to fileList. Directories whose
    base name appears in ignoreDirList are skipped.

    Parameters
    ----------
    fileDir : string
        Path to root of directory tree to search.
    fileList : list
        List to add any found files to.
    supportedFormatList : list
        List of supported file extensions.
    ignoreDirList : list
        List of directories to ignore.
    """
    goodlogging.Log.Info("CLEAR", "Parsing file directory: {0}".format(fileDir))
    if not os.path.isdir(fileDir):
        goodlogging.Log.Info("CLEAR", "Invalid non-directory path given to parse")
        return
    for globPath in glob.glob(os.path.join(fileDir, '*')):
        if util.FileExtensionMatch(globPath, supportedFormatList):
            candidate = tvfile.TVFile(globPath)
            # Only keep files whose show details can be parsed.
            if candidate.GetShowDetails():
                fileList.append(candidate)
        elif not os.path.isdir(globPath):
            goodlogging.Log.Info("CLEAR", "Ignoring unsupported file or folder: {0}".format(globPath))
        elif os.path.basename(globPath) in ignoreDirList:
            goodlogging.Log.Info("CLEAR", "Skipping ignored directory: {0}".format(globPath))
        else:
            self._GetSupportedFilesInDir(globPath, fileList, supportedFormatList, ignoreDirList)
|
def _derive_charge(self, config):
    """Use a temperature window to identify the roast charge.

    The charge manifests as a sudden downward trend in bean temperature.
    Once found, it is saved and never overwritten; the charge is needed
    in order to derive the turning point.

    :param config: Current snapshot of the configuration
    :type config: dict
    :returns: the config snapshot when the charge is detected, else None
    """
    # Never overwrite a previously detected charge.
    if self._roast.get('charge'):
        return None
    self._window.append(config)
    times = [snapshot['time'] for snapshot in self._window]
    temps = [snapshot['bean_temp'] for snapshot in self._window]
    slope, intercept, r_value, p_value, std_err = linregress(times, temps)
    # A negative regression slope over the window marks the drop at charge.
    if slope < 0:
        self._roast['charge'] = self._roast['last']
        self.add_roast_event({'event': 'Charge'})
        return config
    return None
|
def flatten_array(grid):
    """Return a 1-dimensional list with the same contents as *grid*.

    Fixes over the previous implementation: an empty input no longer
    raises IndexError (it indexed ``grid[0]``), and rows of mixed depth
    such as ``[[1, [2]]]`` are fully flattened instead of being returned
    partially flattened.

    :param grid: a (possibly unevenly) nested list.
    :returns: a flat list of all non-list leaf elements, in order.
    """
    flat = []
    for item in grid:
        if isinstance(item, list):
            # Recursively flatten nested sub-lists.
            flat.extend(flatten_array(item))
        else:
            flat.append(item)
    return flat
|
def getPerfInfo(rh, useridlist):
    """Get the performance information for a userid via SMCLI.

    Input:
       Request Handle
       Userid to query <- may change this to a list later.

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc - RC returned from SMCLI if overallRC = 0.
          rs - RS returned from SMCLI if overallRC = 0.
          errno - Errno returned from SMCLI if overallRC = 0.
          response - Stripped and reformatted output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist)
    # Query performance data for the handle's userid, one sample (-c 1).
    parms = ["-T", rh.userid, "-c", "1"]
    results = invokeSMCLI(rh, "Image_Performance_Query", parms)
    if results['overallRC'] != 0:
        # SMCLI failed: report its response and pass its codes through.
        rh.printLn("ES", results['response'])
        rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " + str(results['overallRC']))
        return results
    lines = results['response'].split("\n")
    usedTime = 0
    totalCpu = 0
    totalMem = 0
    usedMem = 0
    # Scan the SMCLI output for the four metrics of interest. Values are
    # quoted in the response, hence the strip('"') calls.
    try:
        for line in lines:
            if "Used CPU time:" in line:
                usedTime = line.split()[3].strip('"')
                # Value is in us, need make it seconds
                usedTime = int(usedTime) / 1000000
            if "Guest CPUs:" in line:
                totalCpu = line.split()[2].strip('"')
            if "Max memory:" in line:
                totalMem = line.split()[2].strip('"')
                # Value is in Kb, need to make it Mb
                totalMem = int(totalMem) / 1024
            if "Used memory:" in line:
                usedMem = line.split()[2].strip('"')
                usedMem = int(usedMem) / 1024
    except Exception as e:
        # Unexpected response layout: report message 0412 and mark failure
        # with the module's error codes.
        msg = msgs.msg['0412'][1] % (modId, type(e).__name__, str(e), results['response'])
        rh.printLn("ES", msg)
        results['overallRC'] = 4
        results['rc'] = 4
        results['rs'] = 412
    if results['overallRC'] == 0:
        # Reformat the parsed values into a human-readable summary.
        memstr = "Total Memory: %iM\n" % totalMem
        usedmemstr = "Used Memory: %iM\n" % usedMem
        procstr = "Processors: %s\n" % totalCpu
        timestr = "CPU Used Time: %i sec\n" % usedTime
        results['response'] = memstr + usedmemstr + procstr + timestr
    rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " + str(results['rc']))
    return results
|
def to_primitive(self, context=None):
    """Serialize this constant, rendering its value into primitive form.

    .. versionadded:: 1.3.0
    """
    primitive = super(ValueConstant, self).to_primitive(context)
    raw = self.value
    # Date-like values become ISO strings; callables are invoked.
    if hasattr(raw, 'isoformat'):
        raw = raw.isoformat()
    elif callable(raw):
        raw = raw()
    primitive['value'] = raw
    return primitive
|
def init_original_response(self):
    """Fetch the original response used for comparisons and decide whether
    the Cookie header is actually necessary.

    Sends the request once as-is and, when a Cookie header is present,
    once more with an empty Cookie; if both responses match, Cookie is
    added to the ignored headers.

    :returns: the cleaned-up original response.
    :raises ValueError: if the original request fails.
    """
    no_cookie_resp = None
    self.is_cookie_necessary = True
    # Normalize a `json` body into a pre-encoded `data` payload.
    if 'json' in self.request:
        self.request['data'] = json.dumps(self.request.pop('json')).encode(self.encoding)
    r1 = self.req.request(retry=self.retry, timeout=self.timeout, **self.request)
    if 'headers' in self.request:
        # Test is_cookie_necessary: re-send with an empty Cookie header.
        cookie = self.request['headers'].get('Cookie', None)
        if cookie:
            new_request = deepcopy(self.request)
            new_request['headers']['Cookie'] = ''
            r2 = self.req.request(retry=self.retry, timeout=self.timeout, **new_request)
            no_cookie_resp = self.ensure_response(r2)
    resp = r1.x
    # BUG FIX: this was `assert resp, ValueError(...)`, which raised
    # AssertionError (with the ValueError only as the message) and was
    # silently skipped under `python -O`. Raise the intended error.
    if not resp:
        raise ValueError('original_response should not be failed. %s' % self.request)
    self.original_response = self.ensure_response(r1)
    self.encoding = self.encoding or resp.encoding
    if no_cookie_resp == self.original_response:
        self.ignore['headers'].append('Cookie')
        self.is_cookie_necessary = False
    return self.original_response
|
def lowercase(self):
    """Lowercase all strings in this tree.

    Works recursively and in-place: interior nodes delegate to their
    children, leaf nodes lowercase their own text.
    """
    if not self.children:
        self.text = self.text.lower()
    else:
        for child in self.children:
            child.lowercase()
|
def communityvisibilitystate(self):
    """Return the Visibility State of the User's Profile.

    :returns: the mapped value from ``VisibilityState``, or None when the
        raw state is unset or not a known state.
    """
    state = self._communityvisibilitystate
    # Idiom fix: compare against None with `is`, not `==`.
    if state is None:
        return None
    if state in self.VisibilityState:
        return self.VisibilityState[state]
    # Invalid state.
    return None
|
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
    """TPU config for cifar 10."""
    hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
    update_hparams_for_tpu(hparams)
    # Heads are expensive on TPU, so use fewer of them.
    hparams.num_heads = 4
    hparams.batch_size = 2
    hparams.num_decoder_layers = 12
    hparams.block_length = 128
    hparams.hidden_size = 256
    hparams.filter_size = 2048
    hparams.layer_preprocess_sequence = "none"
    hparams.layer_postprocess_sequence = "dan"
    hparams.layer_prepostprocess_dropout = 0.1
    # Adafactor + rsqrt decay with warmup.
    hparams.optimizer = "Adafactor"
    hparams.learning_rate_schedule = "rsqrt_decay"
    hparams.learning_rate_warmup_steps = 10000
    return hparams
|
def _reset_bbox(self):
    """This function should only be called internally. It resets the
    viewer's bounding box based on changes to pan or scale.
    """
    scale_x, scale_y = self.get_scale_xy()
    pan_x, pan_y = self.get_pan(coord='data')[:2]
    width, height = self.get_window_size()
    # Clamp the window to at least 1 pixel per dimension, otherwise the
    # scale calculation raises an exception. See github issue 431.
    width = max(1, width)
    height = max(1, height)
    self._calc_bg_dimensions(scale_x, scale_y, pan_x, pan_y, width, height)
|
def getPythonVarName(name):
    """Derive an upper-case Python variable name from *name*.

    '+', '-', '.' and '/' become underscores, spaces are removed, and any
    characters matched by SUB_REGEX are stripped before upper-casing.
    """
    # Single-pass translation instead of five chained str.replace calls.
    translated = name.translate(str.maketrans('+-./', '____', ' '))
    return SUB_REGEX.sub('', translated).upper()
|
def childKeys(self):
    """Returns the list of child keys for this settings instance.

    :return [<str>, ..]
    """
    # Delegate to the custom format when one is installed.
    custom = self._customFormat
    if custom:
        return custom.childKeys()
    return super(XSettings, self).childKeys()
|
def update_entry_line(self, key=None):
    """Updates the entry line.

    Parameters
    ----------
    key: 3-tuple of Integer, defaults to current cell
        Cell whose code the entry line is updated to.
    """
    target = self.actions.cursor if key is None else key
    cell_code = self.GetTable().GetValue(*target)
    post_command_event(self, self.EntryLineMsg, text=cell_code)
|
def register(self, request, **kwargs):
    """Create and immediately log in a new user.

    Only an email is required to register: the username is generated
    automatically (a hash of the email) and a random password is
    generated and emailed to the user. Activation is still required for
    account use after the specified number of days.
    """
    if Site._meta.installed:
        site = Site.objects.get_current()
    else:
        site = RequestSite(request)
    email = kwargs['email']
    # Generate random password
    password = User.objects.make_random_password()
    # Generate username based off of the email supplied
    username = sha_constructor(str(email)).hexdigest()[:30]
    incr = 0
    # Ensure the generated username is in fact unique
    while User.objects.filter(username=username).count() > 0:
        incr += 1
        username = sha_constructor(str(email + str(incr))).hexdigest()[:30]
    # Create the active user
    new_user = User.objects.create_user(username, email, password)
    new_user.save()
    # Create the registration profile; this is still needed because the
    # user must still activate their account for further use
    # (presumably within 3 days -- confirm against the activation setting)
    registration_profile = RegistrationProfile.objects.create_profile(new_user)
    # Authenticate and login the new user automatically
    auth_user = authenticate(username=username, password=password)
    login(request, auth_user)
    # Expire the session when the browser closes so the user is forced to
    # log in on their next visit; this should push them to check their
    # email for the generated password.
    request.session.set_expiry(0)
    # Create a profile instance for the new user if
    # AUTH_PROFILE_MODULE is specified in settings
    if hasattr(settings, 'AUTH_PROFILE_MODULE') and getattr(settings, 'AUTH_PROFILE_MODULE'):
        app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
        model = models.get_model(app_label, model_name)
        try:
            profile = new_user.get_profile()
        except model.DoesNotExist:
            # Only a newly created profile needs saving here.
            profile = model(user=new_user)
            profile.save()
    # Custom send activation email
    self.send_activation_email(new_user, registration_profile, password, site)
    # Send user_registered signal
    signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
    return new_user
|
def _evaluate(self, R, z, phi=0., t=0.):
    """NAME:
       _evaluate
    PURPOSE:
       evaluate the potential at R, z, phi
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       Phi(R, z, phi)
    HISTORY:
       2011-04-10 - Started - Bovy (NYU)
       2018-10-18 - Updated for general object potential - James Lane (UofT)
    """
    # Cylindrical distance between (R, phi) and the moving object at time t.
    cyl_dist = _cylR(R, phi, self._orb.R(t), self._orb.phi(t))
    # Evaluate the wrapped potential at that separation.
    return evaluatePotentials(self._pot, cyl_dist, self._orb.z(t) - z, use_physical=False)
|
def atlas_inventory_count_missing(inv1, inv2):
    """Find out how many bits are set in inv2 that are not set in inv1.

    Both arguments are strings of bytes interpreted as bitmasks; inv2 may
    be longer than inv1, in which case its extra bytes are compared
    against zero.

    Fixes: `xrange` (Python-2-only) replaced with `range`; the inner
    per-bit loop replaced with a byte-wise popcount.

    :returns: the number of bit positions set in inv2 but clear in inv1.
    """
    count = 0
    common = min(len(inv1), len(inv2))
    # Bits present in inv2 but absent in inv1, one byte at a time.
    for i in range(common):
        missing = ord(inv2[i]) & ~ord(inv1[i]) & 0xFF
        count += bin(missing).count('1')
    # Trailing bytes of inv2 have no counterpart in inv1: count all bits.
    for i in range(common, len(inv2)):
        count += bin(ord(inv2[i])).count('1')
    return count
|
def _ParseDocstring(function):
    """Parse the function's docstring into a dictionary of type checks.

    Keys are parameter names (plus "returns" for the return type); values
    are the type expressions extracted via the module-level regexes.
    """
    doc = function.__doc__
    if not doc:
        return {}
    checks = {}
    # Entries matched by param_regexp: "<type...> <name>".
    for match in param_regexp.finditer(doc):
        tokens = match.group(1).strip().split(" ")
        if len(tokens) >= 2:
            checks[tokens[-1]] = " ".join(tokens[:-1])
    for match in returns_regexp.finditer(doc):
        checks["returns"] = match.group(1)
    # Entries matched by type_regexp override/add name -> type pairs.
    for match in type_regexp.finditer(doc):
        checks[match.group(1)] = match.group(2)
    for match in rtype_regexp.finditer(doc):
        checks["returns"] = match.group(1)
    return checks
|
async def send(self, metric):
    """Transform metric to JSON bytestring and send to server.

    A fresh UDP datagram endpoint is created per call; the
    UDPClientProtocol receives the encoded message and is expected to
    transmit it. NOTE(review): confirm the protocol closes its transport
    after sending, otherwise endpoints accumulate.

    Args:
        metric (dict): Complete metric to send as JSON.
    """
    # Encode once; the protocol factory closes over the bytes payload.
    message = json.dumps(metric).encode('utf-8')
    await self.loop.create_datagram_endpoint(lambda: UDPClientProtocol(message), remote_addr=(self.ip, self.port))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.