signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def infer_dtype_from_scalar(val, pandas_dtype=False):
    """Interpret the dtype from a scalar.

    Parameters
    ----------
    val : scalar (or 0-dim ndarray)
        The value whose dtype is to be inferred.
    pandas_dtype : bool, default False
        Whether to infer dtype including pandas extension types.
        If False, scalar belongs to pandas extension types is inferred as
        object.

    Returns
    -------
    tuple of (dtype, val)
        The inferred numpy/pandas dtype and the (possibly converted) value.

    Raises
    ------
    ValueError
        If ``val`` is an ndarray that is not 0-dimensional.
    """
    dtype = np.object_
    # a 1-element ndarray
    if isinstance(val, np.ndarray):
        msg = "invalid ndarray passed to infer_dtype_from_scalar"
        if val.ndim != 0:
            raise ValueError(msg)
        dtype = val.dtype
        val = val.item()
    elif isinstance(val, str):
        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternately we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!
        dtype = np.object_
    elif isinstance(val, (np.datetime64, datetime)):
        val = tslibs.Timestamp(val)
        if val is tslibs.NaT or val.tz is None:
            # naive (or NaT) datetimes become datetime64[ns]
            dtype = np.dtype('M8[ns]')
        else:
            if pandas_dtype:
                dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
            else:
                # return datetimetz as object
                return np.object_, val
        # store the underlying int64 nanosecond value
        val = val.value
    elif isinstance(val, (np.timedelta64, timedelta)):
        val = tslibs.Timedelta(val).value
        dtype = np.dtype('m8[ns]')
    elif is_bool(val):
        dtype = np.bool_
    elif is_integer(val):
        if isinstance(val, np.integer):
            # preserve the exact numpy integer width
            dtype = type(val)
        else:
            dtype = np.int64
    elif is_float(val):
        if isinstance(val, np.floating):
            dtype = type(val)
        else:
            dtype = np.float64
    elif is_complex(val):
        dtype = np.complex_
    elif pandas_dtype:
        if lib.is_period(val):
            dtype = PeriodDtype(freq=val.freq)
            val = val.ordinal
    return dtype, val
def calculateDelay(self, at, delay):
    """Creates the delay from now til the specified start time, uses "at" if available.

    :param at: the start time in %a %b %d %H:%M:%S %Y format, or None.
    :param delay: the delay from now til start, or None.
    :return: the delay in seconds (never negative when computed from ``at``).
    """
    if at is None:
        # No absolute start time: fall back to the relative delay, or 0.
        return delay if delay is not None else 0
    remaining = datetime.strptime(at, DATETIME_FORMAT) - datetime.utcnow()
    return max(remaining.total_seconds(), 0)
def mode_to_str(mode):
    """Converts a tf.estimator.ModeKeys in a nice readable string.

    :param mode: The mode as a tf.estimator.ModeKeys
    :return: A human readable string representing the mode:
        "train", "eval", "predict" or "unknown".
    """
    readable = {
        tf.estimator.ModeKeys.TRAIN: "train",
        tf.estimator.ModeKeys.EVAL: "eval",
        tf.estimator.ModeKeys.PREDICT: "predict",
    }
    return readable.get(mode, "unknown")
def _get_nets_other(self, *args, **kwargs):
    """Deprecated. This will be removed in a future release."""
    import warnings
    warnings.warn(
        'Whois._get_nets_other() has been deprecated and will be '
        'removed. You should now use Whois.get_nets_other().'
    )
    # Delegate straight to the replacement API.
    return self.get_nets_other(*args, **kwargs)
def compute_position(self, layout):
    """Compute the position of each geometric object
    in concert with the other objects in the panel.
    """
    position = self.position
    params = position.setup_params(self.data)
    prepared = position.setup_data(self.data, params)
    self.data = position.compute_layer(prepared, params, layout)
def add_status_message(self, message, severity="info"):
    """Set a portal message.

    :param message: text to display via Plone's status-message mechanism.
    :param severity: message level passed through to ``addPortalMessage``
        (e.g. "info", "warning", "error"); defaults to "info".
    """
    self.context.plone_utils.addPortalMessage(message, severity)
def apply(key, value):
    '''Set a single key

    .. note::

        This will strip comments from your config file

    :param key: configuration key to set.
    :param value: value to store under ``key``.
    '''
    path = __opts__['conf_file']
    # A conf.d-style directory: write into the 'master' file inside it.
    if os.path.isdir(path):
        path = os.path.join(path, 'master')
    data = values()
    data[key] = value
    with salt.utils.files.fopen(path, 'w+') as fp_:
        # BUG FIX: previously the dump was not given a stream, so the file
        # was truncated by 'w+' and then left empty. Write into fp_.
        salt.utils.yaml.safe_dump(data, fp_, default_flow_style=False)
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
    """Overwrites the given cloud_obj's headers with the ones given as ``headers``
    and adds additional headers as defined in the HEADERS setting depending on
    the cloud_obj's file name.
    """
    if headers is None:
        headers = {}
    # don't set headers on directories
    if getattr(cloud_obj, "content_type", None) == "application/directory":
        return
    # Precedence (lowest to highest): pattern-derived headers,
    # headers already on the object, explicitly passed headers.
    merged = {}
    for pattern, pattern_headers in header_patterns:
        if pattern.match(cloud_obj.name):
            merged.update(pattern_headers.copy())
    merged.update(cloud_obj.headers)
    merged.update(headers)
    # Only push metadata when something actually changed.
    if merged != cloud_obj.headers:
        cloud_obj.headers = merged
        cloud_obj.sync_metadata()
def multi(self, lvl_list, msg, *args, **kwargs):
    """Log a message at multiple levels.

    :param lvl_list: iterable of logging levels to emit the message at.
    :param msg: the log message (may contain %-style placeholders).
    :param args: positional arguments for the message placeholders.
    :param kwargs: extra keyword arguments forwarded to ``log``.
    """
    for level in lvl_list:
        # BUG FIX: forward the positional args unpacked; previously the whole
        # tuple was passed as one argument, breaking %-style formatting.
        self.log(level, msg, *args, **kwargs)
def delete_security_group(self, sec_grp):
    '''Deletes the specified security group.'''
    group_id = self._find_security_group_id(sec_grp)
    result = self.network_conn.delete_security_group(group_id)
    # A falsy API response (e.g. None) is reported as success (True).
    return result if result else True
def _sortByCreated(a, b):
    """Comparator ordering objects by created date, newest first."""
    if a.created == b.created:
        return 0
    # Older objects sort after newer ones (descending by created date).
    return 1 if a.created < b.created else -1
def process_link(self, env, refnode, has_explicit_title, title, target):
    """Called after parsing title and target text, and creating the reference
    node. Alter the reference node and return it with chapel module and
    class information, if relevant.
    """
    refnode['chpl:module'] = env.temp_data.get('chpl:module')
    refnode['chpl:class'] = env.temp_data.get('chpl:class')
    if not has_explicit_title:
        # A leading '.' only has a meaning for the target; a leading '~'
        # only for the title.
        title = title.lstrip('.')
        target = target.lstrip('~')
        if title.startswith('~'):
            # '~mod.Class.name' is rendered with just the last component.
            title = title[1:]
            last_dot = title.rfind('.')
            if last_dot >= 0:
                title = title[last_dot + 1:]
    if target.startswith('.'):
        # If the first character is a dot, search more specific names
        # first. Else, search builtins first.
        target = target[1:]
        refnode['refspecific'] = True
    return title, target
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False):
    """Function to Query LLDP TLV on the interface.

    :param port_name: interface to query.
    :param is_ncb: query the nearest-customer-bridge agent (default).
    :param is_nb: query the nearest-bridge agent instead.
    :return: lldptool output, or None when neither agent is selected.
    """
    if is_ncb:
        bridge_scope = "ncb"
    elif is_nb:
        bridge_scope = "nb"
    else:
        LOG.error("Both NCB and NB are not selected to query LLDP")
        return None
    return self.run_lldptool(
        ["get-tlv", "-n", "-i", port_name, "-g", bridge_scope])
def qos_map_cos_mutation_cos0(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF payload for qos/map/cos-mutation/cos0 and passes it
    to the callback (``callback`` kwarg, defaulting to ``self._callback``).
    Requires ``name`` and ``cos0`` kwargs.
    """
    config = ET.Element("config")
    qos_elem = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map_elem = ET.SubElement(qos_elem, "map")
    cos_mutation = ET.SubElement(map_elem, "cos-mutation")
    ET.SubElement(cos_mutation, "name").text = kwargs.pop('name')
    ET.SubElement(cos_mutation, "cos0").text = kwargs.pop('cos0')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def json(body, charset='utf-8', **kwargs):
    """Takes JSON formatted data, converting it into native Python objects.

    :param body: raw JSON payload (bytes or str).
    :param charset: encoding used to decode ``body`` before parsing.
    """
    # NOTE(review): extra **kwargs are accepted but not forwarded anywhere.
    return json_converter.loads(text(body, charset=charset))
def bulk_invoke(func, args, nargs):
    """Bulk invoke a function via queues

    Uses internal implementation details of rq.

    NOTE(review): Python-2-only (``func_closure``, ``func_name``,
    ``unicode``) -- must be ported before use on Python 3.
    """
    # for comparison, simplest thing that works:
    # for i in nargs:
    #     argv = list(args)
    #     argv.append(i)
    #     func.delay(*argv)
    # some variances between cpy and pypy, sniff detect
    for closure in func.delay.func_closure:
        if getattr(closure.cell_contents, 'queue', None):
            ctx = closure.cell_contents
            break
    q = Queue(ctx.queue, connection=connection)
    argv = list(args)
    # trailing slot is a placeholder, replaced per element below
    argv.append(None)
    job = Job.create(
        func, args=argv, connection=connection,
        description="bucket-%s" % func.func_name, origin=q.name,
        status=JobStatus.QUEUED, timeout=ctx.timeout,
        result_ttl=0, ttl=ctx.ttl)
    for n in chunks(nargs, 100):
        job.created_at = datetime.utcnow()
        with connection.pipeline() as pipe:
            # NOTE: a single Job instance is reused; only its id, args and
            # created_at are changed before each enqueue.
            for s in n:
                argv[-1] = s
                job._id = unicode(uuid4())
                job.args = argv
                q.enqueue_job(job, pipeline=pipe)
            pipe.execute()
def raw_body(self) -> bytes:
    """Encoded Body.

    Lazily encodes ``original_body`` into bytes and caches the result in
    ``_raw_body``. Strings are UTF-8 encoded, bytes pass through, and any
    other payload (e.g. dict) is serialized via ``self.parser``.
    """
    if self._raw_body is not None or self.original_body is None:
        return self._raw_body
    body = self.original_body
    if isinstance(body, str):
        encoded = body.encode()
    elif isinstance(body, bytes):
        encoded = body
    else:
        # dicts and any other objects go through the parser; the parser may
        # return text, which still needs encoding.
        encoded = self.parser.encode(body)
        if isinstance(encoded, str):
            encoded = encoded.encode()
    self._raw_body = encoded
    return self._raw_body
def gp_ptspec():
    """example for a 2D-panel plot etc.

    Imports dielectron pT-spectrum files from the input directory, computes
    per-mass-window average-pT data points, and renders a panel plot via
    ``make_panel``. Returns the string 'done'.

    NOTE(review): Python-2-only code (indexing the result of ``map``,
    in-place ``dict.keys().sort()``, ``xrange``) -- port before running on
    Python 3. Block structure below was reconstructed from a
    whitespace-mangled dump; verify indentation against the original repo.
    """
    fenergies = ['19', '27', '39', '62', ]  # '200']
    nen = len(fenergies)
    mee_keys = ['pi0', 'LMR', 'omega', 'phi', 'IMR', 'jpsi']
    # mee_keys = ['LMR', ]
    mee_dict = OrderedDict((k, '') for k in mee_keys)
    # per-energy multiplicative display offsets used in the panel plot
    yscale = {'200': '300', '62': '5000', '39': '50', '27': '0.3', '19': '0.001'}
    inDir, outDir = getWorkDirs()
    data, data_avpt, dpt_dict = {}, {}, {}
    yvals, yvalsPt = [], []
    # per-energy normalization factors applied to non-'data' spectra
    scale = {'19': 1.3410566491548412, '200': 1.0, '39': 1.2719203877292842,
             '27': 1.350873678084769, '62': 1.2664666321635087}
    lmr_label = None
    for filename in os.listdir(inDir):  # import data
        file_url = os.path.join(inDir, filename)
        filebase = os.path.splitext(filename)[0]  # unique
        energy, mee_name, mee_range, data_type = splitFileName(filebase)
        if mee_name == 'LMR':
            mee_range_split = map(float, mee_range.split('-'))  # py2: list
            lmr_label = 'LMR: %g < M_{ee} < %g GeV/c^{2}' % (
                mee_range_split[0], mee_range_split[1])
        if energy == '200':
            continue
        if mee_name not in mee_keys:
            continue
        mee_dict[mee_name] = mee_range
        data[filebase] = np.loadtxt(open(file_url, 'rb'))
        if data_type == 'data':  # print data[filebase]
            data[filebase] = data[filebase][:-1]  # skip mT < 0.4 point
            if energy == '200':  # NOTE: unreachable ('200' skipped above)
                data[filebase][:, (1, 3, 4)] /= 0.5
        # calculate average pT first
        mask = (data[filebase][:, 0] > 0.4) & (data[filebase][:, 0] < 2.2)
        avpt_data = data[filebase][mask]
        pTs = avpt_data[:, 0]
        wghts = avpt_data[:, 1]
        probs = unp.uarray(avpt_data[:, 1], avpt_data[:, 3])  # dN/pT
        probs /= umath.fsum(probs)  # probabilities
        avpt = umath.fsum(pTs * probs)
        logging.info(('%s: {} %g' % (
            filebase, np.average(pTs, weights=wghts))).format(avpt))
        # TODO: syst. uncertainties
        # save datapoint for average pT and append to yvalsPt for yaxis range
        dp = [float(getEnergy4Key(energy)), avpt.nominal_value, 0., avpt.std_dev, 0.]
        # key suffix encodes the dataset type (cocktail/medium variants)
        avpt_key = mee_name
        if data_type == 'cocktail':
            avpt_key += '_c'
        if data_type == 'medium':
            avpt_key += '_m'
        if data_type == 'mediumMedOnly':
            avpt_key += '_mMed'
        if data_type == 'mediumQgpOnly':
            avpt_key += '_mQgp'
        if avpt_key in data_avpt:
            data_avpt[avpt_key].append(dp)
        else:
            data_avpt[avpt_key] = [dp]
        yvalsPt.append(avpt.nominal_value)
        # now adjust data for panel plot and append to yvals
        if data_type != 'data':
            data[filebase][:, (1, 3, 4)] /= scale[energy]
        data[filebase][:, (1, 3, 4)] *= float(yscale[energy])
        if data_type == 'cocktail' or fnmatch(data_type, '*medium*'):
            # model curves carry no point-wise uncertainties
            data[filebase][:, 2:] = 0.
        yvals += [v for v in data[filebase][:, 1] if v > 0]
        # prepare dict for panel plot
        dpt_dict_key = getSubplotTitle(mee_name, mee_range)
        if dpt_dict_key not in dpt_dict:
            ndsets = nen * 2
            # TODO: currently only 19/39/62 medium avail. w/ med/qgp/tot for each
            # July14: all energies available; TODO: fix dsidx
            if mee_name == 'LMR':
                ndsets += 4 * 3
            dpt_dict[dpt_dict_key] = [
                [None] * ndsets, [None] * ndsets, [None] * ndsets]
        enidx = fenergies.index(energy)
        dsidx = enidx
        if fnmatch(data_type, '*medium*'):
            # medium slots: 19: 0-2, 27: 3-5, 39: 6-8, 62: 9-11
            dsidx = (energy == '19') * 0 + (energy == '27') * 3 \
                + (energy == '39') * 6 + (energy == '62') * 9
            dsidx += (data_type == 'mediumQgpOnly') * 0 \
                + (data_type == 'mediumMedOnly') * 1
            dsidx += (data_type == 'medium') * 2
        else:
            # offset past the medium slots (number of medium calc avail.)
            dsidx += int(mee_name == 'LMR') * 4 * 3
            dsidx += int(data_type == 'data') * len(fenergies)
        dpt_dict[dpt_dict_key][0][dsidx] = data[filebase]  # data
        if data_type == 'data':  # properties
            dpt_dict[dpt_dict_key][1][dsidx] = \
                'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[enidx]
        elif data_type == 'medium':
            dpt_dict[dpt_dict_key][1][dsidx] = \
                'with lines lt 1 lw 5 lc %s' % default_colors[enidx]
        else:
            dpt_dict[dpt_dict_key][1][dsidx] = 'with lines lt %d lw 5 lc %s' % (
                2 + (data_type == 'mediumMedOnly')
                + (data_type == 'mediumQgpOnly') * 2,
                default_colors[enidx])
        dpt_dict[dpt_dict_key][2][dsidx] = ' '.join([  # legend titles
            getEnergy4Key(energy), 'GeV',
            '{/Symbol \264} %g' % (Decimal(yscale[energy]))  # .as_tuple().exponent
        ]) if data_type == 'data' else ''
    # use mass range in dict key to sort dpt_dict with increasing mass
    plot_key_order = dpt_dict.keys()  # py2: list
    plot_key_order.sort(key=lambda x: float(x.split(':')[1].split('-')[0]))
    # sort data_avpt by energy and apply x-shift for better visibility
    for k in data_avpt:
        data_avpt[k].sort(key=lambda x: x[0])
    energies = [dp[0] for dp in data_avpt[mee_keys[0]]]
    energies.append(215.)  # TODO: think of better upper limit
    linsp = {}
    for start, stop in zip(energies[:-1], energies[1:]):
        linsp[start] = np.linspace(start, stop, num=4 * len(mee_keys))
    for k in data_avpt:
        key = k.split('_')[0]
        for i in xrange(len(data_avpt[k])):
            data_avpt[k][i][0] = linsp[energies[i]][mee_keys.index(key)]
    # make panel plot
    # NOTE(review): yMin/yMax are computed but unused; yr is fixed below.
    yMin, yMax = 0.5 * min(yvals), 3 * max(yvals)
    make_panel(
        dpt_dict=OrderedDict((k, dpt_dict[k]) for k in plot_key_order),
        name=os.path.join(outDir, 'ptspec'),
        ylabel='1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
        xlabel='dielectron transverse momentum, p_{T} (GeV/c)',
        ylog=True, xr=[0, 2.2], yr=[1e-9, 1e4],
        # lmargin=0.12, bmargin=0.10, tmargin=1., rmargin=1.,
        key=['bottom left', 'samplen 0.5', 'width -2', 'opaque'],
        arrow_bar=0.002, layout='3x2', size='8in,8in')
    # make plot for LMR spectra only
    # lmr_key = getSubplotTitle('LMR', '0.4-0.76')
    # if energy == '200':
    #     lmr_key = getSubplotTitle('LMR', '0.3-0.76')
    # pseudo_point = np.array([[-1, 0, 0, 0, 0]])
    # model_titles = ['Cocktail + Model', 'Cocktail', 'in-Medium', 'QGP']
    # model_props = [
    #     'with lines lc %s lw 5 lt %d' % (default_colors[-2], i + 1)
    #     for i in xrange(len(model_titles))]
    # make_plot(
    #     data=dpt_dict[lmr_key][0] + [pseudo_point] * len(model_titles),
    #     properties=dpt_dict[lmr_key][1] + model_props,
    #     titles=dpt_dict[lmr_key][2] + model_titles,
    #     name=os.path.join(outDir, 'ptspecLMR'),
    #     ylabel='1/N@_{mb}^{evt} d^{2}N@_{ee}^{acc.}/dp_{T}dM_{ee} (c^3/GeV^2)',
    #     xlabel='dielectron transverse momentum, p_{T} (GeV/c)',
    #     ylog=True, xr=[0, 2.0], yr=[1e-8, 100],
    #     lmargin=0.15, bmargin=0.08, rmargin=0.98, tmargin=0.84,
    #     key=['maxrows 4', 'samplen 0.7', 'width -2', 'at graph 1., 1.2'],
    #     arrow_bar=0.005, size='10in,13in',
    #     labels={
    #         'stat. errors only': [0.7, 0.95, False], lmr_label: [0.05, 0.03, False],
    #         'STAR Preliminary': [0.05, 0.07, False]})
    # make mean pt plot
    # yMinPt, yMaxPt = 0.95 * min(yvalsPt), 1.05 * max(yvalsPt)
    # make_plot(
    #     data=[  # cocktail
    #         np.array(data_avpt[k + '_c']) for k in mee_keys
    #     ] + [  # medium
    #         np.array(data_avpt['LMR_m'])
    #     ] + [  # data
    #         np.array(data_avpt[k]) for k in mee_keys],
    #     properties=[
    #         'with lines lt 1 lw 4 lc %s' % default_colors[i if i < 5 else i + 1]
    #         for i in xrange(len(mee_keys))
    #     ] + [
    #         'with lines lt 2 lw 4 lc %s' % default_colors[mee_keys.index('LMR')]
    #     ] + [
    #         'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[i if i < 5 else i + 1]
    #         for i in xrange(len(mee_keys))],
    #     titles=[getMeeLabel(k) for k in mee_keys] + [''] * (len(mee_keys) + 1),
    #     name=os.path.join(outDir, 'meanPt'),
    #     xlabel='{/Symbol \326}s_{NN} (GeV)',
    #     ylabel='{/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
    #     xlog=True, xr=[17, 220], yr=[yMinPt, yMaxPt], size='11in,9in',
    #     key=['maxrows 1', 'at graph 1, 1.1'],
    #     lmargin=0.11, bmargin=0.11, tmargin=1., rmargin=1.,
    #     gpcalls=[
    #         'format x "%g"',
    #         'xtics (20, "" 30, 40, "" 50, 60, "" 70, "" 80, "" 90, 100, 200)'])
    # make mean pt plot for LMR only
    # make_plot(
    #     data=[
    #         np.array(data_avpt['LMR_c']),
    #         np.array(data_avpt['LMR_m']),
    #         np.array(data_avpt['LMR'])],
    #     properties=[
    #         'with lines lt 2 lw 4 lc %s' % default_colors[0],
    #         'with lines lt 1 lw 4 lc %s' % default_colors[0],
    #         'lt 1 lw 4 ps 1.5 lc %s pt 18' % default_colors[0]],
    #     titles=['cocktail', 'HMBT', getMeeLabel('data')],
    #     name=os.path.join(outDir, 'meanPtLMR'),
    #     xlabel='{/Symbol \326}s_{NN} (GeV)',
    #     ylabel='LMR {/Symbol \341}p_{T}{/Symbol \361} in STAR Acceptance (GeV/c)',
    #     lmargin=0.17, bmargin=0.15, tmargin=0.95, xlog=True, xr=[17, 80],
    #     yr=[0.65, 1.05],  # yr=[yMinPt, yMaxPt],
    #     key=['bottom right'],
    #     gpcalls=[
    #         'format x "%g"',
    #         'xtics (20, 30, 40, "" 50, 60, "" 70, "" 80, "" 90, 100, 200)'],
    #     labels={
    #         'stat. errors only': [0.7, 0.95, False], lmr_label: [0.05, 0.07, False],
    #         '0.4 < p_{T} < 2.2 GeV/c': [0.05, 0.14, False]})
    return 'done'
def union(self):
    """Return the union of all flags in this dict.

    Returns
    -------
    union : `DataQualityFlag`
        a new `DataQualityFlag` whose active and known segments
        are the union of those of the values of this dict
    """
    combined = reduce(operator.or_, self.values())
    # Name the result after all contributing flags.
    combined.name = ' | '.join(self.keys())
    return combined
def _do_prioritize(items):
    """Determine if we should perform prioritization.

    Currently done on tumor-only input samples and feeding into PureCN
    which needs the germline annotations.
    """
    # Respect an explicit opt-out on any sample.
    if any("tumoronly-prioritization" in dd.get_tools_off(d) for d in items):
        return None
    # Only applicable when the first sample carries a paired phenotype.
    if not vcfutils.get_paired_phenotype(items[0]):
        return None
    phenotypes = [vcfutils.get_paired_phenotype(d) for d in items]
    return "tumor" in phenotypes and "normal" not in phenotypes
def fetch(self, subscription_id, data={}, **kwargs):
    """Fetch Subscription for given Id.

    Args:
        subscription_id: Id for which subscription object is retrieved

    Returns:
        Subscription dict for given subscription Id
    """
    # NOTE(review): mutable default ``data={}`` is shared across calls; safe
    # only if the superclass never mutates it -- confirm upstream.
    return super(Subscription, self).fetch(subscription_id, data, **kwargs)
def dskgd(handle, dladsc):
    """Return the DSK descriptor from a DSK segment identified
    by a DAS handle and DLA descriptor.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskgd_c.html

    :param handle: Handle assigned to the opened DSK file.
    :type handle: int
    :param dladsc: DLA segment descriptor.
    :type dladsc: spiceypy.utils.support_types.SpiceDLADescr
    :return: DSK segment descriptor.
    :rtype: stypes.SpiceDSKDescr
    """
    descriptor = stypes.SpiceDSKDescr()
    # The C routine fills the descriptor in place.
    libspice.dskgd_c(ctypes.c_int(handle),
                     ctypes.byref(dladsc),
                     ctypes.byref(descriptor))
    return descriptor
def absnormpath(self, path):
    """Absolutize and minimalize the given path.

    Forces all relative paths to be absolute, and normalizes the path to
    eliminate dot and empty components.

    Args:
        path: Path to normalize.

    Returns:
        The normalized path relative to the current working directory,
        or the root directory if path is empty.
    """
    path = self.normcase(path)
    # _matching_string coerces cwd to the same string type (bytes vs str)
    # as ``path`` so the joins/comparisons below are type-consistent.
    cwd = self._matching_string(path, self.cwd)
    if not path:
        path = self.path_separator
    elif not self._starts_with_root_path(path):
        # Prefix relative paths with cwd, if cwd is not root.
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        path = self._path_separator(path).join(
            (cwd != root_name and cwd or empty, path))
    if path == self._matching_string(path, '.'):
        path = cwd
    return self.normpath(path)
def energy_entropy(data, fs, numOfShortBlocks=10):
    """Computes entropy of energy.

    :param data: input signal; averaged over axis 1 to obtain a mono signal
        (assumes a 2D [samples x channels] array -- TODO confirm with callers).
    :param fs: sampling rate (unused here, kept for interface consistency).
    :param numOfShortBlocks: number of sub-blocks the signal is split into.
    :return: the entropy of the normalized sub-block energies.
    """
    signal = np.mean(data, axis=1)
    total_energy = np.sum(signal ** 2)  # total data energy
    n_samples = len(signal)
    block_len = int(np.floor(n_samples / numOfShortBlocks))
    # Trim so the signal divides evenly into the sub-blocks.
    if n_samples != block_len * numOfShortBlocks:
        signal = signal[0:block_len * numOfShortBlocks]
    # One column per short block (Fortran order keeps each block contiguous).
    sub_windows = signal.reshape(block_len, numOfShortBlocks, order='F').copy()
    # Normalized sub-block energies.
    probs = np.sum(sub_windows ** 2, axis=0) / (total_energy + EPSILON)
    # Entropy of the normalized sub-block energies.
    return -np.sum(probs * np.log2(probs + EPSILON))
def to_rgba(val):
    '''to_rgba(val) is identical to matplotlib.colors.to_rgba(val) except that it operates over lists
    as well as individual elements to yield matrices of rgba values. In addition, it always yields
    numpy vectors or matrices.'''
    # Already an n-by-4 matrix of rgba rows: pass through unchanged.
    if pimms.is_npmatrix(val) and val.shape[1] == 4:
        return val
    try:
        return np.asarray(matplotlib.colors.to_rgba(val))
    except Exception:
        # Not a single color spec; convert element-wise into an n-by-4 matrix.
        return np.asarray([matplotlib.colors.to_rgba(u) for u in val])
def get_header_as_text(file_content, reference_id):
    """Returns the cable's header as text.

    `file_content`
        The HTML file content, c.f. `get_file_content`.
    `reference_id`
        The cable's reference id (unused here, kept for interface parity).
    """
    sections = _CONTENT_PATTERN.findall(file_content)
    if len(sections) == 2:
        # Two <code><pre> sections: the first is the header.
        return _clean_html(sections[0])
    if len(sections) == 1:
        # Only the body is present; there is no header.
        return ''
    raise ValueError('Unexpected <code><pre> sections: "%r"' % sections)
def before_insert(mapper, conn, target):
    """event.listen method for Sqlalchemy to set the sequence_id for this
    object and create an ObjectNumber value for the id_.
    """
    # from identity import ObjectNumber
    # assert not target.fk_vid or not ObjectNumber.parse(target.fk_vid).revision
    if target.sequence_id is None:
        from ambry.orm.exc import DatabaseError
        raise DatabaseError('Must have sequence_id before insertion')
    # Check that the id column is always sequence id 1
    assert (target.name == 'id') == (target.sequence_id == 1), \
        (target.name, target.sequence_id)
    # Re-use the update hook for the shared bookkeeping.
    Column.before_update(mapper, conn, target)
def render_formset(formset, **kwargs):
    """Render a formset to a Bootstrap layout.

    :param formset: the formset to render.
    :param kwargs: renderer options; also used to select the renderer class.
    :return: the rendered output.
    """
    renderer_cls = get_formset_renderer(**kwargs)
    renderer = renderer_cls(formset, **kwargs)
    return renderer.render()
def get_plaintext_citations(arxiv_id):
    """Get the citations of a given preprint, in plain text.

    .. note::
        Bulk download of sources from arXiv is not permitted by their API.
        You should have a look at http://arxiv.org/help/bulk_data_s3.

    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``)
        in a canonical form.
    :returns: A list of cleaned plaintext citations.
    """
    # One .bbl file per document part; flatten all their citations.
    return [citation
            for bbl_file in arxiv.get_bbl(arxiv_id)
            for citation in bbl.get_plaintext_citations(bbl_file)]
def lazy_callable(modname, *names, **kwargs):
    """Performs lazy importing of one or more callables.

    :func:`lazy_callable` creates functions that are thin wrappers that pass
    any and all arguments straight to the target module's callables. These can
    be functions or classes. The full loading of that module is only actually
    triggered when the returned lazy function itself is called. This lazy
    import of the target module uses the same mechanism as
    :func:`lazy_module`.

    If, however, the target module has already been fully imported prior
    to invocation of :func:`lazy_callable`, then the target callables
    themselves are returned and no lazy imports are made.

    :func:`lazy_function` and :func:`lazy_class` are aliases of
    :func:`lazy_callable`.

    Parameters
    ----------
    modname : str
        The base module from where to import the callable(s) in *names*,
        or a full 'module_name.callable_name' string.
    names : str (optional)
        The callable name(s) to import from the module specified by *modname*.
        If left empty, *modname* is assumed to also include the callable name
        to import.
    error_strings : dict, optional
        A dictionary of strings to use when reporting loading errors (either a
        missing module, or a missing callable name in the loaded module).
        *error_strings* follows the same usage as described under
        :func:`lazy_module`, with the exceptions that 1) a further key,
        'msg_callable', can be supplied to be used as the error when a module
        is successfully loaded but the target callable can't be found therein
        (defaulting to :attr:`lazy_import._MSG_CALLABLE`); 2) a key 'callable'
        is always added with the callable name being loaded.
    lazy_mod_class : type, optional
        See definition under :func:`lazy_module`.
    lazy_call_class : type, optional
        Analogously to *lazy_mod_class*, allows setting a custom class to
        handle lazy callables, other than the default :class:`LazyCallable`.

    Returns
    -------
    wrapper function or tuple of wrapper functions
        If *names* is passed, returns a tuple of wrapper functions, one for
        each element in *names*.
        If only *modname* is passed it is assumed to be a full
        'module_name.callable_name' string, in which case the wrapper for the
        imported callable is returned directly, and not in a tuple.

    Notes
    -----
    Unlike :func:`lazy_module`, which returns a lazy module that eventually
    mutates into the fully-functional version, :func:`lazy_callable` only
    returns thin wrappers that never change. This means that the returned
    wrapper object never truly becomes the one under the module's namespace,
    even after successful loading of the module in *modname*. This is fine for
    most practical use cases, but may break code that relies on the usage of
    the returned objects other than calling them. One such example is the lazy
    import of a class: it's fine to use the returned wrapper to instantiate an
    object, but it can't be used, for instance, to subclass from.

    Examples
    --------
    >>> import lazy_import, sys
    >>> fn = lazy_import.lazy_callable("numpy.arange")
    >>> sys.modules['numpy']
    Lazily-loaded module numpy
    >>> fn(10)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> sys.modules['numpy']
    <module 'numpy' from '/usr/local/lib/python3.5/site-packages/numpy/__init__.py'>

    >>> import lazy_import, sys
    >>> cl = lazy_import.lazy_callable("numpy.ndarray")  # a class
    >>> obj = cl([1, 2])  # This works OK (and also triggers the loading of numpy)
    >>> class MySubclass(cl):  # This fails because cl is just a wrapper,
    >>>     pass               # not an actual class.

    See Also
    --------
    :func:`lazy_module`
    :class:`LazyCallable`
    :class:`LazyModule`
    """
    if not names:
        # Split a combined 'module_name.callable_name' string.
        modname, _, name = modname.rpartition(".")
    lazy_mod_class = _setdef(kwargs, 'lazy_mod_class', LazyModule)
    lazy_call_class = _setdef(kwargs, 'lazy_call_class', LazyCallable)
    error_strings = _setdef(kwargs, 'error_strings', {})
    _set_default_errornames(modname, error_strings, call=True)
    if not names:
        # We allow passing a single string as 'modname.callable_name',
        # in which case the wrapper is returned directly and not as a list.
        return _lazy_callable(modname, name, error_strings.copy(),
                              lazy_mod_class, lazy_call_class)
    return tuple(_lazy_callable(modname, cname, error_strings.copy(),
                                lazy_mod_class, lazy_call_class)
                 for cname in names)
def _coerce_to_ndarray(self):
    """Coerce to an ndarray of object dtype, replacing masked entries
    with the NA value."""
    # TODO(jreback) make this better
    data = self._data.astype(object)
    data[self._mask] = self._na_value
    return data
def checkInfo(email=None, username=None, api_key=None):
    '''Method that checks if the given identifiers are stored in the pipl.com website.

    :param email: email to be queried, or None to skip the email search.
    :param username: username to be queried, or None to skip it.
    :param api_key: api_key to be used in pipl.com. If not provided, the API
        key will be searched in the config_api_keys.py file.
    :return: Python structure for the Json received, of the form
        {"person": [...], "records": [...]} with one entry per search launched.
    '''
    # This is for i3visio
    if api_key is None:  # idiom fix: identity comparison with None
        # api_key = raw_input("Insert the API KEY here:\t")
        allKeys = config_api_keys.returnListOfAPIKeys()
        try:
            api_key = allKeys["pipl_com"]
        except KeyError:
            # Narrowed from a bare except: only a missing key is expected
            # here. The samplekey will be used but it has a limit of
            # 10 queries/day.
            api_key = "samplekey"
    results = {"person": [], "records": []}
    if username is not None:
        request = SearchAPIRequest(username=username, api_key=api_key)
        person, records = launchRequest(request)
        # Appending the results
        results["person"].append(person)
        results["records"].append(records)
    if email is not None:
        request = SearchAPIRequest(email=email, api_key=api_key)
        person, records = launchRequest(request)
        # Appending the results
        results["person"].append(person)
        results["records"].append(records)
    return results
def hexdigest(self):
    """Return the digest value as a string of hexadecimal digits."""
    # Prefer the cached digest when one was supplied at construction time.
    if self._pre_computed_hash is not None:
        return self._pre_computed_hash
    return libssdeep_wrapper.fuzzy_digest(self._state, 0)
def _unwindGenerator(self, generator, _prev=None):
    """Unwind (resume) generator.

    Iterates the spider's generator, wrapping yielded Requests so that their
    responses resume this generator; items and already-wired requests are
    passed through unchanged.
    """
    while True:
        if _prev:
            # Resume with the value handed back to us instead of
            # advancing the generator again.
            ret, _prev = _prev, None
        else:
            try:
                ret = next(generator)
            except StopIteration:
                break
        if isinstance(ret, Request):
            if ret.callback:
                warnings.warn("Got a request with callback set, bypassing "
                              "the generator wrapper. Generator may not "
                              "be able to resume. %s" % ret)
            elif ret.errback:
                # By Scrapy defaults, a request without callback defaults to
                # self.parse spider method.
                warnings.warn("Got a request with errback set, bypassing "
                              "the generator wrapper. Generator may not "
                              "be able to resume. %s" % ret)
            else:
                # Wrap it so the response resumes this generator, then stop:
                # the wrapped request's callback continues the unwinding.
                yield self._wrapRequest(ret, generator)
                return
        # A request with callbacks, item or None object.
        yield ret
def notify_thread_not_alive(self, thread_id, use_lock=True):
    """if thread is not alive, cancel trace_dispatch processing"""
    if self.writer is None:
        # No debugger connection: nothing to notify.
        return
    # NULL acts as a no-op context manager when locking is not requested.
    with self._lock_running_thread_ids if use_lock else NULL:
        if not self._enable_thread_notifications:
            return
        thread = self._running_thread_ids.pop(thread_id, None)
        if thread is None:
            # Unknown or already-removed thread: nothing to do.
            return
        was_notified = thread.additional_info.pydev_notify_kill
        if not was_notified:
            # Mark as notified so the kill message is sent only once.
            thread.additional_info.pydev_notify_kill = True
    self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id))
def _add_thousand_g ( self , variant_obj , info_dict ) :
"""Add the thousand genomes frequency
Args :
variant _ obj ( puzzle . models . Variant )
info _ dict ( dict ) : A info dictionary""" | thousand_g = info_dict . get ( '1000GAF' )
if thousand_g :
logger . debug ( "Updating thousand_g to: {0}" . format ( thousand_g ) )
variant_obj . thousand_g = float ( thousand_g )
variant_obj . add_frequency ( '1000GAF' , variant_obj . get ( 'thousand_g' ) ) |
def watch_instances(self, flag):
    """Whether or not the Class Instances are being watched."""
    # The underlying C API expects an integer flag, not a Python bool.
    enabled = int(flag)
    lib.EnvSetDefclassWatchInstances(self._env, enabled, self._cls)
def draw_label(self, layout_info, ax):
    """Draw facet label onto the axes.

    This function will only draw labels if they are needed.

    Parameters
    ----------
    layout_info : dict-like
        Layout information. Row from the `layout` table.
    ax : axes
        Axes to label.
    """
    at_top = layout_info['ROW'] == 1
    at_right = layout_info['COL'] == self.ncol
    # Column labels go along the top row of the grid.
    if at_top and len(self.cols):
        info = layout_info[list(self.cols)]
        info._meta = {'dimension': 'cols'}
        self.draw_strip_text(self.labeller(info), 'top', ax)
    # Row labels go along the rightmost column of the grid.
    if at_right and len(self.rows):
        info = layout_info[list(self.rows)]
        info._meta = {'dimension': 'rows'}
        self.draw_strip_text(self.labeller(info), 'right', ax)
def get_rubric_metadata(self):
    """Gets the metadata for a rubric assessment.

    return: (osid.Metadata) - metadata for the assessment
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['rubric'])
    metadata['existing_id_values'] = self._my_map['rubricId']
    return Metadata(**metadata)
def int_pair(s, default=(0, None)):
    """Return the digits to either side of a single non-digit character as a 2-tuple of integers

    >>> int_pair('90210-007')
    (90210, 7)
    >>> int_pair('04321.0123')
    (4321, 123)
    """
    pieces = re.split(r'[^0-9]+', str(s).strip())
    # No leading digit run at all -> fall back to the caller's default.
    if not pieces or not pieces[0]:
        return default
    first = int(pieces[0])
    if len(pieces) > 1 and pieces[1]:
        return (first, int(pieces[1]))
    # Only one digit run: pair it with the default second element.
    return (first, default[1])
def _make_futures ( futmap_keys , class_check , make_result_fn ) :
"""Create futures and a futuremap for the keys in futmap _ keys ,
and create a request - level future to be bassed to the C API .""" | futmap = { }
for key in futmap_keys :
if class_check is not None and not isinstance ( key , class_check ) :
raise ValueError ( "Expected list of {}" . format ( type ( class_check ) ) )
futmap [ key ] = concurrent . futures . Future ( )
if not futmap [ key ] . set_running_or_notify_cancel ( ) :
raise RuntimeError ( "Future was cancelled prematurely" )
# Create an internal future for the entire request ,
# this future will trigger _ make _ . . . _ result ( ) and set result / exception
# per topic , future in futmap .
f = concurrent . futures . Future ( )
f . add_done_callback ( lambda f : make_result_fn ( f , futmap ) )
if not f . set_running_or_notify_cancel ( ) :
raise RuntimeError ( "Future was cancelled prematurely" )
return f , futmap |
def get_version():
    """Get the Windows OS version running on the machine.

    Returns:
        The Windows OS version as a tuple ``(major, minor, sp_ver,
        is_server)`` (comparable with the values list in the class), or
        ``NO_WIN`` when not running on Windows.
    """
    # Other OS check
    if 'win' not in sys.platform:
        return NO_WIN
    # Get infos
    win_ver = sys.getwindowsversion()
    try:
        # Python 3.6.x or upper -> use 'platform_version' attribute
        major, minor, build = win_ver.platform_version
    except AttributeError:
        if sys.version_info < (3, 0):
            # Python 2.7.x -> use 'platform' module to ensure the correct
            # values (seems that Win 10 is not correctly detected)
            from platform import _get_real_winver
            major, minor, build = _get_real_winver(win_ver.major,
                                                   win_ver.minor,
                                                   win_ver.build)
            # 'long' to 'int'
            major, minor, build = int(major), int(minor), int(build)
        else:
            # Python 3.0.x - 3.5.x -> keep 'sys.getwindowsversion()' values
            major, minor, build = win_ver.major, win_ver.minor, win_ver.build
    # Check if it is a server edition (works only on Python 2.7.x or newer)
    try:
        is_server = 1 if win_ver.product_type == 3 else 0
    except AttributeError:
        is_server = 0
    # Parse Service Pack version (or Build number)
    try:
        if major == 10:
            # On Windows 10 / Windows Server 2016 the service pack version
            # is the build number instead.
            sp_ver = build
        else:
            sp_ver = win_ver.service_pack_major or 0
    except AttributeError:
        try:
            # BUGFIX: 'service_pack' is a string like "Service Pack 3" and
            # rsplit() returns a list -- the last element must be selected
            # before int(); the original int(list) raised an uncaught
            # TypeError on this fallback path.
            sp_ver = int(win_ver.service_pack.rsplit(' ', 1)[-1])
        except (IndexError, ValueError):
            sp_ver = 0
    # Return the final version data
    return (major, minor, sp_ver, is_server)
async def start_pairing(self):
    """Start pairing procedure.

    Initializes the SRP context and sends the initial crypto-pairing
    message (method 0x00, sequence 0x01). On success the device's salt
    and public key are stored for the next pairing step; a back-off TLV
    in the response raises an exception carrying the requested delay.
    """
    self.srp.initialize()
    # First pairing message: method 0x00, sequence number 0x01.
    msg = messages.crypto_pairing({tlv8.TLV_METHOD: b'\x00', tlv8.TLV_SEQ_NO: b'\x01'})
    resp = await self.protocol.send_and_receive(msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    if tlv8.TLV_BACK_OFF in pairing_data:
        # Device is rate-limiting pairing attempts; surface the delay.
        time = int.from_bytes(pairing_data[tlv8.TLV_BACK_OFF], byteorder='big')
        raise Exception('back off {0}s'.format(time))
    # Remember the device credentials needed by the follow-up steps.
    self._atv_salt = pairing_data[tlv8.TLV_SALT]
    self._atv_pub_key = pairing_data[tlv8.TLV_PUBLIC_KEY]
def decode_varint_1(buffer, pos=0):
    """Decode an integer from a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        buffer (bytes-like): any object acceptable by ``memoryview``
        pos (int): optional position to read from

    Returns:
        (int, int): Decoded int value and next read position
    """
    result = 0
    shift = 0
    view = memoryview(buffer)
    # A 64-bit varint occupies at most 10 bytes.
    for i in range(pos, pos + 10):
        try:
            b = _read_byte(view, i)
        except IndexError:
            raise ValueError("End of byte stream")
        # Accumulate the low 7 bits of every byte (final byte has the
        # continuation bit clear, so masking is a no-op there).
        result |= (b & 0x7f) << shift
        if not (b & 0x80):
            break
        shift += 7
    else:
        # Max size of encoded value is 10 bytes for unsigned values
        raise ValueError("Out of double range")
    # Normalize sign (zigzag decoding).
    return (result >> 1) ^ -(result & 1), i + 1
async def on_error(self, event_method, *args, **kwargs):
    """|coro|

    The default error handler provided by the client.

    By default this prints to :data:`sys.stderr` however it could be
    overridden to have a different implementation.
    Check :func:`discord.on_error` for more details.
    """
    message = 'Ignoring exception in {}'.format(event_method)
    print(message, file=sys.stderr)
    traceback.print_exc()
def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False, debug_data=False, plot_file=None, corrected=True, unbiased=True):
    """Calculates the Hurst exponent by a standard rescaled range (R/S) approach.

    Explanation of Hurst exponent:
        The Hurst exponent is a measure for the "long-term memory" of a time
        series, meaning the long statistical dependencies in the data that do
        not originate from cycles. It originates from H. E. Hurst's work on
        long-term storage in water reservoirs: the range R of the cumulative
        departures from the mean, normalized by the standard deviation sigma,
        follows R/sigma = (N/2)^K for series length N. K is the Hurst
        exponent: 0.5 for white noise, above 0.5 for positively dependent
        series and below 0.5 for negatively dependent ones.

    Explanation of the algorithm:
        The series of length N is split into non-overlapping subseries of
        length n; R and S (= sigma) are calculated for each subseries and
        averaged to (R/S)_n. Repeating this for several n, the exponent K is
        obtained by fitting a straight line to log((R/S)_n) vs log(n). There
        is no consensus on how to choose the subseries lengths n, so the
        choice is left to the user; the module provides helpers for typical
        values (binary_n: N/2, N/4, N/8, ...; logarithmic_n: min_n, min_n*f,
        min_n*f^2, ...).

    References:
        .. [h_1] H. E. Hurst, "The problem of long-term storage in
            reservoirs," International Association of Scientific Hydrology.
            Bulletin, vol. 1, no. 3, pp. 13-27, 1956.
        .. [h_2] H. E. Hurst, "A suggested statistical model of some time
            series which occur in nature," Nature, vol. 180, p. 494, 1957.
        .. [h_3] R. Weron, "Estimating long-range dependence: finite sample
            properties and confidence intervals," Physica A, vol. 312,
            no. 1, pp. 285-299, 2002.

    Reference Code:
        .. [h_a] "hurst" function in R-package "pracma"
            (https://cran.r-project.org/web/packages/pracma/pracma.pdf).
            Of pracma's several estimates, its "empirical Hurst exponent" is
            the one that corresponds exactly to this implementation (with
            corrected=False, fit="poly" and n chosen as the divisors of the
            reduced sequence length).
        .. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst
            exponent using R/S Analysis"
            (https://ideas.repec.org/c/wuu/hscode/m11003.html). With the
            same nvals and fit="poly", nolds yields identical results.
        .. [h_c] Bill Davidson, "Hurst exponent"
            (http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent)
        .. [h_d] Tomaso Aste, "Generalized Hurst exponent"
            (http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent)

    Args:
        data (array-like of float):
            time series

    Kwargs:
        nvals (iterable of int):
            sizes of subseries to use (default: logmid_n(total_N,
            ratio=1/4.0, nsteps=15), i.e. 15 logarithmically spaced values in
            the medium 25% of the logarithmic range). Low n gives high
            variance in r and s; high n leaves too few subsequences for a
            meaningful mean. Logarithmic spacing translates to even spacing
            in the log-log plot.
        fit (str):
            fitting method for the line fit: 'poly' for least squares or
            'RANSAC' for RANSAC-fitting, which is more robust to outliers
        debug_plot (boolean):
            if True, a simple plot of the final line-fitting step is shown
        debug_data (boolean):
            if True, debugging data is returned alongside the result
        plot_file (str):
            if debug_plot is True and plot_file is not None, the plot is
            saved under the given file name instead of shown via plt.show()
        corrected (boolean):
            if True, the Anis-Lloyd-Peters correction factor is applied to
            the output according to the expected value for the individual
            (R/S)_n (see [h_3]_)
        unbiased (boolean):
            if True, the standard deviation based on the unbiased variance
            (1/(N-1) instead of 1/N) is used. This should be the default
            choice, since the true mean of the sequences is not known; only
            change it to recreate results of other implementations.

    Returns:
        float:
            estimated Hurst exponent K (K = 0.5: no long-range correlations;
            K < 0.5: negative; K > 0.5: positive long-range correlations)
        (1d-vector, 1d-vector, list):
            only present if debug_data is True: debug data of the form
            ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for
            log(n), ``rsvals`` the corresponding log((R/S)_n) and ``poly``
            the line coefficients (``[slope, intercept]``)
    """
    data = np.asarray(data)
    total_N = len(data)
    if nvals is None:
        # chooses a default value for nvals that will give 15 logarithmically
        # spaced datapoints leaning towards the middle of the logarithmic range
        # (since both too small and too large n introduce too much variance)
        nvals = logmid_n(total_N, ratio=1 / 4.0, nsteps=15)
    # get individual values for (R/S)_n
    rsvals = np.array([rs(data, n, unbiased=unbiased) for n in nvals])
    # filter NaNs (zeros should not be possible, because if R is 0 then
    # S is also zero)
    not_nan = np.logical_not(np.isnan(rsvals))
    rsvals = rsvals[not_nan]
    nvals = np.asarray(nvals)[not_nan]
    # it may happen that no rsvals are left (if all values of data are the same)
    if len(rsvals) == 0:
        poly = [np.nan, np.nan]
        if debug_plot:
            warnings.warn("Cannot display debug plot, all (R/S)_n are NaN")
    else:
        # fit a line to the logarithm of the obtained (R/S)_n
        xvals = np.log(nvals)
        yvals = np.log(rsvals)
        if corrected:
            # subtract the expected (R/S)_n for uncorrelated data
            # (Anis-Lloyd-Peters correction)
            yvals -= np.log([expected_rs(n) for n in nvals])
        poly = poly_fit(xvals, yvals, 1, fit=fit)
        if debug_plot:
            plot_reg(xvals, yvals, poly, "log(n)", "log((R/S)_n)", fname=plot_file)
    # account for correction if necessary
    h = poly[0] + 0.5 if corrected else poly[0]
    # return line slope (+ correction) as hurst exponent
    if debug_data:
        return (h, (np.log(nvals), np.log(rsvals), poly))
    else:
        return h
def create_top_level_index_entry(title, max_depth, subtitles):
    """Function for creating a text entry in index.rst for its content.

    :param title: Title for the content.
    :type title: str

    :param max_depth: Value for max_depth in the top level index content.
    :type max_depth: int

    :param subtitles: list of subtitles that is available.
    :type subtitles: list

    :return: A text for the content of top level index.
    :rtype: str
    """
    # Title followed by an RST underline of matching length.
    return_text = title + '\n'
    dash = '-' * len(title) + '\n'
    return_text += dash + '\n'
    # Start the toctree directive with the requested depth.
    return_text += '.. toctree::' + '\n'
    return_text += '    :maxdepth: ' + str(max_depth) + '\n\n'
    # Each subtitle becomes one indented toctree entry.
    for subtitle in subtitles:
        return_text += '        ' + subtitle + '\n\n'
    return return_text
def delete_columns(mat, columns_to_delete):
    '''Strip the given column indices from a sparse matrix.

    >>> a = csr_matrix(np.array([[0, 1, 3, 0, 1, 0],
    ...                          [0, 0, 1, 0, 1, 1]]))
    >>> delete_columns(a, [1, 2]).todense()
    matrix([[0, 0, 1, 0],
            [0, 0, 1, 1]])

    Parameters
    ----------
    mat : csr_matrix
    columns_to_delete : list[int]

    Returns
    -------
    csr_matrix that is stripped of columns indices columns_to_delete
    '''
    keep = np.ones(mat.shape[1], dtype=bool)
    keep[columns_to_delete] = False
    # Column slicing is efficient on CSC; convert back to CSR afterwards.
    return mat.tocsc()[:, keep].tocsr()
def add_apis(self, logical_id, apis):
    """Stores the given APIs tagged under the given logicalId

    Parameters
    ----------
    logical_id : str
        LogicalId of the AWS::Serverless::Api resource
    apis : list of samcli.commands.local.lib.provider.Api
        List of APIs available in this resource
    """
    resource_properties = self._get_properties(logical_id)
    resource_properties.apis.extend(apis)
def makeMNBaseURL(url):
    """Attempt to create a valid MN BaseURL when one or more sections of the URL
    are missing, substituting defaults for scheme, host and path as needed.
    """
    parsed = urllib.parse.urlparse(url, scheme=d1_common.const.DEFAULT_MN_PROTOCOL)
    if parsed.netloc and parsed.path:
        netloc, path = parsed.netloc, parsed.path
    elif parsed.netloc:
        # Host given without a path: fall back to the default MN path.
        netloc, path = parsed.netloc, d1_common.const.DEFAULT_MN_PATH
    elif parsed.path:
        # Scheme-less input lands in .path; split host from remainder.
        parts = parsed.path.split('/', 1)
        netloc = parts[0]
        path = parts[1] if len(parts) > 1 else d1_common.const.DEFAULT_MN_PATH
    else:
        netloc = d1_common.const.DEFAULT_MN_HOST
        path = d1_common.const.DEFAULT_MN_PATH
    return urllib.parse.urlunparse(
        (parsed.scheme, netloc, path, parsed.params, parsed.query, parsed.fragment))
def _regularize(x, y, ties):
    """Regularize the values, make them ordered and remove duplicates.

    If the ``ties`` parameter is explicitly set to 'ordered' then order
    is already assumed. Otherwise, the removal process will happen.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The x vector.
    y : array-like, shape=(n_samples,)
        The y vector.
    ties : str
        One of {'ordered', 'mean'}, handles the ties.
    """
    # Validate both inputs as 1-d arrays of the expected dtype.
    x, y = [column_or_1d(check_array(arr, ensure_2d=False, force_all_finite=False, dtype=DTYPE)) for arr in (x, y)]
    nx = x.shape[0]
    if nx != y.shape[0]:
        raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0]))
    # manipulate x if needed. if ties is 'ordered' we assume that x is
    # already ordered and everything has been handled already...
    if ties != 'ordered':
        o = np.argsort(x)
        # keep ordered with one another
        x = x[o]
        y = y[o]
        # what if any are the same?
        ux = np.unique(x)
        if ux.shape[0] < nx:
            # Do we want to warn for this?
            # warnings.warn('collapsing to unique "x" values')
            # vectorize this function to apply to each "cell" in the array
            def tie_apply(f, u_val):
                vals = y[x == u_val]
                # mask y where x == the unique value
                return f(vals)
            # replace the duplicates in the y array with the "tie" func
            func = VALID_TIES.get(ties, _identity)
            # maybe expensive to vectorize on the fly? Not sure; would need
            # to do some benchmarking. However, we need to in order to keep y
            # and x in scope...
            y = np.vectorize(tie_apply)(func, ux)
            # does ux need ordering? hmm..
            x = ux
    return x, y
def gatherBy(self, func):
    """Generate only rows for which the given func returns True."""
    # Start from the cursor row and wrap around the whole sheet.
    for idx in rotate_range(len(self.rows), self.cursorRowIndex):
        try:
            candidate = self.rows[idx]
            if func(candidate):
                yield candidate
        except Exception:
            # Best-effort: rows that fail lookup or predicate are skipped.
            pass
def parse_args():
    """Parses command line arguments.

    Returns:
        The parsed ``argparse`` namespace when a subcommand was selected.
        Otherwise prints the help text and exits with status 1.
    """
    parser = ArgumentParser(description="ModelBase builder")
    subparsers = parser.add_subparsers()
    sql_parser = subparsers.add_parser(
        "get-query",
        description="Usage: e.g. psql -c \"copy ($(python3 lib/generate_models.py get-query)) to " +
                    "stdout with csv header\" DB_NAME postgres")
    sql_parser.set_defaults(func=print_sql_query)
    gen_parser = subparsers.add_parser("generate")
    gen_parser.add_argument("filename", nargs="?",
                            help="Read this file for input, or STDIN if not " "given")
    gen_parser.add_argument("-i", "--indent", default="    ")
    gen_parser.add_argument("-c", "--created-at-col-name", default="created_at")
    gen_parser.add_argument("-u", "--updated-at-col-name", default="updated_at")
    gen_parser.set_defaults(func=generate_models)
    args = parser.parse_args()
    if hasattr(args, "func"):
        return args
    else:
        # BUGFIX: the original called 'arg_parser.print_help()' -- an
        # undefined name that raised NameError instead of printing help.
        parser.print_help()
        sys.exit(1)
def OnSearch(self, event):
    """Event handler for starting the search"""
    search_string = self.search.GetValue()
    # Record the term once and cap the history at 10 entries (drop oldest).
    if search_string not in self.search_history:
        self.search_history.append(search_string)
    if len(self.search_history) > 10:
        self.search_history.pop(0)
    # Rebuild the dropdown menu so it reflects the updated history.
    self.menu = self.make_menu()
    self.search.SetMenu(self.menu)
    search_flags = self.search_options + ["FIND_NEXT"]
    post_command_event(self, self.FindMsg, text=search_string, flags=search_flags)
    self.search.SetFocus()
def handle_presence(self, old_present):
    '''Fire presence events if enabled'''
    # On the first run it may need more time for the EventPublisher
    # to come up and be ready. Set the timeout to account for this.
    if self.presence_events and self.event.connect_pull(timeout=3):
        present = self.ckminions.connected_ids()
        new = present.difference(old_present)
        lost = old_present.difference(present)
        if new or lost:
            # Fire new minions present event
            data = {'new': list(new), 'lost': list(lost)}
            self.event.fire_event(data, tagify('change', 'presence'))
        # Always publish the full set of currently-connected minions.
        data = {'present': list(present)}
        self.event.fire_event(data, tagify('present', 'presence'))
        # Carry the current set over as the baseline for the next call.
        old_present.clear()
        old_present.update(present)
def modify_process_properties(self, key_value_map=None, pid=None):
    '''modify_process_properties(self, key_value_map=None, pid=None)

    Modify process output properties.
    Please note that process property key provided must be declared as an
    output property in the relevant service specification.

    :Parameters:
        * *key_value_map* (`object`) -- key value map with process properties to modify
        * *pid* (`string`) -- Identifier of an existing process

    :Example:
    .. code-block:: python

        process_output_properties = {"my_output_param": "1"}
        pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service')
        opereto_client.modify_process_properties(process_output_properties, pid)
    '''
    # BUGFIX: replaced the mutable default argument ({}) with a None
    # sentinel; behavior for all existing callers is unchanged.
    if key_value_map is None:
        key_value_map = {}
    pid = self._get_pid(pid)
    request_data = {"properties": key_value_map}
    return self._call_rest_api('post', '/processes/' + pid + '/output', data=request_data, error='Failed to output properties')
def reset(self, rg=None):
    '''Enable all offline cpus, and reset max and min frequencies files.

    :param rg: range or list of threads to reset; a bare int is accepted
        and treated as a one-element list. ``None`` resets every CPU in
        the "present" file.
    '''
    # BUGFIX/idiom: isinstance() instead of type() == int, which also
    # covers int subclasses that the exact-type test silently missed.
    if isinstance(rg, int):
        rg = [rg]
    to_reset = rg if rg else self.__get_ranges("present")
    self.enable_cpu(to_reset)
    for cpu in to_reset:
        # Restore scaling limits from the hardware min/max values.
        fpath = path.join("cpu%i" % cpu, "cpufreq", "cpuinfo_max_freq")
        max_freq = self.__read_cpu_file(fpath)
        fpath = path.join("cpu%i" % cpu, "cpufreq", "cpuinfo_min_freq")
        min_freq = self.__read_cpu_file(fpath)
        fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_max_freq")
        self.__write_cpu_file(fpath, max_freq.encode())
        fpath = path.join("cpu%i" % cpu, "cpufreq", "scaling_min_freq")
        self.__write_cpu_file(fpath, min_freq.encode())
def application_adapter(obj, request):
    """Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.

    :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
    :rtype: :class:`dict`
    """
    items = None
    if obj.items is not None:
        items = [{'uri': item.uri, 'title': item.title} for item in obj.items]
    return {
        'title': obj.title,
        'uri': obj.uri,
        'service_url': obj.service_url,
        'success': obj.success,
        'has_references': obj.has_references,
        'count': obj.count,
        'items': items,
    }
def set_base_url(self, platform: str = "prod"):
    """Set Isogeo base URLs according to platform.

    :param str platform: platform to use. Options:

      * prod [DEFAULT]
      * qa
      * int
    """
    platform = platform.lower()
    self.platform = platform
    if platform == "prod":
        ssl = True
        logging.debug("Using production platform.")
    elif platform == "qa":
        # QA endpoints are served without TLS.
        ssl = False
        logging.debug("Using Quality Assurance platform (reduced perfs)." )
    else:
        message = "Platform must be one of: {}".format(" | ".join(self.API_URLS.keys()))
        logging.error(message)
        raise ValueError(3, message)
    # method ending
    return (
        platform.lower(),
        self.API_URLS.get(platform),
        self.APP_URLS.get(platform),
        self.CSW_URLS.get(platform),
        self.MNG_URLS.get(platform),
        self.OC_URLS.get(platform),
        ssl,
    )
def open_dialog(self, verbose=False):
    """The command line dialog provides a field to enter commands and view
    results. It also provides the help command to display namespaces,
    commands, and arguments.

    :param verbose: print more
    """
    return api(url=self.__url + "/open dialog", verbose=verbose)
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
    """Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.

    :param objective_bank_id: the ``Id`` of the obective bank
    :type objective_bank_id: ``osid.id.Id``
    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: a ``ProficiencyQuerySession``
    :rtype: ``osid.learning.ProficiencyQuerySession``
    :raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
    :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if ``supports_proficiency_query()`` and ``supports_visible_federation()`` are ``true``.*
    """
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_proficiency_query():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.ProficiencyQuerySession(
            objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def _tiff_from_exif_segment(cls, stream, offset, segment_length):
    """Return a |Tiff| instance parsed from the Exif APP1 segment of
    *segment_length* at *offset* in *stream*.
    """
    # Skip the 8-byte APP1 header, then hand the remaining segment bytes
    # to Tiff() wrapped in its own in-memory stream.
    stream.seek(offset + 8)
    payload = stream.read(segment_length - 8)
    return Tiff.from_stream(BytesIO(payload))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
    """Adds a public property for a nonrepeated, scalar protocol message field.

    Clients can use this property to get and directly set the value of the field.
    Note that when the client sets the value of a field by using this property,
    all necessary "has" bits are set as a side-effect, and we also perform
    type-checking.

    Args:
      field: A FieldDescriptor for this field.
      cls: The class we're constructing.
    """
    proto_field_name = field.name
    property_name = _PropertyName(proto_field_name)
    type_checker = type_checkers.GetTypeChecker(field)
    default_value = field.default_value
    valid_values = set()
    is_proto3 = field.containing_type.syntax == "proto3"
    def getter(self):
        # TODO(protobuf-team): This may be broken since there may not be
        # default_value. Combine with has_default_value somehow.
        return self._fields.get(field, default_value)
    getter.__module__ = None
    getter.__doc__ = 'Getter for %s.' % proto_field_name
    # In proto3, scalar fields set to their default are removed from the
    # field map entirely (unless they belong to a oneof).
    clear_when_set_to_default = is_proto3 and not field.containing_oneof
    def field_setter(self, new_value):
        # pylint: disable=protected-access
        # Testing the value for truthiness captures all of the proto3 defaults
        # (0, 0.0, enum 0, and False).
        new_value = type_checker.CheckValue(new_value)
        if clear_when_set_to_default and not new_value:
            self._fields.pop(field, None)
        else:
            self._fields[field] = new_value
        # Check _cached_byte_size_dirty inline to improve performance, since scalar
        # setters are called frequently.
        if not self._cached_byte_size_dirty:
            self._Modified()
    if field.containing_oneof:
        def setter(self, new_value):
            field_setter(self, new_value)
            # Also record which member of the oneof is currently set.
            self._UpdateOneofState(field)
    else:
        setter = field_setter
    setter.__module__ = None
    setter.__doc__ = 'Setter for %s.' % proto_field_name
    # Add a property to encapsulate the getter/setter.
    doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
    setattr(cls, property_name, property(getter, setter, doc=doc))
def render_template(template):
    """takes a template to render to and returns a function that
    takes an object to render the data for this template.

    If callable_or_dict is callable, it will be called with
    the request and any additional arguments to produce the
    template paramaters. This is useful for a view-like function
    that returns a dict-like object instead of an HttpResponse.

    Otherwise, callable_or_dict is used as the parameters for
    the rendered response.
    """
    def outer_wrapper(callable_or_dict=None, statuscode=None, **kwargs):
        def wrapper(request, *args, **wrapper_kwargs):
            params = (callable_or_dict(request, *args, **wrapper_kwargs)
                      if callable(callable_or_dict) else callable_or_dict)
            # A dict (or None) is rendered through the template; any other
            # value overrides the default behavior and is returned as-is.
            if params is None or isinstance(params, dict):
                response = render(request, template, params, **kwargs)
            else:
                response = params
            if statuscode:
                response.status_code = statuscode
            return response
        return wrapper
    return outer_wrapper
def _rank(self, ranking, n):
    """Return the ``n`` sentences with the highest ranking scores."""
    return nlargest(n, ranking, key=lambda sentence: ranking[sentence])
async def dump_variant(self, elem, elem_type=None, params=None):
    """Dumps variant type to the writer.

    Supports both wrapped and raw variant.

    :param elem: variant value; either a wrapped ``x.VariantType`` instance
        or a raw value whose type is described by ``elem_type``
    :param elem_type: declared variant type; consulted when ``elem`` is raw
    :param params: unused here; kept for serializer interface symmetry
    :return:
    """
    # NOTE(review): elem_type is dereferenced whenever elem is not a
    # VariantType instance — a raw value with elem_type=None will raise
    # AttributeError here; confirm callers always pass elem_type then.
    if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:
        # Wrapped variant: the instance itself records which field is set.
        await dump_uvarint(self.iobj, elem.variant_elem_type.VARIANT_CODE)
        await self._dump_field(getattr(elem, elem.variant_elem), elem.variant_elem_type)
    else:
        # Raw variant: find the field spec matching elem, then emit its
        # variant code (Boost variant code takes precedence if defined).
        fdef = elem_type.find_fdef(elem_type.f_specs(), elem)
        vcode = fdef[1].BOOST_VARIANT_CODE if hasattr(fdef[1], 'BOOST_VARIANT_CODE') else fdef[1].VARIANT_CODE
        await dump_uvarint(self.iobj, vcode)
        await self._dump_field(elem, fdef[1])
def remap_overlapping_column_names(table_op, root_table, data_columns):
    """Return an ``OrderedDict`` mapping possibly suffixed column names to
    column names without suffixes.

    Parameters
    ----------
    table_op : TableNode
        The ``TableNode`` we're selecting from.
    root_table : TableNode
        The root table of the expression we're selecting from.
    data_columns : set or frozenset
        The available columns to select from.

    Returns
    -------
    OrderedDict[str, str] or None
        A map from possibly-suffixed column names to column names without
        suffixes, or ``None`` when ``table_op`` is not a join.
    """
    if not isinstance(table_op, ops.Join):
        return None

    left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
    suffixes = {
        left_root: constants.LEFT_JOIN_SUFFIX,
        right_root: constants.RIGHT_JOIN_SUFFIX,
    }
    suffix = suffixes[root_table]

    # Keep schema order; map whichever spelling (bare or suffixed) actually
    # appears in data_columns back to the bare name.
    mapping = OrderedDict()
    for name in root_table.schema.names:
        candidates = {name, name + suffix} & data_columns
        if candidates:
            mapping[first(candidates)] = name
    return mapping
def set_basic_params(
        self, workers=None, zerg_server=None, fallback_node=None,
        concurrent_events=None, cheap_mode=None, stats_server=None, quiet=None,
        buffer_size=None, fallback_nokey=None, subscription_key=None,
        emperor_command_socket=None):
    """
    :param int workers: Number of worker processes to spawn.

    :param str|unicode zerg_server: Attach the router to a zerg server.

    :param str|unicode fallback_node: Fallback to the specified node in case of error.

    :param int concurrent_events: Set the maximum number of concurrent events router can manage.
        Default: system dependent.

    :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode,
        it will not respond to requests until a node is available.
        This means that when there are no nodes subscribed, only your local app (if any) will respond.
        When all of the nodes go down, the router will return in cheap mode.

    :param str|unicode stats_server: Router stats server address to run at.

    :param bool quiet: Do not report failed connections to instances.

    :param int buffer_size: Set internal buffer size in bytes. Default: page size.

    :param bool fallback_nokey: Move to fallback node even if a subscription key is not found.

    :param str|unicode subscription_key: Skip uwsgi parsing and directly set a key.

    :param str|unicode emperor_command_socket: Set the emperor command socket that will receive spawn commands.
        See `.empire.set_emperor_command_params()`.
    """
    # filter_locals() inspects this function's locals() by name, so the
    # parameter names above must stay exactly in sync with the exclusion
    # list below (those three are handled by the aliased options instead).
    super(RouterFast, self).set_basic_params(**filter_locals(locals(), [
        'fallback_nokey',
        'subscription_key',
        'emperor_command_socket',
    ]))

    self._set_aliased('fallback-on-no-key', fallback_nokey, cast=bool)
    self._set_aliased('force-key', subscription_key)
    self._set_aliased('emperor-socket', emperor_command_socket)

    # Returned for call chaining.
    return self
def get_available_plugins(self):
    """Check requested plugins' availability and handle missing plugins.

    :return: list of namedtuples, runnable plugins data
    """
    PluginData = namedtuple('PluginData', 'name, plugin_class, conf, is_allowed_to_fail')

    available_plugins = []
    for plugin_request in self.plugins_conf:
        plugin_name = plugin_request['name']
        if plugin_name not in self.plugin_classes:
            if plugin_request.get('required', True):
                # A required plugin that cannot be resolved is fatal.
                msg = ("no such plugin: '%s', did you set "
                       "the correct plugin type?") % plugin_name
                exc = PluginFailedException(msg)
                self.on_plugin_failed(plugin_name, exc)
                logger.error(msg)
                raise exc
            # Plugin is marked as not being required: note and move on.
            logger.warning("plugin '%s' requested but not available", plugin_name)
            continue

        plugin_class = self.plugin_classes[plugin_name]
        available_plugins.append(PluginData(
            plugin_name,
            plugin_class,
            plugin_request.get("args", {}),
            plugin_request.get(
                'is_allowed_to_fail',
                getattr(plugin_class, "is_allowed_to_fail", True)),
        ))

    return available_plugins
def delete_collection_initializer_configuration(self, **kwargs):  # noqa: E501
    """delete_collection_initializer_configuration  # noqa: E501

    Delete collection of InitializerConfiguration.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_initializer_configuration(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Pagination token returned by a previous list call; only valid with otherwise-identical query parameters.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call; the server sets `continue` on the list metadata when more items exist.
    :param str resource_version: Show changes that occur after that particular version of a resource; semantics depend on list vs watch.
    :param int timeout_seconds: Timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Asynchronous calls hand back the request thread; synchronous calls
    # unwrap the response data before returning it.
    if kwargs.get('async_req'):
        return self.delete_collection_initializer_configuration_with_http_info(**kwargs)  # noqa: E501
    (data) = self.delete_collection_initializer_configuration_with_http_info(**kwargs)  # noqa: E501
    return data
def _send_event(self, title, text, tags, event_type, aggregation_key, severity='info'):
    """Emit an event to the Datadog Event Stream."""
    payload = {
        'timestamp': int(time()),
        'source_type_name': self.SOURCE_TYPE_NAME,
        'msg_title': title,
        'event_type': event_type,
        'alert_type': severity,
        'msg_text': text,
        'tags': tags,
        'aggregation_key': aggregation_key,
    }
    self.event(payload)
def prob(self, comparison_vectors, return_type=None):
    """Compute the probabilities for each record pair.

    For each pair of records, estimate the probability of being a match.

    Parameters
    ----------
    comparison_vectors : pandas.DataFrame
        The dataframe with comparison vectors.
    return_type : str
        Deprecated. (default 'series')

    Returns
    -------
    pandas.Series
        The probability of being a match for each record pair.
    """
    # 'return_type' is kept only for backward compatibility.
    if return_type is not None:
        warnings.warn(
            "The argument 'return_type' is removed. "
            "Default value is now 'series'.",
            VisibleDeprecationWarning,
            stacklevel=2)

    logging.info("Classification - compute probabilities")

    probabilities = self._prob_match(comparison_vectors.values)
    return pandas.Series(probabilities, index=comparison_vectors.index)
def get_serializer(serializer_format):
    """Get the serializer for a specific format.

    Returns ``None`` for unknown formats, matching the original behaviour.
    """
    serializers = {
        Format.JSON: _serialize_json,
        Format.PICKLE: _serialize_pickle,
    }
    return serializers.get(serializer_format)
def _ParseRecordExtraField(self, byte_stream, file_offset):
    """Parses a record extra field.

    Args:
        byte_stream (bytes): byte stream.
        file_offset (int): offset of the record extra field relative to
            the start of the file.

    Returns:
        asl_record_extra_field: record extra field.

    Raises:
        ParseError: if the record extra field cannot be parsed.
    """
    data_type_map = self._GetDataTypeMap('asl_record_extra_field')

    try:
        return self._ReadStructureFromByteStream(
            byte_stream, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        # Re-raise as a ParseError carrying the file offset for context.
        raise errors.ParseError((
            'Unable to parse record extra field at offset: 0x{0:08x} with error: '
            '{1!s}').format(file_offset, exception))
def analyze_string_content(self, string, line_num, filename):
    """Search *string* for the custom pattern, capturing every high-entropy
    candidate matching ``self.regex`` above ``self.entropy_limit``."""
    # Sequential strings (e.g. "abcdef") are not real secrets; skip them.
    secrets = (
        PotentialSecret(self.secret_type, filename, candidate, line_num)
        for candidate in self.secret_generator(string)
        if not self._is_sequential_string(candidate)
    )
    return {secret: secret for secret in secrets}
def getFontsByFontInfoAttribute(self, *attributeValuePairs):
    """Get a list of fonts that match the (attribute, value)
    combinations in ``attributeValuePairs``.

        >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20))
        >>> subFonts = fonts.getFontsByFontInfoAttribute(("xHeight", 20), ("descender", -150))

    This will return an instance of :class:`BaseFontList`.
    """
    # Narrow the candidate list one (attribute, value) pair at a time.
    matches = self
    for attribute, value in attributeValuePairs:
        matches = self._matchFontInfoAttributes(matches, (attribute, value))
    return matches
def is_for_driver_task(self):
    """See whether this function descriptor is for a driver or not.

    Returns:
        True if this function descriptor is for driver tasks.
    """
    # A driver "function" has no module, class or function name at all.
    return (len(self.module_name) == 0
            and len(self.class_name) == 0
            and len(self.function_name) == 0)
def create_dialog(self):
    """Create the dialog."""
    # Standard OK / Cancel buttons; keep references for button_clicked().
    bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
    self.idx_ok = bbox.button(QDialogButtonBox.Ok)
    self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)

    # File chooser button; the chosen path becomes the export filename.
    filebutton = QPushButton()
    filebutton.setText('Choose')
    self.idx_filename = filebutton

    # Export format menu and event-type selection widgets.
    self.xp_format = FormMenu(['CSV', 'Brain Vision'])
    self.all_types = FormBool('All event types')
    self.idx_evt_type = QListWidget()
    self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)

    # Wire signals to the dialog's handlers.
    filebutton.clicked.connect(self.save_as)
    self.all_types.connect(self.toggle_buttons)
    bbox.clicked.connect(self.button_clicked)

    # Layout: form rows on top, OK/Cancel bottom-right.
    form = QFormLayout()
    form.addRow('Filename', self.idx_filename)
    form.addRow('Format', self.xp_format)
    form.addRow(self.all_types)
    form.addRow('Event type(s)', self.idx_evt_type)

    btnlayout = QHBoxLayout()
    btnlayout.addStretch(1)
    btnlayout.addWidget(bbox)

    vlayout = QVBoxLayout()
    vlayout.addLayout(form)
    vlayout.addStretch(1)
    vlayout.addLayout(btnlayout)

    self.setLayout(vlayout)
def get_node(self, process_name, timeperiod):
    """Method retrieves a tree node identified by the time_qualifier and the timeperiod"""
    if process_name in self.process_hierarchy:
        time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
        return self._get_node(time_qualifier, timeperiod)
    raise ValueError('unable to retrieve the node due to unknown process: {0}'.format(process_name))
def create_reader(name, *args, format=None, registry=default_registry, **kwargs):
    """Create a reader instance, guessing its factory using filename (and eventually format).

    :param name:
    :param args:
    :param format:
    :param registry:
    :param kwargs:
    :return: mixed
    """
    factory = registry.get_reader_factory_for(name, format=format)
    return factory(name, *args, **kwargs)
def levenshtein(str1, s2):
    '''Return the Levenshtein (edit) distance between two strings.

    Uses the classic dynamic-programming formulation where dist[i][j] is
    the distance between the first i characters of s2 and the first j
    characters of str1.
    '''
    n1 = len(str1)
    n2 = len(s2)
    # Row i starts as [i, i+1, ..., i+n1]: row 0 is the cost of pure
    # insertions, column 0 the cost of pure deletions.
    # NOTE: the original built this table out of `range` objects, which
    # are immutable in Python 3, so the assignments below raised TypeError.
    dist = [list(range(i, i + n1 + 1)) for i in range(n2 + 1)]
    for i in range(n2):
        for j in range(n1):
            substitution_cost = 0 if str1[j] == s2[i] else 1
            dist[i + 1][j + 1] = min(
                dist[i + 1][j] + 1,              # insertion
                dist[i][j + 1] + 1,              # deletion
                dist[i][j] + substitution_cost,  # substitution / match
            )
    return dist[n2][n1]
def enqueue_data(self, event_type, data):
    """Enqueue a data item for specific event type"""
    with self.lock:
        # Fan the item out to every registered listener, then flag the
        # processing loop that new data is waiting.
        for listener in self.listeners.values():
            listener.enqueue(event_type, data)
        self.must_process = True
def create_inverse_model(self, encoded_state, encoded_next_state):
    """Creates inverse model TensorFlow ops for Curiosity module.
    Predicts action taken given current and future encoded states.
    :param encoded_state: Tensor corresponding to encoded current state.
    :param encoded_next_state: Tensor corresponding to encoded next state.
    """
    combined_input = tf.concat([encoded_state, encoded_next_state], axis=1)
    hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
    if self.brain.vector_action_space_type == "continuous":
        # Continuous control: regress the action vector and penalise the
        # squared error against the actions actually taken.
        pred_action = tf.layers.dense(hidden, self.act_size[0], activation=None)
        squared_difference = tf.reduce_sum(
            tf.squared_difference(pred_action, self.selected_actions), axis=1)
        # dynamic_partition(..., self.mask, 2)[1] keeps only the unmasked
        # (valid) timesteps when averaging the loss.
        self.inverse_loss = tf.reduce_mean(
            tf.dynamic_partition(squared_difference, self.mask, 2)[1])
    else:
        # Discrete control: one softmax head per action branch, trained with
        # cross-entropy against the (one-hot) selected actions; the epsilon
        # guards tf.log against zero probabilities.
        pred_action = tf.concat(
            [tf.layers.dense(hidden, self.act_size[i], activation=tf.nn.softmax)
             for i in range(len(self.act_size))], axis=1)
        cross_entropy = tf.reduce_sum(-tf.log(pred_action + 1e-10) * self.selected_actions, axis=1)
        self.inverse_loss = tf.reduce_mean(
            tf.dynamic_partition(cross_entropy, self.mask, 2)[1])
def _identify_heterogeneity_blocks_shared(in_file, segment_fn, params, work_dir, somatic_info):
    """Identify heterogeneity blocks corresponding to segmentation from CNV input file.

    Writes a BED file (chrom, start, end) of heterogeneity blocks named after
    ``in_file``'s basename into ``work_dir`` and returns its path.  The file
    is only regenerated when it is out of date relative to ``in_file``.
    """
    out_file = os.path.join(work_dir, "%s-hetblocks.bed" % utils.splitext_plus(os.path.basename(in_file))[0])
    if not utils.file_uptodate(out_file, in_file):
        # file_transaction writes to a temporary path and moves it into
        # place atomically on success.
        with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                for chrom, freqs, coords in _freqs_by_chromosome(in_file, params, somatic_info):
                    # segment_fn turns per-position frequencies into
                    # (start, end) block intervals for this chromosome.
                    for start, end in segment_fn(chrom, freqs, coords):
                        out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
    return out_file
def sharing_agreements(self):
    """| Comment: The ids of the sharing agreements used for this ticket"""
    # Without an API handle or any agreement ids there is nothing to fetch.
    if not (self.api and self.sharing_agreement_ids):
        return None
    return self.api._get_sharing_agreements(self.sharing_agreement_ids)
def tangent_lineation_plot(ax, strikes, dips, rakes):
    """Makes a tangent lineation plot for normal faults with the given strikes,
    dips, and rakes."""
    # Position of the rake of the lineations; only the direction is used.
    rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes)

    # These are all normal faults, so the arrows point away from the center.
    # Because the arrows are drawn at the pole location rather than the rake
    # location, the direction is flipped (hence the minus signs).
    length = np.hypot(rake_x, rake_y)
    u = -rake_x / length
    v = -rake_y / length

    # Arrows are centered on the fault-plane poles.
    pole_x, pole_y = mplstereonet.pole(strikes, dips)
    return ax.quiver(pole_x, pole_y, u, v,
                     width=1, headwidth=4, units='dots', pivot='middle')
def assert_has_calls(self, calls, any_order=False):
    """assert the mock has been called with the specified calls.
    The `mock_calls` list is checked for the calls.

    If `any_order` is False (the default) then the calls must be
    sequential. There can be extra calls before or after the
    specified calls.

    If `any_order` is True then the calls can be in any order, but
    they must all appear in `mock_calls`.
    """
    if not any_order:
        # Sequential check relies on mock_calls' containment semantics.
        if calls not in self.mock_calls:
            raise AssertionError(
                'Calls not found.\nExpected: %r\n'
                'Actual: %r' % (calls, self.mock_calls)
            )
        return

    # Any order: each expected call must be matched (and consumed) once.
    remaining = list(self.mock_calls)
    missing = []
    for expected in calls:
        try:
            remaining.remove(expected)
        except ValueError:
            missing.append(expected)
    if missing:
        raise AssertionError(
            '%r not all found in call list' % (tuple(missing),)
        )
def load(fname):
    """Load an embedding dump generated by `save`"""
    content = _open(fname).read()
    # Dumps written under Python 2 need latin1 to round-trip byte strings.
    if PY2:
        state = pickle.loads(content)
    else:
        state = pickle.loads(content, encoding='latin1')

    voc, vec = state
    if len(voc) == 2:
        # A (words, counts) pair: rebuild the counted vocabulary.
        words, counts = voc
        vocab = CountedVocabulary(word_count=dict(zip(words, counts)))
    else:
        vocab = OrderedVocabulary(voc)
    return Embedding(vocabulary=vocab, vectors=vec)
def int_args(self):
    """Iterate through all the possible arg positions that can only be used to store integer or pointer values
    Does not take into account customizations.

    Returns an iterator of SimFunctionArguments
    """
    if self.ARG_REGS is None:
        raise NotImplementedError()
    for register_name in self.ARG_REGS:  # pylint: disable=not-an-iterable
        yield SimRegArg(register_name, self.arch.bytes)
def new_stories(self, raw=False, limit=None):
    """Returns list of item ids of current new stories

    Args:
        limit (int): specifies the number of stories to be returned.
        raw (bool): Flag to indicate whether to transform all
            objects into raw json.

    Returns:
        `list` object containing ids of new stories.
    """
    stories = self._get_stories('newstories', limit)
    if raw:
        return [story.raw for story in stories]
    return stories
def dashboards(self, filter=None, startAt=0, maxResults=20):
    """Return a ResultList of Dashboard resources and a ``total`` count.

    :param filter: either "favourite" or "my", the type of dashboards to return
    :type filter: Optional[str]
    :param startAt: index of the first dashboard to return (Default: 0)
    :type startAt: int
    :param maxResults: maximum number of dashboards to return.
        If maxResults evaluates as False, it will try to get all items in batches. (Default: 20)
    :type maxResults: int
    :rtype: ResultList
    """
    params = {} if filter is None else {'filter': filter}
    return self._fetch_pages(Dashboard, 'dashboards', 'dashboard', startAt, maxResults, params)
def split_input(cls, job_config):
    """Inherit docs."""
    params = job_config.input_reader_params
    shard_count = job_config.shard_count
    query_spec = cls._get_query_spec(params)

    # Fall back to the parent's (key-range based) splitting when the query
    # filters do not support sharding by a property range.
    if not property_range.should_shard_by_property_range(query_spec.filters):
        return super(ModelDatastoreInputReader, cls).split_input(job_config)

    p_range = property_range.PropertyRange(query_spec.filters, query_spec.model_class_path)
    p_ranges = p_range.split(shard_count)

    # User specified a namespace.
    if query_spec.ns:
        ns_range = namespace_range.NamespaceRange(
            namespace_start=query_spec.ns,
            namespace_end=query_spec.ns,
            _app=query_spec.app)
        ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
    else:
        ns_keys = namespace_range.get_namespace_keys(
            query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1)
        # No namespaces at all: nothing to split over.
        if not ns_keys:
            return
        # User doesn't specify ns but the number of ns is small.
        # We still split by property range.
        if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
            ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
                         for _ in p_ranges]
        # Lots of namespaces. Split by ns.
        else:
            ns_ranges = namespace_range.NamespaceRange.split(
                n=shard_count, contiguous=False, can_query=lambda: True,
                _app=query_spec.app)
            p_ranges = [copy.copy(p_range) for _ in ns_ranges]

    assert len(p_ranges) == len(ns_ranges)
    # Pair each property range with a namespace range; each pair becomes one
    # iterator and hence one shard.
    iters = [
        db_iters.RangeIteratorFactory.create_property_range_iterator(p, ns, query_spec)
        for p, ns in zip(p_ranges, ns_ranges)]
    return [cls(i) for i in iters]
def crt_mdl_rsp(arySptExpInf, tplPngSize, aryMdlParams, varPar, strCrd='crt',
                lgcPrint=True):
    """Create responses of 2D Gauss models to spatial conditions.

    Parameters
    ----------
    arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
        All spatial conditions stacked along second axis.
    tplPngSize : tuple, 2
        Pixel dimensions of the visual space (width, height).
    aryMdlParams : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, 3]
        Model parameters (x, y, sigma) for all models.
    varPar : int, positive
        Number of cores to parallelize over.
    strCrd : string, either 'crt' or 'pol'
        Whether model parameters are provided in cartesian or polar coordinates.
    lgcPrint : boolean
        Whether print statements should be executed.

    Returns
    -------
    aryMdlCndRsp : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, n_cond]
        Responses of 2D Gauss models to spatial conditions.
    """
    if varPar == 1:
        # If the number of cores requested by the user is equal to 1, we save
        # the overhead of multiprocessing by calling cnvl_2D_gauss directly.
        aryMdlCndRsp = cnvl_2D_gauss(0, aryMdlParams, arySptExpInf, tplPngSize,
                                     None, strCrd=strCrd)
    else:
        # The long array with all the combinations of model parameters is put
        # into separate chunks for parallelisation, using a list of arrays.
        lstMdlParams = np.array_split(aryMdlParams, varPar)
        # Create a queue to put the results in:
        queOut = mp.Queue()
        # Empty list for results from parallel processes (for pRF model
        # responses):
        lstMdlTc = [None] * varPar
        # Empty list for processes:
        lstPrcs = [None] * varPar
        if lgcPrint:
            print('---------Running parallel processes')
        # Create processes:
        for idxPrc in range(0, varPar):
            lstPrcs[idxPrc] = mp.Process(
                target=cnvl_2D_gauss,
                args=(idxPrc, lstMdlParams[idxPrc], arySptExpInf, tplPngSize,
                      queOut),
                kwargs={'strCrd': strCrd},
            )
            # Daemon (kills processes when exiting):
            lstPrcs[idxPrc].Daemon = True
        # Start processes:
        for idxPrc in range(0, varPar):
            lstPrcs[idxPrc].start()
        # Collect results from queue.  NOTE(review): queue arrival order is
        # arbitrary; chunk order is restored below via sorted() on the
        # (process index, result) tuples each worker puts on the queue.
        for idxPrc in range(0, varPar):
            lstMdlTc[idxPrc] = queOut.get(True)
        # Join processes:
        for idxPrc in range(0, varPar):
            lstPrcs[idxPrc].join()
        if lgcPrint:
            print('---------Collecting results from parallel processes')
        # Put output arrays from parallel processes into one big array, in
        # process-index order so rows line up with aryMdlParams.
        lstMdlTc = sorted(lstMdlTc)
        aryMdlCndRsp = np.empty((0, arySptExpInf.shape[-1]))
        for idx in range(0, varPar):
            aryMdlCndRsp = np.concatenate((aryMdlCndRsp, lstMdlTc[idx][1]),
                                          axis=0)
        # Clean up:
        del(lstMdlParams)
        del(lstMdlTc)
    return aryMdlCndRsp.astype('float16')
def get_privileges(self, application=None, name=None, params=None):
    """`<TODO>`_

    :arg application: Application name
    :arg name: Privilege name
    """
    path = _make_path("_security", "privilege", application, name)
    return self.transport.perform_request("GET", path, params=params)
def add_receipt(self, block_header: BlockHeader, index_key: int, receipt: Receipt) -> Hash32:
    """Adds the given receipt to the provided block header.

    Returns the updated `receipts_root` for updated block header.
    """
    trie = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
    trie[index_key] = rlp.encode(receipt)
    return trie.root_hash
def _init_decoder(self):
    """Set-up the _decoder attribute if necessary."""
    if self._decoder is not None:
        return

    # Note: content-encoding value should be case-insensitive, per RFC 7230
    # Section 3.2
    content_encoding = self.headers.get('content-encoding', '').lower()
    if content_encoding in self.CONTENT_DECODERS:
        self._decoder = _get_decoder(content_encoding)
    elif ',' in content_encoding:
        # Multiple encodings: only build a decoder when every recognised
        # member of the list is supported.
        encodings = [
            e.strip() for e in content_encoding.split(',')
            if e.strip() in self.CONTENT_DECODERS]
        if encodings:
            self._decoder = _get_decoder(content_encoding)
def _transform(self, q):
    """exp(i q.r(i)) v(i)

    Fourier-transforms the atomic velocities onto the given q-point(s).
    """
    s2p = self._primitive.get_supercell_to_primitive_map()
    p2s = self._primitive.get_primitive_to_supercell_map()
    num_s = self._supercell.get_number_of_atoms()  # NOTE(review): unused
    num_p = self._primitive.get_number_of_atoms()
    v = self._velocities
    # Accept a single q-point or an array of them; normalise to (n_q, 3).
    q_array = np.reshape(q, (-1, 3))
    # Complex dtype twice the width of a double (i.e. "c16" normally).
    dtype = "c%d" % (np.dtype('double').itemsize * 2)
    v_q = np.zeros((v.shape[0], num_p, len(q_array), 3), dtype=dtype)
    for p_i, s_i in enumerate(p2s):
        for s_j, s2p_j in enumerate(s2p):
            if s2p_j == s_i:
                # Accumulate phase-weighted velocities over all supercell
                # images of primitive atom p_i.
                for q_i, pf in enumerate(self._get_phase_factor(p_i, s_j, q_array)):
                    v_q[:, p_i, q_i, :] += pf * v[:, s_j, :]
    return v_q
def user_data(self, access_token, *args, **kwargs):
    """Load user data from OAuth Profile Google App Engine App"""
    auth_headers = {'Authorization': 'Bearer ' + access_token}
    return self.get_json(GOOGLE_APPENGINE_PROFILE_V2, headers=auth_headers)
def get_abilities():
    """Visit Bulbapedia and pull names and descriptions from the table, 'list of Abilities.' Save as JSON."""
    page = requests.get('http://bulbapedia.bulbagarden.net/wiki/Ability')
    soup = bs4.BeautifulSoup(page.text)
    # The abilities listing is the first table with class "sortable".
    table = soup.find("table", {"class": "sortable"})
    # Skip the header row; drop whitespace-only children.
    tablerows = [tr for tr in table.children if tr != '\n'][1:]
    abilities = {}
    for tr in tablerows:
        cells = tr.find_all('td')
        # Normalise the ability name into a slug (lowercase, dash-separated).
        ability_name = cells[1].get_text().strip().replace(' ', '-').lower()
        # NOTE(review): `unicode` is Python 2 only; under Python 3 this line
        # raises NameError — confirm the supported interpreter version.
        ability_desc = unicode(cells[2].get_text().strip())
        abilities[ability_name] = ability_desc
    # Write abilities.json next to this source file.
    srcpath = path.dirname(__file__)
    with io.open(path.join(srcpath, 'abilities.json'), 'w', encoding='utf-8') as f:
        f.write(json.dumps(abilities, ensure_ascii=False))
def ending(self, end, predicate=None, index=None):
    """Retrieves a set of Match objects that ends at given index.

    :param end: the ending index
    :type end: int
    :param predicate:
    :type predicate:
    :return: set of matches
    :rtype: set[Match]
    """
    matches = _BaseMatches._base(self._end_dict[end])
    return filter_index(matches, predicate, index)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.