signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def add_transform_columns(self):
    """Add transformed companion columns to the Pst.parameter_data attribute.

    For each of "parval1", "parlbnd", "parubnd" and "increment" that is
    present in parameter_data, writes a new "<col>_trans" column equal to
    (col * scale) + offset; for log-transformed parameters (partrans == "log")
    the transformed value is then replaced by its log10.
    """
    for col in ["parval1", "parlbnd", "parubnd", "increment"]:
        if col not in self.parameter_data.columns:
            # Some Pst files omit optional columns (e.g. "increment"); skip them.
            continue
        # Linear transform applied row-wise across the whole column.
        self.parameter_data.loc[:, col + "_trans"] = (self.parameter_data.loc[:, col] * self.parameter_data.scale) + self.parameter_data.offset
        # isnotfixed = self.parameter_data.partrans != "fixed"
        islog = self.parameter_data.partrans == "log"
        # Only log-transformed parameters get the log10 applied.
        self.parameter_data.loc[islog, col + "_trans"] = self.parameter_data.loc[islog, col + "_trans"].apply(lambda x: np.log10(x))
def get_wellseries(self, matrix):
    """Return the grid as a WellSeries of WellSeries (one inner series per column)."""
    columns = OrderedDict()
    for col_name, cells in matrix.items():
        column = columns.setdefault(col_name, OrderedDict())
        for row_name, cell in cells.items():
            # Cells are looked up by their joined name among this node's children.
            column[row_name] = self.children_by_name[''.join(cell)]
        columns[col_name] = WellSeries(column, name=col_name)
    return WellSeries(columns)
def setZValue(self, zValue):
    """Set the z-value for this layer to the inputed value.

    :param zValue: <int>
    :return: <bool> whether the value actually changed
    """
    changed = zValue != self._zValue
    if changed:
        self._zValue = zValue
        # Re-sync the layer only when the z-order really moved.
        self.sync()
    return changed
def to_pycbc(self, copy=True):
    """Convert this `TimeSeries` into a PyCBC
    `~pycbc.types.timeseries.TimeSeries`

    Parameters
    ----------
    copy : `bool`, optional, default: `True`
        if `True`, copy these data to a new array

    Returns
    -------
    timeseries : `~pycbc.types.timeseries.TimeSeries`
        a PyCBC representation of this `TimeSeries`
    """
    from pycbc import types
    # Sample spacing in seconds and GPS epoch of the first sample.
    delta_t = self.dt.to('s').value
    start = self.epoch.gps
    return types.TimeSeries(self.value, delta_t=delta_t, epoch=start, copy=copy)
def _multi_deref(tensors: List[tf.Tensor], index: tf.Tensor) -> List[tf.Tensor]:
    """Equivalent to `[t[index, ...] for t in tensors]`.

    See `_deref` for more details.
    """
    assert tensors
    first = tensors[0]
    assert first.shape[0] > 0
    gather = lambda i: [tensor[i, ...] for tensor in tensors]
    # Valid indices run from 0 to the (static) leading dimension minus one.
    return _deref_helper(gather, index, 0, first.shape[0] - 1)
def _translate_pattern(self, pattern, anchor=True, prefix=None, is_regex=False):
    """Translate a shell-like wildcard pattern to a compiled regular
    expression.

    Return the compiled regex. If 'is_regex' true,
    then 'pattern' is directly compiled to a regex (if it's a string)
    or just returned as-is (assumes it's a regex object).
    """
    if is_regex:
        if isinstance(pattern, str):
            return re.compile(pattern)
        else:
            return pattern
    if _PYTHON_VERSION > (3, 2):  # ditch start and end characters
        # On 3.3+, _glob_to_re wraps its result in anchoring start/end text;
        # recover those wrappers by globbing a known single character.
        start, _, end = self._glob_to_re('_').partition('_')
    if pattern:
        pattern_re = self._glob_to_re(pattern)
        if _PYTHON_VERSION > (3, 2):
            assert pattern_re.startswith(start) and pattern_re.endswith(end)
    else:
        pattern_re = ''
    # Escaped base directory with a trailing separator.
    base = re.escape(os.path.join(self.base, ''))
    if prefix is not None:  # ditch end of pattern character
        if _PYTHON_VERSION <= (3, 2):
            empty_pattern = self._glob_to_re('')
            prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
        else:
            prefix_re = self._glob_to_re(prefix)
            assert prefix_re.startswith(start) and prefix_re.endswith(end)
            # Strip the anchoring wrappers so the prefix can be embedded.
            prefix_re = prefix_re[len(start):len(prefix_re) - len(end)]
        sep = os.sep
        if os.sep == '\\':
            # Windows separator must itself be escaped inside the regex.
            sep = r'\\'
        if _PYTHON_VERSION <= (3, 2):
            pattern_re = '^' + base + sep.join((prefix_re, '.*' + pattern_re))
        else:
            pattern_re = pattern_re[len(start):len(pattern_re) - len(end)]
            pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, pattern_re, end)
    else:  # no prefix -- respect anchor flag
        if anchor:
            if _PYTHON_VERSION <= (3, 2):
                pattern_re = '^' + base + pattern_re
            else:
                pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
    return re.compile(pattern_re)
def tagged(*tags: Tags) -> Callable:
    """Decorator for adding a label to the process.

    These labels are applied to any child Processes produced by event.
    """
    # NOTE: the docstring above was originally placed after this statement,
    # which made it a plain (discarded) string rather than a docstring.
    global GREENSIM_TAG_ATTRIBUTE

    def hook(event: Callable):
        def wrapper(*args, **kwargs):
            # Bug fix: propagate the wrapped event's return value instead of
            # silently discarding it -- the decorator should be transparent.
            return event(*args, **kwargs)
        # Attach the tag tuple so child processes can pick the labels up.
        setattr(wrapper, GREENSIM_TAG_ATTRIBUTE, tags)
        return wrapper
    return hook
def printImportedNames(self):
    """Produce a report of imported names."""
    for module in self.listModules():
        print("%s:" % module.modname)
        # One imported name per line, indented under the module heading.
        listing = "\n ".join(imp.name for imp in module.imported_names)
        print(" %s" % listing)
def parse_match(match):
    """Accept an re match object resulting from an ``UPLOAD_RE`` match
    and return a two-tuple where the first element is the
    corresponding ``FileUpload`` and the second is a dictionary of the
    key=value options.

    If there is no ``FileUpload`` object corresponding to the match,
    the first element of the returned tuple is None.
    """
    slug = match.group(1)
    try:
        upload = FileUpload.objects.get(slug=slug)
    except FileUpload.DoesNotExist:
        # Unknown slug: signal with None rather than raising.
        upload = None
    options = parse_options(match.group(2))
    return (upload, options)
def merge_requests(self, **kwargs):
    """List the merge requests related to this milestone.

    Args:
        all (bool): If True, return all the items, without pagination
        per_page (int): Number of items to retrieve per request
        page (int): ID of the page to return (starts with page 1)
        as_list (bool): If set to False and no pagination option is
            defined, return a generator instead of a list
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: The list of merge requests
    """
    path = '%s/%s/merge_requests' % (self.manager.path, self.get_id())
    items = self.manager.gitlab.http_list(path, as_list=False, **kwargs)
    # FIXME(gpocentek): the computed manager path is not correct
    mr_manager = ProjectMergeRequestManager(self.manager.gitlab, parent=self.manager._parent)
    return RESTObjectList(mr_manager, ProjectMergeRequest, items)
def add(self, addon, dev=False, interactive=True):
    """Add a new dependency and install it.

    The addon is added to the (dev or prod) requirements, the project is
    rebuilt, and on any failure the requirements are rolled back to their
    previous state.
    """
    dependencies = self.get_dependency_manager(dev=dev)
    other_dependencies = self.get_dependency_manager(dev=not dev)
    # Remember any previously pinned entry so it can be restored on failure.
    existing = dependencies.get(addon)
    self.stdout.write(style.format_command('Adding', addon))
    dependencies.add(addon)
    try:  # try running the build
        self.build()
        self.refresh()
        # remove version of this in other requirements file
        other_dependencies.remove(addon, warn=False)
        # run new addon constructor
        constructor_name = '%s.init' % Dependency(addon).module_name
        constructor = self.blueprints.get(constructor_name)
        if constructor:
            # Run the blueprint's init entry point non-interactively via click,
            # then generate project files from the resulting context.
            context = constructor.load_context().main([], standalone_mode=False)
            self.generate(constructor, context, interactive=interactive)
    except Exception as e:  # restore original settings
        self.stdout.write(style.red(str(e)))
        self.stdout.write(style.yellow('Could not find %s' % addon))
        dependencies.remove(addon)
        if existing:
            # Re-pin the entry that was present before this call.
            dependencies.add(existing)
    return
def ID_from_data(self, ID_field='$SRC'):
    '''Returns the well ID from the src keyword in the FCS file. (e.g., A2)

    This keyword may not appear in FCS files generated by other machines,
    in which case this function will raise an exception.
    '''
    try:
        return self.get_meta_fields(ID_field)[ID_field]
    except KeyError:
        # Build a descriptive message naming both the keyword and the file.
        template = "The keyword '{}' does not exist in the following FCS file: {}"
        raise Exception(template.format(ID_field, self.datafile))
def process(self, key, val):
    """Try to look for `key` in all required and optional fields. If found,
    set the `val`.
    """
    # Required fields take precedence; the first field that accepts the
    # key/value pair wins and processing stops.
    for group in (self.fields, self.optional):
        for field in group:
            if field.check(key, val):
                return
def winning_abbr(self):
    """Returns a ``string`` of the winning team's abbreviation, such as
    'ALABAMA' for the Alabama Crimson Tide.
    """
    winner_name = self._home_name if self.winner == HOME else self._away_name
    # Names without a school link are already plain text; otherwise parse
    # the abbreviation out of the anchor.
    if 'cfb/schools' not in str(winner_name):
        return winner_name.text()
    return utils._parse_abbreviation(winner_name)
def extract_numeric_values_from_string(str_contains_values):
    # type: (AnyStr) -> Optional[List[Union[int, float]]]
    """Find numeric values from string, e.g., 1, .7, 1.2, 4e2, 3e-3, -9, etc.

    Values that are mathematically integral (e.g. "1.", "2e2") are returned
    as ``int``; everything else as ``float``. Returns ``None`` when the
    string contains no numeric values.

    Args:
        str_contains_values: string which may contain numeric values

    Returns:
        list of numeric values, or None
    """
    numeric_const_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?'
    matcher = re.compile(numeric_const_pattern, re.VERBOSE)
    tokens = matcher.findall(str_contains_values)
    if not tokens:
        return None
    values = []
    for token in tokens:
        number = float(token)
        # Collapse whole-valued floats to plain ints.
        values.append(int(number) if number % 1. == 0 else number)
    return values
def get_callable_fq_for_code(code, locals_dict=None):
    """Determines the function belonging to a given code object in a fully qualified fashion.

    Returns a tuple consisting of
    - the callable
    - a list of classes and inner classes, locating the callable (like a fully qualified name)
    - a boolean indicating whether the callable is a method
    """
    if code in _code_callable_dict:
        res = _code_callable_dict[code]
        # A cached hit is reused; a cached miss (None callable) is retried
        # only when a locals_dict is now available to search.
        if not res[0] is None or locals_dict is None:
            return res
    md = getmodule(code)
    if not md is None:
        nesting = []
        res, slf = _get_callable_fq_for_code(code, md, md, False, nesting, set())
        if res is None and not locals_dict is None:
            # Module-level search failed: fall back to the caller's locals.
            nesting = []
            res, slf = _get_callable_from_locals(code, locals_dict, md, False, nesting)
        else:
            # NOTE(review): only module-level results reach this branch, so
            # locals-derived results are never cached -- confirm this is the
            # intended caching policy.
            _code_callable_dict[code] = (res, nesting, slf)
        return res, nesting, slf
    else:
        # No module could be determined for the code object.
        return None, None, None
def recommend_get(self, adgroup_id, **kwargs):
    '''xxxxx.xxxxx.keywords.recommend.get

    Fetch the recommended-keyword list for one ad (promotion) group.
    '''
    request = TOPRequest('xxxxx.xxxxx.keywords.recommend.get')
    request['adgroup_id'] = adgroup_id
    # Python 2 code (dict.iteritems).
    for k, v in kwargs.iteritems():
        # NOTE(review): as written this skips a key only when it is BOTH
        # unrecognized AND None; recognized keys are forwarded even when
        # None. Confirm whether `or` was intended instead of `and`.
        if k not in ('nick', 'order_by', 'search', 'pertinence', 'page_size', 'page_no') and v == None:
            continue
        request[k] = v
    # Execute the TOP request and map the 'result' field onto the model.
    self.create(self.execute(request), models={'result': RecommendWordPage})
    return self.result
def addElement(self, etype='hex8', corners=(-1.0, -1.0, -1.0, 1., -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0), name='new_elem'):
    '''Append a new element (and its nodes) to the mesh.

    corners - list of nodal coordinates properly ordered for element type
        (counter clockwise); defaults to the corner coordinates of a
        2x2x2 hex8 cube centered on the origin.
    etype - element type string (e.g. 'hex8', 'quad4', 'tri3')
    name - base name for the generated element/node/face sets
    '''
    # Fix: the default was a shared mutable list; an immutable tuple with the
    # same values avoids the mutable-default-argument pitfall.
    lastelm = self.elements[-1][1]
    lastnode = self.nodes[-1][0]
    elm = [etype, lastelm + 1]
    # One node per (x, y, z) triple; old_div(len, 3) == len // 3 for ints.
    for i in range(len(corners) // 3):
        elm.append(lastnode + 1 + i)
    self.elements.append(elm)
    self.elsets['e' + name] = {}
    self.elsets['e' + name][int(elm[1])] = True
    cnt = 1
    self.nsets['n' + name] = []
    for i in range(0, len(corners), 3):
        self.nodes.append([lastnode + cnt, corners[i], corners[i + 1], corners[i + 2]])
        self.nsets['n' + name].append(lastnode + cnt)
        cnt += 1
    # if this is a quad4 or tri3 element make a surface set
    if etype == 'quad4' or etype == 'tri3':
        self.fsets['f' + name] = [[etype, MeshDef.facetID, lastnode + 1, lastnode + 2, lastnode + 3, lastnode + 4]]
        MeshDef.facetID += 1
def _print_value ( self ) :
"""Generates the table values .""" | for line in range ( self . Lines_num ) :
for col , length in zip ( self . Table , self . AttributesLength ) :
vals = list ( col . values ( ) ) [ 0 ]
val = vals [ line ] if len ( vals ) != 0 and line < len ( vals ) else ''
self . StrTable += "| "
self . StrTable += self . _pad_string ( val , length * 2 )
self . StrTable += "|" + '\n'
self . _print_divide ( ) |
def join_room(self, room_name):
    """Connects to a given room.

    If it does not exist it is created.
    """
    logging.debug('Joining room {ro}'.format(ro=room_name))
    existing = None
    for candidate in self.rooms:
        if candidate.name == room_name:
            existing = candidate
            break
    if existing is not None:
        # Known room: register the user and send the welcome message.
        existing.add_user(self)
        self._rooms[room_name] = existing
        existing.welcome(self)
    else:
        # Unknown room: create it first (no welcome on creation).
        room = Room(room_name)
        self.rooms.append(room)
        self._rooms[room_name] = room
        room.add_user(self)
def add_line(self, window, text, row=None, col=None, attr=None):
    """Unicode aware version of curses's built-in addnstr method.

    Safely draws a line of text on the window starting at position
    (row, col). Checks the boundaries of the window and cuts off the text
    if it exceeds the length of the window.
    """
    # The following arg combos must be supported to conform with addnstr
    #   (window, text)
    #   (window, text, attr)
    #   (window, text, row, col)
    #   (window, text, row, col, attr)
    cursor_row, cursor_col = window.getyx()
    row = row if row is not None else cursor_row
    col = col if col is not None else cursor_col
    max_rows, max_cols = window.getmaxyx()
    # Reserve the last column -- presumably to avoid curses errors when
    # writing the final cell of a line; TODO confirm.
    n_cols = max_cols - col - 1
    if n_cols <= 0:  # Trying to draw outside of the screen bounds
        return
    try:
        text = self.clean(text, n_cols)
        params = [] if attr is None else [attr]
        window.addstr(row, col, text, *params)
    except (curses.error, ValueError, TypeError) as e:
        # Curses handling of strings with invalid null bytes (b'\00')
        #   python 2: TypeError: "int,int,str"
        #   python 3: ValueError: "embedded null byte"
        _logger.warning('add_line raised an exception')
        _logger.exception(str(e))
def get_system_config_dir():
    """Returns system config location. E.g. /etc/dvc.conf.

    Returns:
        str: path to the system config directory.
    """
    from appdirs import site_config_dir
    app = Config.APPNAME
    author = Config.APPAUTHOR
    return site_config_dir(appname=app, appauthor=author)
def anchor(parser, token):
    """Parses a tag that's supposed to be in this format: {% anchor field title %}"""
    # Strip surrounding quote characters from each piece of the tag.
    bits = [b.strip('"\'') for b in token.split_contents()]
    if len(bits) < 2:
        # bits[0] is the tag name itself, so < 2 means no arguments.
        # NOTE: Python 2 raise syntax.
        raise TemplateSyntaxError, "anchor tag takes at least 1 argument"
    try:
        title = bits[2]
    except IndexError:
        # No explicit title given: derive one from the field name.
        title = bits[1].capitalize()
    return SortAnchorNode(bits[1].strip(), title.strip())
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}
    ttype = args['ttype']
    (sim_targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
    targets = load_yaml(sim_targets_yaml)
    # Settings shared by every per-target job.
    shared = dict(ttype=ttype, roi_baseline=args['roi_baseline'], extracopy=args['extracopy'], sim=sim)
    for target_name in targets:
        targetdir = NAME_FACTORY.sim_targetdir(target_type=ttype, target_name=target_name, sim_name=sim)
        cfg = dict(shared)
        cfg['target'] = target_name
        cfg['logfile'] = os.path.join(targetdir, 'copy_base_dir.log')
        job_configs[target_name] = cfg
    return job_configs
def get_meta_attribute(self, param):
    """Retrieves django-meta attributes from apphook config instance.

    :param param: django-meta attribute passed as key
    """
    raw = getattr(self.app_config, param)
    value = self._get_meta_value(param, raw)
    # Falsy meta values collapse to an empty string.
    return value or ''
def buffered_read(fh, lock, offsets, bytecounts, buffersize=None):
    """Return iterator over segments read from file."""
    if buffersize is None:
        buffersize = 2 ** 26
    total = len(offsets)
    index = 0
    while index < total:
        chunk = []
        # Hold the lock only while seeking/reading; yield outside it.
        with lock:
            filled = 0
            while filled < buffersize and index < total:
                fh.seek(offsets[index])
                count = bytecounts[index]
                chunk.append(fh.read(count))
                filled += count
                index += 1
        for segment in chunk:
            yield segment
def _apply_cached_indexes(self, cached_indexes, persist=False):
    """Reassign various resampler index attributes."""
    index_names = ('valid_input_index', 'valid_output_index', 'index_array', 'distance_array')
    for name in index_names:
        value = cached_indexes[name]
        if isinstance(value, tuple):
            # Unwrap single-element tuples produced by the cache format.
            value = value[0]
        elif isinstance(value, np.ndarray):
            # Wrap plain numpy arrays as chunked dask arrays.
            value = da.from_array(value, chunks=CHUNK_SIZE)
        elif persist and isinstance(value, da.Array):
            value = value.persist()
            cached_indexes[name] = value
        setattr(self.resampler, name, value)
def step(self, actions):
    """Takes a step in all environments.

    Subclasses should override _step to do the actual reset if something other
    than the default implementation is desired.

    Args:
        actions: Batch of actions.

    Returns:
        (preprocessed_observations, processed_rewards, dones, infos).
    """
    obs, rewards_raw, done_flags, info_batch = self._step(actions)
    # Rewards are processed in float32.
    rewards_raw = rewards_raw.astype(np.float32)
    rewards = self.process_rewards(rewards_raw)
    obs_processed = self.process_observations(obs)
    # Record the transition in the trajectory history.
    self.trajectories.step(obs_processed, rewards_raw, rewards, done_flags, actions)
    return obs_processed, rewards, done_flags, info_batch
def add_property_orders(query_proto, *orders):
    """Add ordering constraint for the given datastore.Query proto message.

    Args:
        query_proto: datastore.Query proto message.
        orders: list of property name strings, default to ascending
            order and set descending if prefixed by '-'.

    Usage:
        >>> add_property_orders(query_proto, 'foo')   # sort by foo asc
        >>> add_property_orders(query_proto, '-bar')  # sort by bar desc
    """
    for spec in orders:
        order_proto = query_proto.order.add()
        if spec[0] == '-':
            # Leading '-' selects descending order and is stripped from the name.
            order_proto.direction = query_pb2.PropertyOrder.DESCENDING
            spec = spec[1:]
        else:
            order_proto.direction = query_pb2.PropertyOrder.ASCENDING
        order_proto.property.name = spec
def delete(path, regex=None, recurse=False, test=False):
    """Deletes the file or directory at `path`. If `path` is a directory and
    `regex` is provided, matching files will be deleted; `recurse` controls
    whether subdirectories are recursed. A list of deleted items is returned.

    If `test` is true, nothing will be deleted and a list of items that would
    have been deleted is returned.
    """
    removed = []
    if op.isfile(path):
        if test:
            return [path]
        os.remove(path)
        # Report the path only when the removal actually took effect.
        return [] if op.exists(path) else [path]
    elif op.isdir(path):
        if regex:
            for root, _dirs, files in os.walk(path):
                matching = [f for f in files if _is_match(regex, f)]
                for fname in matching:
                    removed += delete(op.join(root, fname), test=test)
                if not recurse:
                    # Only the top directory level is scanned.
                    break
        else:
            if test:
                return [path]
            shutil.rmtree(path)
            return [] if op.exists(path) else [path]
    return removed
def detect_HouseDetector(dat_orig, s_freq, time, opts):
    """House arousal detection.

    Parameters
    ----------
    dat_orig : ndarray (dtype='float')
        vector with the data for one channel
    s_freq : float
        sampling frequency
    time : ndarray (dtype='float')
        vector with the time points for each sample
    opts : instance of 'DetectSlowWave'
        'duration' : tuple of float
            min and max duration of arousal

    Returns
    -------
    list of dict
        list of detected arousals
    float
        arousal density, per 30-s epoch
    """
    # Spectrogram windowing parameters derived from the options.
    nperseg = int(opts.spectrogram['dur'] * s_freq)
    overlap = opts.spectrogram['overlap']
    noverlap = int(overlap * nperseg)
    detrend = opts.spectrogram['detrend']
    min_interval = int(opts.min_interval * s_freq)
    sf, t, dat_det = spectrogram(dat_orig, fs=s_freq, nperseg=nperseg, noverlap=noverlap, detrend=detrend)
    freq1 = opts.freq_band1
    freq2 = opts.freq_band2
    # Indices of the spectrogram bins nearest to the requested band edges.
    f0 = asarray([abs(freq1[0] - x) for x in sf]).argmin() if freq1[0] else None
    f1 = asarray([abs(freq1[1] - x) for x in sf]).argmin() if freq1[1] else None
    # NOTE(review): f2 and f3 are both computed from freq2[1]; f2 was
    # presumably meant to use freq2[0] -- as written the f2:f3 slice is
    # empty. Confirm against the intended algorithm.
    f2 = asarray([abs(freq2[1] - x) for x in sf]).argmin() if freq2[1] else None
    f3 = asarray([abs(freq2[1] - x) for x in sf]).argmin() if freq2[1] else None
    dat_eq1 = zeros(dat_det.shape[1])
    dat_eq2 = zeros(dat_det.shape[1])
    for i in range(dat_det.shape[1]):
        dat_eq1[i] = splitpoint(dat_det[f0:f1, i], sf[f0:f1])
        dat_eq2[i] = splitpoint(dat_det[f2:f3, i], sf[f2:f3])
    # Ratio between successive band-1 splitpoints; a jump above det_thresh
    # marks a candidate arousal start.
    dat_acc = dat_eq1[1:] / dat_eq1[:-1]
    starts = dat_acc >= opts.det_thresh
    # NOTE(review): the prints below look like leftover threshold-calibration
    # debug output; consider routing them through the module logger (lg).
    print(f'starts: {sum(starts)}')
    print(f'1.01: {sum(dat_acc >= 1.01)}')
    print(f'1.02: {sum(dat_acc >= 1.02)}')
    print(f'1.05: {sum(dat_acc >= 1.05)}')
    print(f'1.1: {sum(dat_acc >= 1.1)}')
    print(f'1.2: {sum(dat_acc >= 1.2)}')
    print(f'1.3: {sum(dat_acc >= 1.3)}')
    print(f'1.4: {sum(dat_acc >= 1.4)}')
    print(f'1.5: {sum(dat_acc >= 1.5)}')
    print(f'1.75: {sum(dat_acc >= 1.75)}')
    print(f'2: {sum(dat_acc >= 2)}')
    print(f'2.5: {sum(dat_acc >= 2.5)}')
    print(f'3: {sum(dat_acc >= 3)}')
    print(f'5: {sum(dat_acc >= 5)}')
    print(f'10: {sum(dat_acc >= 10)}')
    if starts.any():
        new_starts = asarray(zeros(len(starts)), dtype=bool)
        ends = asarray(zeros(len(starts) - 1), dtype=bool)
        iter_len = len(starts) - 2
        i = 0
        while i <= iter_len:
            if starts[i]:
                # Walk forward until the band-2 level drops below the
                # end-of-event threshold relative to the start level.
                for j, k in enumerate(dat_eq2[i + 2:-1]):
                    if k < dat_eq2[i] * opts.det_thresh_end:
                        new_starts[i] = True
                        ends[i + j + 1] = True
                        break
                # NOTE(review): if the inner loop never breaks, j is left at
                # its last enumerate value (or unbound when the slice is
                # empty) -- confirm that case cannot occur.
                i += j + min_interval
            else:
                i += 1
        if sum(new_starts) > sum(ends):  # a start without an end
            ends[-1] = True
        events = vstack((where(new_starts == True)[0] + 1, where(ends == True)[0] + 2)).T
        if overlap:
            events = events - int(1 / 2 / overlap)
        # from win centre to win start
        events = events * (nperseg - noverlap)
        # upsample
        print(f'n_events before dur = {events.shape}')
        events = within_duration(events, time, opts.duration)
        print(f'n_events after dur = {events.shape}')
        events = remove_straddlers(events, time, s_freq)
        print(f'n_events after strad = {events.shape}')
        ar_in_chan = make_arousals(events, time, s_freq)
    else:
        lg.info('No arousals found')
        ar_in_chan = []
    return ar_in_chan
def get_by_ip_hostname(self, ip_hostname):
    """Retrieve a storage system by its IP.

    Works only with API version <= 300.

    Args:
        ip_hostname: Storage system IP or hostname.

    Returns:
        dict
    """
    # Return the first storage system whose credentials match, else None.
    for resource in self._client.get_all():
        if resource['credentials']['ip_hostname'] == ip_hostname:
            return resource
    return None
def cmdloop(self):
    """Start the main loop of the interactive shell.

    The preloop() and postloop() methods are always run before and after the
    main loop, respectively.

    Returns:
        'root': Inform the parent shell to keep exiting until the root
            shell is reached.
        'all': Exit all the way back to the command line shell.
        False, None, or anything that evaluates as False: Exit this
            shell, enter the parent shell.
        An integer: The depth of the shell to exit to. 0 = root shell.

    History:
        _ShellBase histories are persistently saved to files whose name
        matches the prompt string (see the history_fname property). All
        history files are saved to self._temp_dir; subshells share the
        temp_dir of their root shell. The parent shell's history is saved
        and restored by the parent (launch_subshell()); the subshell's own
        history is saved and restored here in cmdloop().

    Completer Delimiters:
        Certain characters such as '-' could be part of a command, but are
        delimiters to readline by default, which breaks completion for
        candidates containing them. The old delimiters are saved before the
        loop and restored after it ends to keep the environment clean.
    """
    self.print_debug("Enter subshell '{}'".format(self.prompt))
    # Save the completer function, the history buffer, and the
    # completer_delims.
    old_completer = readline.get_completer()
    old_delims = readline.get_completer_delims()
    new_delims = ''.join(list(set(old_delims) - set(_ShellBase._non_delims)))
    readline.set_completer_delims(new_delims)
    # Load the new completer function and start a new history buffer.
    readline.set_completer(self.__driver_stub)
    readline.clear_history()
    if os.path.isfile(self.history_fname):
        readline.read_history_file(self.history_fname)
    # main loop
    try:
        # The exit_directive:
        #   True        Leave this shell, enter the parent shell.
        #   False       Continue with the loop.
        #   'root'      Exit to the root shell.
        #   'all'       Exit to the command line.
        #   an integer  The depth of the shell to exit to. 0 = root
        #               shell. Negative number is taken as error.
        self.preloop()
        while True:
            exit_directive = False
            try:
                if self.batch_mode:
                    # Batch input arrives over a pipe instead of stdin.
                    line = self._pipe_end.recv()
                else:
                    line = input(self.prompt).strip()
            except EOFError:
                # Ctrl-D / end of input is translated to the EOF sentinel.
                line = _ShellBase.EOF
            try:
                exit_directive = self.__exec_line__(line)
            except:
                # NOTE(review): bare except -- deliberately catches everything
                # so a failing command cannot kill the shell loop.
                self.stderr.write(traceback.format_exc())
            if type(exit_directive) is int:
                # Numeric directive: compare requested depth with our depth.
                if len(self._mode_stack) > exit_directive:
                    break
                if len(self._mode_stack) == exit_directive:
                    continue
            if self._mode_stack and exit_directive == 'root':
                break
            if exit_directive in {'all', True, }:
                break
    finally:
        self.postloop()
    # Restore the completer function, save the history, and restore old
    # delims.
    readline.set_completer(old_completer)
    readline.write_history_file(self.history_fname)
    readline.set_completer_delims(old_delims)
    self.print_debug("Leave subshell '{}': {}".format(self.prompt, exit_directive))
    return exit_directive
def _as_rescale ( self , get , targetbitdepth ) :
"""Helper used by : meth : ` asRGB8 ` and : meth : ` asRGBA8 ` .""" | width , height , pixels , meta = get ( )
maxval = 2 ** meta [ 'bitdepth' ] - 1
targetmaxval = 2 ** targetbitdepth - 1
factor = float ( targetmaxval ) / float ( maxval )
meta [ 'bitdepth' ] = targetbitdepth
def iterscale ( rows ) :
for row in rows :
yield array ( 'BH' [ targetbitdepth > 8 ] , [ int ( round ( x * factor ) ) for x in row ] )
if maxval == targetmaxval :
return width , height , pixels , meta
else :
if 'transparent' in meta :
transparent = meta [ 'transparent' ]
if isinstance ( transparent , tuple ) :
transparent = tuple ( list ( iterscale ( ( transparent , ) ) ) [ 0 ] )
else :
transparent = tuple ( list ( iterscale ( ( ( transparent , ) , ) ) ) [ 0 ] ) [ 0 ]
meta [ 'transparent' ] = transparent
return width , height , iterscale ( pixels ) , meta |
def del_store(source, store, saltenv='base'):
    '''Delete the given cert into the given Certificate Store

    source
        The source certificate file this can be in the form
        salt://path/to/file

    store
        The certificate store to delete the certificate from

    saltenv
        The salt environment to use this is ignored if the path
        is local

    CLI Example:

    .. code-block:: bash

        salt '*' certutil.del_store salt://cert.cer TrustedPublisher
    '''
    # Pull the certificate down locally, look up its serial, then delete
    # that serial from the store via certutil.
    cached = __salt__['cp.cache_file'](source, saltenv)
    serial = get_cert_serial(cached)
    cmd = "certutil.exe -delstore {0} {1}".format(store, serial)
    return __salt__['cmd.run'](cmd)
def get_pourbaix_domains(pourbaix_entries, limits=None):
    """Returns a set of pourbaix stable domains (i.e. polygons) in
    pH-V space from a list of pourbaix_entries.

    This function works by using scipy's HalfspaceIntersection
    function to construct all of the 2-D polygons that form the
    boundaries of the planes corresponding to individual entry
    gibbs free energies as a function of pH and V. Hyperplanes
    of the form a*pH + b*V + 1 - g(0, 0) are constructed and
    supplied to HalfspaceIntersection, which then finds the
    boundaries of each pourbaix region using the intersection
    points.

    Args:
        pourbaix_entries ([PourbaixEntry]): Pourbaix entries
            with which to construct stable pourbaix domains
        limits ([[float]]): limits in which to do the pourbaix
            analysis

    Returns:
        Returns a dict of the form {entry: [boundary_points]}.
        The list of boundary points are the sides of the N-1
        dim polytope bounding the allowable ph-V range of each entry.
    """
    if limits is None:
        # Default window: pH in [-2, 16], V in [-4, 4].
        limits = [[-2, 16], [-4, 4]]
    # Get hyperplanes
    hyperplanes = [np.array([-PREFAC * entry.npH, -entry.nPhi, 0, -entry.energy]) * entry.normalization_factor for entry in pourbaix_entries]
    hyperplanes = np.array(hyperplanes)
    # Third column is the (free-energy axis) coefficient, fixed to 1.
    hyperplanes[:, 2] = 1
    max_contribs = np.max(np.abs(hyperplanes), axis=0)
    # A free-energy level guaranteed to lie below every plane in the window.
    g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
    # Add border hyperplanes and generate HalfspaceIntersection
    border_hyperplanes = [[-1, 0, 0, limits[0][0]], [1, 0, 0, -limits[0][1]], [0, -1, 0, limits[1][0]], [0, 1, 0, -limits[1][1]], [0, 0, -1, 2 * g_max]]
    hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
    # Interior point: the centre of the pH-V window at the g_max level.
    interior_point = np.average(limits, axis=1).tolist() + [g_max]
    hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
    # organize the boundary points by entry
    pourbaix_domains = {entry: [] for entry in pourbaix_entries}
    for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets):
        for v in facet:
            # Indices beyond the entry list belong to border hyperplanes.
            if v < len(pourbaix_entries):
                this_entry = pourbaix_entries[v]
                pourbaix_domains[this_entry].append(intersection)
    # Remove entries with no pourbaix region
    pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
    pourbaix_domain_vertices = {}
    for entry, points in pourbaix_domains.items():
        # Keep only the pH-V coordinates of each boundary point.
        points = np.array(points)[:, :2]
        # Initial sort to ensure consistency
        points = points[np.lexsort(np.transpose(points))]
        center = np.average(points, axis=0)
        points_centered = points - center
        # Sort points by cross product of centered points,
        # isn't strictly necessary but useful for plotting tools
        point_comparator = lambda x, y: x[0] * y[1] - x[1] * y[0]
        points_centered = sorted(points_centered, key=cmp_to_key(point_comparator))
        points = points_centered + center
        # Create simplices corresponding to pourbaix boundary
        simplices = [Simplex(points[indices]) for indices in ConvexHull(points).simplices]
        pourbaix_domains[entry] = simplices
        pourbaix_domain_vertices[entry] = points
    return pourbaix_domains, pourbaix_domain_vertices
def replace_namespaced_config_map(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_config_map  # noqa: E501

    Replace the specified ConfigMap. Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead.

    >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ConfigMap body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ConfigMap
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the sync and async paths delegate to the *_with_http_info
    # variant, which itself returns either the data or the thread
    # depending on whether 'async_req' is in kwargs.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_config_map_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def add_key_path(key_proto, *path_elements):
    """Append path elements to the given datastore.Key proto message.

    Args:
      key_proto: datastore.Key proto message.
      *path_elements: alternating (kind, id_or_name) ancestor pairs:
        (kind1, id1/name1, ..., kindN, idN/nameN). If the final kind has
        no trailing id/name, the key is left incomplete.

    Raises:
      TypeError: the given id or name has the wrong type.

    Returns:
      the same datastore.Key (or None when the key is left incomplete,
      matching the historical behaviour).
    """
    for idx in range(0, len(path_elements), 2):
        chunk = path_elements[idx:idx + 2]
        element = key_proto.path.add()
        element.kind = chunk[0]
        if len(chunk) < 2:
            # Trailing kind with no id/name: leave the key incomplete.
            return
        identifier = chunk[1]
        # NOTE: 'long' and 'basestring' make this Python 2 only, matching
        # the rest of this module.
        if isinstance(identifier, (int, long)):
            element.id = identifier
        elif isinstance(identifier, basestring):
            element.name = identifier
        else:
            raise TypeError('Expected an integer id or string name as argument %d; ' 'received %r (a %s).' % (idx + 2, identifier, type(identifier)))
    return key_proto
def build_pub_dates(article, pub_dates):
    """Convert pub_dates into ArticleDate objects and add them to article.

    Each item in pub_dates is a dict; the date type is taken from
    'date-type' when present, otherwise from 'pub-type'. Entries with
    neither key are skipped.
    """
    for pub_date in pub_dates:
        # always want a date type, take it from pub-type if must
        if pub_date.get('date-type'):
            date_instance = ea.ArticleDate(pub_date.get('date-type'), pub_date.get('date'))
        elif pub_date.get('pub-type'):
            date_instance = ea.ArticleDate(pub_date.get('pub-type'), pub_date.get('date'))
        else:
            # BUG FIX: previously an entry with neither key reused the
            # previous iteration's date_instance (or raised NameError on
            # the first iteration); skip such entries instead.
            continue
        # Set more values
        utils.set_attr_if_value(date_instance, 'pub_type', pub_date.get('pub-type'))
        utils.set_attr_if_value(date_instance, 'publication_format', pub_date.get('publication-format'))
        utils.set_attr_if_value(date_instance, 'day', pub_date.get('day'))
        utils.set_attr_if_value(date_instance, 'month', pub_date.get('month'))
        utils.set_attr_if_value(date_instance, 'year', pub_date.get('year'))
        article.add_date(date_instance)
def terminate(self, nodes=None):
    """Destroy one or many nodes.

    :param nodes: Nodes to be destroyed (defaults to all known nodes).
    :type nodes: ``list``
    :return: List of nodes which failed to terminate, or None when the
        client is not connected.
    :rtype: ``list``
    """
    if not self.is_connected():
        return None
    targets = nodes or self.nodes
    outcome = self.gce.ex_destroy_multiple_nodes(
        targets, poll_interval=1, ignore_errors=False)
    # Verify whether all instances have been terminated, collecting
    # the ones whose destruction was reported as failed.
    failed_kill = []
    for node, destroyed in zip(targets, outcome):
        if destroyed:
            logging.info('Successfully destroyed: %s', node.name)
        else:
            logging.error('Failed to destroy: %s', node.name)
            failed_kill.append(node)
    return failed_kill
def get_name(self):
    """@rtype:  str
    @return:   Module name, as used in labels.

    @warning:  Names are B{NOT} guaranteed to be unique.
        If you need unique identification for a loaded module,
        use the base address instead.

    @see: L{get_label}
    """
    pathname = self.get_filename()
    if not pathname:
        # No filename available: fall back to the hex base address.
        return "0x%x" % self.get_base()
    modName = self.__filename_to_modname(pathname)
    if isinstance(modName, compat.unicode):
        # Best-effort downgrade to a byte string; keep the unicode
        # name (and warn) when it cannot be encoded.
        try:
            modName = modName.encode('cp1252')
        except UnicodeEncodeError:
            warnings.warn(str(sys.exc_info()[1]))
    return modName
def _get_message ( self , key , since = None ) :
"""Return the MdMessage object for the key .
The object is either returned from the cache in the store or
made , cached and then returned .
If ' since ' is passed in the modification time of the file is
checked and the message is only returned if the mtime is since
the specified time .
If the ' since ' check fails , None is returned .
' since ' must be seconds since epoch .""" | stored = self . store [ key ]
if isinstance ( stored , dict ) :
filename = stored [ "path" ]
folder = stored [ "folder" ]
if since and since > 0.0 :
st = stat ( filename )
if st . st_mtime < since :
return None
stored = MdMessage ( key , filename = filename , folder = folder , filesystem = folder . filesystem )
self . store [ key ] = stored
else :
if since and since > 0.0 :
st = stat ( stored . filename )
if st . st_mtime < since :
return None
return stored |
def match_tracks(self, set_a, set_b, closest_matches=False):
    """Find the optimal set of matching assignments between set a and set b.

    Supports optimal 1:1 matching using the Munkres (Hungarian) method,
    or matching from every object in set a to the closest object in
    set b; in the latter case set b accepts multiple matches from set a.

    Args:
        set_a: first collection of tracks.
        set_b: second collection of tracks.
        closest_matches: if True use nearest-neighbour matching instead
            of the optimal 1:1 Munkres assignment.

    Returns:
        list of (row, col) index pairs into set_a/set_b whose scaled
        cost is below the matching threshold of 100.
    """
    costs = self.track_cost_matrix(set_a, set_b) * 100
    min_row_costs = costs.min(axis=1)
    min_col_costs = costs.min(axis=0)
    # Rows/columns whose best cost is >= 100 can never be matched.
    good_rows = np.where(min_row_costs < 100)[0]
    good_cols = np.where(min_col_costs < 100)[0]
    assignments = []
    if len(good_rows) > 0 and len(good_cols) > 0:
        # BUG FIX: indexing with the bare list returned by np.meshgrid
        # is deprecated and rejected by modern NumPy; np.ix_ builds the
        # equivalent open-mesh index tuple.
        sub_costs = costs[np.ix_(good_rows, good_cols)]
        if closest_matches:
            b_matches = sub_costs.argmin(axis=1)
            a_matches = np.arange(b_matches.size)
            initial_assignments = [(good_rows[a_matches[x]], good_cols[b_matches[x]])
                                   for x in range(b_matches.size)]
        else:
            munk = Munkres()
            initial_assignments = munk.compute(sub_costs.tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]])
                                   for x in initial_assignments]
        # Keep only assignments that actually fall under the threshold.
        for a in initial_assignments:
            if costs[a[0], a[1]] < 100:
                assignments.append(a)
    return assignments
def process_agreement_events_publisher(publisher_account, agreement_id, did, service_agreement, price, consumer_address, condition_ids):
    """Process the agreement events during the register of the service agreement for the publisher side

    :param publisher_account: Account instance of the publisher
    :param agreement_id: id of the agreement, hex str
    :param did: DID, str
    :param service_agreement: ServiceAgreement instance
    :param price: Asset price, int
    :param consumer_address: ethereum account address of consumer, hex str
    :param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
    :return: None; registers three event watchers as a side effect.
    """
    conditions_dict = service_agreement.condition_by_name
    events_manager = EventsManager.get_instance(Keeper.get_instance())
    # Each watcher pairs an on-chain event with a follow-up fulfilment
    # call and a per-condition timeout taken from the service agreement.
    # NOTE(review): handler/timeout pairing is inferred from the condition
    # names ('lockReward' -> access fulfilment, etc.) — confirm against
    # the EventsManager / keeper condition documentation.
    events_manager.watch_lock_reward_event(agreement_id, access_secret_store_condition.fulfillAccessSecretStoreCondition, None, (agreement_id, did, service_agreement, consumer_address, publisher_account), conditions_dict['lockReward'].timeout)
    events_manager.watch_access_event(agreement_id, escrow_reward_condition.fulfillEscrowRewardCondition, None, (agreement_id, service_agreement, price, consumer_address, publisher_account, condition_ids), conditions_dict['accessSecretStore'].timeout)
    events_manager.watch_reward_event(agreement_id, verify_reward_condition.verifyRewardTokens, None, (agreement_id, did, service_agreement, price, consumer_address, publisher_account), conditions_dict['escrowReward'].timeout)
def child_task(self):
    '''child process - this holds all the GUI elements'''
    # Close the parent's ends of the pipes in this (child) process so
    # only the child-side descriptors remain open here.
    self.parent_pipe_send.close()
    self.parent_pipe_recv.close()
    # Imports are deferred until after the process split so wx is only
    # loaded in the GUI process. NOTE(review): wx_processguard appears
    # to be imported for its side effects only — confirm.
    from MAVProxy.modules.lib import wx_processguard
    from MAVProxy.modules.lib.wx_loader import wx
    from MAVProxy.modules.lib.wxconsole_ui import ConsoleFrame
    # Build and show the console frame, then block in the wx main loop
    # for the lifetime of the child process.
    app = wx.App(False)
    app.frame = ConsoleFrame(state=self, title=self.title)
    app.frame.SetDoubleBuffered(True)
    app.frame.Show()
    app.MainLoop()
def bld_rafter_deflection(length=-9, force=-9, E_mod_elasticity=-9, I_moment_of_intertia=-9):
    """calculate rafter deflections - see test_calc_building_design.py for
    Sample values for equations below from Structures II course

    Any argument left at the sentinel value -9 is prompted for
    interactively. Returns (and pretty-prints) a dict with the maximum
    centre-load and distributed-load deflections.
    """
    # Prompt interactively for any parameter not supplied by the caller.
    if length == -9:
        length = float(input('enter rafter length : '))
    if force == -9:
        force = float(input('enter Force or weight applied to roof : '))
    if E_mod_elasticity == -9:
        E_mod_elasticity = float(input('enter modulus of elasticity x10**5 (Steel beam example=2.1) : '))
    if I_moment_of_intertia == -9:
        I_moment_of_intertia = float(input('enter members "moment of intertia x10**6" (for Steel beam 410UB53.7=188 ) :'))
    results = {
        'max deflection - centre load':
            (1 * force * (length ** 3)) / (48 * (E_mod_elasticity * 10 ** 5) * (I_moment_of_intertia * 10 ** 6)),
        'max deflection - distrib load':
            (5 * force * (length ** 4)) / (384 * (E_mod_elasticity * 10 ** 5) * (I_moment_of_intertia * 10 ** 6)),
    }
    pprint(results)
    return results
def subscribe(user_id, to_all=False, campaign_ids=None, on_error=None, on_success=None):
    """Resubscribe a user to some or all campaigns.

    :param str|number user_id: the id you use to identify a user; should
        be static for the lifetime of a user.
    :param bool to_all: True to resubscribe to all campaigns. Takes
        precedence over campaign_ids if both are given.
    :param list of str campaign_ids: campaign IDs to resubscribe the user to.
    :param func on_error: optional callback ``(code, error)`` invoked on
        failure; ``code`` is one of outbound.ERROR_XXXXX.
    :param func on_success: optional zero-argument callback invoked on success.
    """
    # Delegates to the shared subscription helper with unsubscribe=False.
    __subscription(
        user_id,
        unsubscribe=False,
        all_campaigns=to_all,
        campaign_ids=campaign_ids,
        on_error=on_error,
        on_success=on_success,
    )
def utf8(value):
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned
    unchanged. Otherwise it must be a unicode string and is encoded
    as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if isinstance(value, unicode_type):
        return value.encode("utf-8")
    raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
def create_signature(key_dict, data):
    """
    <Purpose>
      Return a signature dictionary of the form:
      {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
       'sig': '...'}.

      The signing process will use the private key in
      key_dict['keyval']['private'] and 'data' to generate the signature.

      The following signature schemes are supported:
      'RSASSA-PSS'
        RFC3447 - RSASSA-PSS
        http://www.ietf.org/rfc/rfc3447.
      'ed25519'
        ed25519 - high-speed high security signatures
        http://ed25519.cr.yp.to/

      Which signature to generate is determined by the key type of
      'key_dict' and the available cryptography library specified in
      'settings'.

      >>> ed25519_key = generate_ed25519_key()
      >>> data = 'The quick brown fox jumps over the lazy dog'
      >>> signature = create_signature(ed25519_key, data)
      >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
      True
      >>> len(signature['sig'])
      128
      >>> rsa_key = generate_rsa_key(2048)
      >>> signature = create_signature(rsa_key, data)
      >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
      True
      >>> ecdsa_key = generate_ecdsa_key()
      >>> signature = create_signature(ecdsa_key, data)
      >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
      True

    <Arguments>
      key_dict:
        A dictionary containing the keys.  An example RSA key dict has the
        form:
        {'keytype': 'rsa',
         'scheme': 'rsassa-pss-sha256',
         'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
         'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
                    'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
        The public and private keys are strings in PEM format.

      data:
        Data to be signed.  This should be a bytes object; data should be
        encoded/serialized before it is passed here.  The same value can be
        passed into securesystemslib.verify_signature() (along with the
        public key) to later verify the signature.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'key_dict' is improperly
      formatted.

      securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict'
      specifies an unsupported key type or signing scheme.

      TypeError, if 'key_dict' contains an invalid keytype.

    <Side Effects>
      The cryptography library specified in 'settings' is called to perform
      the actual signing routine.

    <Returns>
      A signature dictionary conformant to
      'securesystemslib_format.SIGNATURE_SCHEMA'.
    """
    # Does 'key_dict' have the correct format?
    # This check will ensure 'key_dict' has the appropriate number of objects
    # and object types, and that all dict keys are properly named.
    # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
    # The key type of 'key_dict' must be either 'rsa' or 'ed25519'.
    securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)
    # Signing the 'data' object requires a private key. 'rsassa-pss-sha256',
    # 'ed25519', and 'ecdsa-sha2-nistp256' are the only signing schemes
    # currently supported.  RSASSA-PSS keys and signatures can be generated
    # and verified by pyca_crypto_keys.py, and Ed25519 keys by PyNaCl and
    # PyCA's optimized, pure python implementation of Ed25519.
    signature = {}
    keytype = key_dict['keytype']
    scheme = key_dict['scheme']
    public = key_dict['keyval']['public']
    private = key_dict['keyval']['private']
    keyid = key_dict['keyid']
    sig = None
    if keytype == 'rsa':
        if scheme == 'rsassa-pss-sha256':
            # Normalise Windows line endings in the PEM before signing.
            private = private.replace('\r\n', '\n')
            sig, scheme = securesystemslib.pyca_crypto_keys.create_rsa_signature(private, data, scheme)
        else:
            raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported' ' RSA signature scheme specified: ' + repr(scheme))
    elif keytype == 'ed25519':
        # Ed25519 keys are stored hex-encoded; decode to raw bytes.
        public = binascii.unhexlify(public.encode('utf-8'))
        private = binascii.unhexlify(private.encode('utf-8'))
        sig, scheme = securesystemslib.ed25519_keys.create_signature(public, private, data, scheme)
    elif keytype == 'ecdsa-sha2-nistp256':
        sig, scheme = securesystemslib.ecdsa_keys.create_signature(public, private, data, scheme)
    # 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key
    # types.  This is a defensive check against an invalid key type.
    else:  # pragma: no cover
        raise TypeError('Invalid key type.')
    # Build the signature dictionary to be returned.
    # The hexadecimal representation of 'sig' is stored in the signature.
    signature['keyid'] = keyid
    signature['sig'] = binascii.hexlify(sig).decode()
    return signature
async def storm(self, text, opts=None, user=None):
    '''
    Evaluate a storm query and yield (node, path) tuples.

    Args:
        text (str): the storm query text.
        opts (dict): optional query options passed through to the snap.
        user: the user to run as; defaults to the 'root' user.

    Yields:
        (Node, Path) tuples
    '''
    if user is None:
        user = self.auth.getUserByName('root')
    # Promote this task under the 'storm' name with the query attached
    # as task info (makes the running query visible to the boss).
    await self.boss.promote('storm', user=user, info={'query': text})
    # Run the query inside a snap scoped to the user and stream results.
    async with await self.snap(user=user) as snap:
        async for mesg in snap.storm(text, opts=opts, user=user):
            yield mesg
def prob_lnm(m1, m2, s1z, s2z, **kwargs):
    '''Return probability density for uniform in log

    Parameters
    ----------
    m1: array
        Component masses 1
    m2: array
        Component masses 2
    s1z: array
        Aligned spin 1 (Not in use currently)
    s2z:
        Aligned spin 2 (Not in use currently)
    **kwargs: string
        Keyword arguments as model parameters

    Returns
    -------
    p_m1_m2: array
        The probability density for m1, m2 pair
    '''
    min_mass = kwargs.get('min_mass', 5.)
    max_mass = kwargs.get('max_mass', 95.)
    max_mtotal = min_mass + max_mass
    m1, m2 = np.array(m1), np.array(m2)
    # Normalisation constant for the log-uniform mass distribution.
    C_lnm = integrate.quad(
        lambda x: (log(max_mtotal - x) - log(min_mass)) / x, min_mass, max_mass)[0]
    # Order each pair so m1 >= m2 componentwise.
    m1, m2 = np.maximum(m1, m2), np.minimum(m1, m2)
    # bound == 2 exactly when all mass constraints are satisfied.
    bound = np.sign(max_mtotal - m1 - m2)
    bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
    outside = np.where(bound != 2)
    p_m1_m2 = (1 / C_lnm) * (1. / m1) * (1. / m2)
    # Zero the density for samples violating the constraints.
    p_m1_m2[outside] = 0
    return p_m1_m2
def get_ast_dict(belstr, component_type: str = ""):
    """Convert BEL string to AST dictionary

    Args:
        belstr: BEL string
        component_type: Empty string or 'subject' or 'object' to indicate that we
            are parsing the subject or object field input

    Returns:
        tuple: (ast, errors) — the parsed AST and the accumulated error list.
    """
    errors = []
    parsed = {}
    bels = list(belstr)
    # Each stage threads (parsed, errors) through and builds on the
    # previous stage's output, so the call order below is significant.
    # NOTE(review): stage semantics inferred from names — confirm against
    # the parser module.
    char_locs, errors = parse_chars(bels, errors)
    parsed, errors = parse_functions(belstr, char_locs, parsed, errors)
    parsed, errors = parse_args(bels, char_locs, parsed, errors)
    parsed, errors = arg_types(parsed, errors)
    parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
    parsed, errors = parse_nested(bels, char_locs, parsed, errors)
    errors = parsed_top_level_errors(parsed, errors)
    # Convert the flat parse dict into the final AST representation.
    ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
    return ast, errors
def register(linter):
    '''Required method to auto register this checker'''
    checker_classes = (
        ResourceLeakageChecker,
        BlacklistedImportsChecker,
        MovedTestCaseClassChecker,
        BlacklistedLoaderModulesUsageChecker,
        BlacklistedFunctionsChecker,
    )
    # Instantiate and register each checker, preserving the original order.
    for checker_class in checker_classes:
        linter.register_checker(checker_class(linter))
def timestamps(self):
    '''Get all timestamps from all series in the group.'''
    # Union the timestamps of every series, then return them sorted.
    # (itervalues keeps this Python 2 compatible, as in the original.)
    merged = set()
    for series in self.groups.itervalues():
        merged.update(series.timestamps)
    return sorted(merged)
def density(self):
    """Gives empirical PDF, like np.histogram(...., density=True).

    Returns the histogram counts normalised by bin width and total
    sample count self.n.
    """
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use the
    # builtin float (same dtype, np.float64) instead.
    h = self.histogram.astype(float)
    bindifs = np.array(np.diff(self.bin_edges), float)
    return h / (bindifs * self.n)
def draw(self):
    """Draws the Plot to screen.

    If there is a continuous datatype for the nodes, it will be reflected
    in self.sm being constructed (in `compute_node_colors`). It will then
    automatically add in a colorbar to the plot and scale the plot axes
    accordingly.
    """
    self.draw_nodes()
    self.draw_edges()
    # note that self.groups only exists on condition
    # that group_label_position was given!
    if hasattr(self, "groups") and self.groups:
        self.draw_group_labels()
    logging.debug("DRAW: {0}".format(self.sm))
    if self.sm:
        # Shrink the main plot area and attach a colorbar axes on the
        # right-hand side for the scalar mappable.
        self.figure.subplots_adjust(right=0.8)
        cax = self.figure.add_axes([0.85, 0.2, 0.05, 0.6])
        self.figure.colorbar(self.sm, cax=cax)
    # Recompute data limits and rescale so all drawn artists are visible.
    self.ax.relim()
    self.ax.autoscale_view()
    self.ax.set_aspect("equal")
def set_privkey_compressed(privkey, compressed=True):
    """Make sure the private key given is compressed or not compressed.

    A compressed hex private key is 66 chars and ends in '01'; an
    uncompressed one is 64 chars. Returns the key adjusted to the
    requested form; raises ValueError on malformed input.
    """
    if len(privkey) not in (64, 66):
        raise ValueError("expected 32-byte private key as a hex string")
    if compressed:
        # Append the compression suffix only when it is missing.
        return privkey + '01' if len(privkey) == 64 else privkey
    if len(privkey) == 66:
        if not privkey.endswith('01'):
            raise ValueError("private key does not end in '01'")
        return privkey[:-2]
    return privkey
def parse(self, fileobj, name_hint='', parser=None):
    """Fill from a file-like object."""
    # Reset the current block before ingesting new content.
    self.current_block = None
    if not parser:
        parser = Parser()
    for parsed_line in parser.parse(fileobj, name_hint=name_hint):
        self.handle_line(parsed_line)
def reset_namespace(self, namespace=None, params=None):
    """Will delete and recreate specified namespace

    args:
        namespace (str): Namespace to reset (defaults to self.namespace)
        params (dict): params used to reset the namespace
            (defaults to self.namespace_params)
    """
    target_ns = pick(namespace, self.namespace)
    ns_params = pick(params, self.namespace_params)
    log.warning(" Reseting namespace '%s' at host: %s", target_ns, self.url)
    try:
        self.delete_namespace(target_ns)
    except KeyError:
        # Namespace did not exist; nothing to delete before recreation.
        pass
    self.create_namespace(target_ns, ns_params)
def get_midi_data(self):
    """Collect and return the raw, binary MIDI data from the tracks."""
    # Skip tracks whose data is empty, then prepend the file header.
    chunks = [track.get_midi_data() for track in self.tracks if track.track_data != '']
    return self.header() + ''.join(chunks)
def is_connected(self):
    """Returns *True* if the SMTP connection is initialized and
    connected. Otherwise returns *False*"""
    try:
        # NOOP round-trips to the server; AttributeError means the
        # connection object was never created.
        self._conn.noop()
        return True
    except (AttributeError, smtplib.SMTPServerDisconnected):
        return False
def add_source(self, label, source_type, **kwargs):
    """Add a source to the spec.

    Sources should have a unique label. This will help tracing where your
    configurations are coming from if you turn up the log-level.

    The keyword arguments are significant. Different sources require
    different keyword arguments. Required keys for each source_type are
    listed below; for a detailed list of all possible arguments, see the
    individual source's documentation.

    source_type: dict
        required: ``data`` - a dictionary
    source_type: environment
        no required keyword arguments
    source_type: etcd
        required: ``client`` - a client from the python-etcd package
    source_type: json
        required: ``filename`` - a JSON file, or
                  ``data`` - a string representation of JSON
    source_type: kubernetes
        required: ``client`` - a client from the kubernetes package;
                  ``name`` - the name of the ConfigMap to load
    source_type: yaml
        required: ``filename`` - a YAML file

    Args:
        label (str): A label for the source.
        source_type (str): A source type; available source types depend
            on the packages installed. See ``yapconf.ALL_SUPPORTED_SOURCES``
            for a complete list.
    """
    # Build the source first, then register it under its label.
    source = get_source(label, source_type, **kwargs)
    self._sources[label] = source
def timesince(dt, default='just now'):
    '''Returns string representing 'time since' e.g.
    3 days ago, 5 hours ago etc.

    ``dt`` may be a datetime (measured against now) or a timedelta.
    At most the two most significant non-zero units are reported.

    >>> now = datetime.datetime.now()
    >>> timesince(now)
    'just now'
    >>> timesince(now - datetime.timedelta(seconds=1))
    '1 second ago'
    >>> timesince(now - datetime.timedelta(seconds=2))
    '2 seconds ago'
    >>> timesince(now - datetime.timedelta(seconds=60))
    '1 minute ago'
    >>> timesince(now - datetime.timedelta(seconds=61))
    '1 minute and 1 second ago'
    >>> timesince(now - datetime.timedelta(seconds=62))
    '1 minute and 2 seconds ago'
    >>> timesince(now - datetime.timedelta(seconds=120))
    '2 minutes ago'
    >>> timesince(now - datetime.timedelta(seconds=121))
    '2 minutes and 1 second ago'
    >>> timesince(now - datetime.timedelta(seconds=122))
    '2 minutes and 2 seconds ago'
    >>> timesince(now - datetime.timedelta(seconds=3599))
    '59 minutes and 59 seconds ago'
    >>> timesince(now - datetime.timedelta(seconds=3600))
    '1 hour ago'
    >>> timesince(now - datetime.timedelta(seconds=3601))
    '1 hour and 1 second ago'
    >>> timesince(now - datetime.timedelta(seconds=3602))
    '1 hour and 2 seconds ago'
    >>> timesince(now - datetime.timedelta(seconds=3660))
    '1 hour and 1 minute ago'
    >>> timesince(now - datetime.timedelta(seconds=3661))
    '1 hour and 1 minute ago'
    >>> timesince(now - datetime.timedelta(seconds=3720))
    '1 hour and 2 minutes ago'
    >>> timesince(now - datetime.timedelta(seconds=3721))
    '1 hour and 2 minutes ago'
    >>> timesince(datetime.timedelta(seconds=3721))
    '1 hour and 2 minutes ago'
    '''
    if isinstance(dt, datetime.timedelta):
        diff = dt
    else:
        now = datetime.datetime.now()
        diff = abs(now - dt)
    # BUG FIX: use floor division so each period is an int on Python 3;
    # true division yielded floats that only printed correctly because
    # '%d' truncates.
    periods = (
        (diff.days // 365, 'year', 'years'),
        (diff.days % 365 // 30, 'month', 'months'),
        (diff.days % 30 // 7, 'week', 'weeks'),
        (diff.days % 7, 'day', 'days'),
        (diff.seconds // 3600, 'hour', 'hours'),
        (diff.seconds % 3600 // 60, 'minute', 'minutes'),
        (diff.seconds % 60, 'second', 'seconds'),
    )
    output = []
    for period, singular, plural in periods:
        if period:
            output.append('%d %s' % (period, singular if period == 1 else plural))
    if output:
        # Report only the two most significant non-zero units.
        return '%s ago' % ' and '.join(output[:2])
    return default
def generate_binary(outputfname, format_, progname='', binary_files=None, headless_binary_files=None):
    """Outputs the memory binary to the
    output filename using one of the given
    formats: tap, tzx or bin

    Args:
        outputfname: output file name.
        format_: one of 'tap', 'tzx' or 'bin'.
        progname: BASIC program name for tape blocks (defaults to the
            output file's basename, truncated to 10 chars).
        binary_files: extra files saved as named CODE blocks.
        headless_binary_files: extra files saved as headerless blocks.
    """
    global AUTORUN_ADDR
    # Dump the assembled memory image: origin address plus byte list.
    org, binary = MEMORY.dump()
    if gl.has_errors:
        return
    if binary_files is None:
        binary_files = []
    if headless_binary_files is None:
        headless_binary_files = []
    # Read the extra binaries up front, keeping basenames for tape headers.
    bin_blocks = []
    for fname in binary_files:
        with api.utils.open_file(fname) as f:
            bin_blocks.append((os.path.basename(fname), f.read()))
    headless_bin_blocks = []
    for fname in headless_binary_files:
        with api.utils.open_file(fname) as f:
            headless_bin_blocks.append(f.read())
    # Default the autorun address to the code origin when unset.
    if AUTORUN_ADDR is None:
        AUTORUN_ADDR = org
    if not progname:
        progname = os.path.basename(outputfname)[:10]
    if OPTIONS.use_loader.value:
        import basic  # Minimalist basic tokenizer
        # Build a tiny BASIC loader: CLEAR / LOAD "" CODE / RANDOMIZE USR.
        program = basic.Basic()
        if org > 16383:  # Only for zx48k: CLEAR if below 16383
            program.add_line([['CLEAR', org - 1]])
        program.add_line([['LOAD', '""', program.token('CODE')]])
        if OPTIONS.autorun.value:
            program.add_line([['RANDOMIZE', program.token('USR'), AUTORUN_ADDR]])
        else:
            # REM the USR call so the program loads without auto-starting.
            program.add_line([['REM'], ['RANDOMIZE', program.token('USR'), AUTORUN_ADDR]])
    if format_ in ('tap', 'tzx'):
        t = {'tap': outfmt.TAP, 'tzx': outfmt.TZX}[format_]()
        if OPTIONS.use_loader.value:
            t.save_program('loader', program.bytes, line=1)  # Put line 0 to protect against MERGE
        t.save_code(progname, org, binary)
        for name, block in bin_blocks:
            t.save_code(name, 0, block)
        for block in headless_bin_blocks:
            t.standard_block(block)
        t.dump(outputfname)
    else:
        # Raw 'bin' output: just the memory image bytes, no tape headers.
        with open(outputfname, 'wb') as f:
            f.write(bytearray(binary))
def _read_packet(self):
    """Reads next TDS packet from the underlying transport.

    If a timeout happens during reading of the packet's header, the
    current request is cancelled.

    Can only be called when the transport's read pointer is at the
    beginning of the packet.
    """
    try:
        pos = 0
        while pos < _header.size:
            # BUG FIX: the slice previously ended at (_header.size - pos),
            # which shrinks the window on partial reads; a fragmented
            # header then produced an empty view, recv_into returned 0,
            # and a live connection was misreported as closed.  Slice to
            # the fixed header end instead.
            received = self._transport.recv_into(self._bufview[pos:_header.size])
            if received == 0:
                raise tds_base.ClosedConnectionError()
            pos += received
    except tds_base.TimeoutError:
        # Header read timed out: cancel the in-flight request.
        self._session.put_cancel()
        raise
    self._pos = _header.size
    self._type, self._status, self._size, self._session._spid, _ = _header.unpack_from(self._bufview, 0)
    self._have = pos
    # Read the remainder of the packet body into the buffer.
    while pos < self._size:
        received = self._transport.recv_into(self._bufview[pos:], self._size - pos)
        if received == 0:
            raise tds_base.ClosedConnectionError()
        pos += received
        self._have += received
def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """Transforms a 2D Patch to a 3D patch using the given normal vector.

    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:
        # Translate axis names ('x'/'y'/'z') to unit normal vectors.
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)
    # Accept tuples/lists as well as arrays, then normalise the vector.
    normal = np.asarray(normal, dtype=float)
    normal /= np.linalg.norm(normal)
    path = pathpatch.get_path()  # Get the path ...
    trans = pathpatch.get_patch_transform()  # ... and its transform
    path = trans.transform_path(path)  # Apply the transform
    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    # BUG FIX: get_facecolor was assigned unbound (missing parentheses),
    # storing the bound method instead of the face colour.
    pathpatch._facecolor3d = pathpatch.get_facecolor()
    verts = path.vertices  # Get the vertices in 2D
    d = np.cross(normal, (0, 0, 1))  # Obtain the rotation vector
    M = rotation_matrix(d)  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])
    return pathpatch
def closest_points(S):
    """Closest pair of points

    :param S: list of points
    :requires: size of S at least 2
    :modifies: changes the order in S
    :returns: pair of points p, q from S with minimum Euclidean distance
    :complexity: expected linear time
    """
    shuffle(S)
    assert len(S) >= 2
    best_p, best_q = S[0], S[1]
    gap = dist(best_p, best_q)
    # Repeatedly try to improve on the current best distance; improve()
    # returns a better (distance, p, q) triple, or a falsy value when no
    # improvement exists.
    while gap > 0:
        better = improve(S, gap)
        if not better:
            break
        gap, best_p, best_q = better
    return best_p, best_q
def render_iconchoicefield(field, attrs):
    """Render a ChoiceField with icon support; where the value is split by a pipe
    (|): first element being the value, last element is the icon."""
    rendered = []
    for key, raw_value in field.field._choices:
        # Each choice label is "value|icon": the first segment is the
        # display value, the last names the icon.
        parts = raw_value.split("|")
        rendered.append(format_html(
            wrappers.ICON_CHOICE_TEMPLATE,
            key,
            mark_safe(wrappers.ICON_TEMPLATE.format(parts[-1])),
            parts[0],
        ))
    # Render a dropdown field with the assembled choice markup.
    return render_choicefield(field, attrs, "".join(rendered))
def distinct_seeds(k):
    """Returns k distinct seeds for random number generation.

    Each seed is drawn uniformly from [0, 2**32 - 1]; draws are retried
    until k distinct values have been collected.
    """
    seeds = []
    for _ in range(k):
        while True:
            # BUG FIX: random.randint requires both bounds; the original
            # single-argument call raised TypeError on every invocation.
            candidate = random.randint(0, 2 ** 32 - 1)
            if candidate not in seeds:
                break
        seeds.append(candidate)
    return seeds
def verify_header_chain(cls, path, chain=None):
    """Verify that a given chain of block headers
    has sufficient proof of work.

    NOTE(review): Python 2 code -- uses ``xrange`` and relies on integer
    ``/`` for the difficulty-chunk index.

    :param path: path to the on-disk header store (used to load the chain
        and to look up difficulty targets)
    :param chain: optional pre-loaded list of header dicts; when None the
        chain is read from ``path``
    :returns: True if every linkage and difficulty check passes, else False
    """
    if chain is None:
        chain = SPVClient.load_header_chain(path)
    prev_header = chain[0]
    # Walk the chain pairwise: each header must reference its predecessor
    # and satisfy the difficulty target for its height.
    for i in xrange(1, len(chain)):
        header = chain[i]
        height = header.get('block_height')
        prev_hash = prev_header.get('hash')
        # 1) hash linkage: header must point at the previous header's hash.
        if prev_hash != header.get('prev_block_hash'):
            log.error("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
            return False
        # 2) difficulty: declared 'bits' must match the expected target for
        #    this chunk (integer division groups heights into chunks).
        bits, target = SPVClient.get_target(path, height / BLOCK_DIFFICULTY_CHUNK_SIZE, chain)
        if bits != header.get('bits'):
            log.error("bits mismatch: %s vs %s" % (bits, header.get('bits')))
            return False
        # 3) proof of work: the block hash, read as a hex integer, must not
        #    exceed the target.
        _hash = header.get('hash')
        if int('0x' + _hash, 16) > target:
            log.error("insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target))
            return False
        prev_header = header
    return True
def list_ext(self, collection, path, retrieve_all, **_params):
    """Client extension hook for list.

    Simply forwards to :meth:`list`; exists so extensions have a stable
    entry point to override.
    """
    result = self.list(collection, path, retrieve_all, **_params)
    return result
def vectorize_inhibit(audio: np.ndarray) -> np.ndarray:
    """Returns an array of inputs generated from the
    wake word audio that shouldn't cause an activation.
    """
    def to_samples(seconds):
        # Convert a duration in seconds to a sample count.
        return int(pr.sample_rate * seconds)

    vectors = []
    min_tail = to_samples(pr.buffer_t / 2.)
    for offset in range(to_samples(inhibit_t),
                        to_samples(inhibit_dist_t),
                        to_samples(inhibit_hop_t)):
        # Stop once trimming the offset would leave too little audio.
        if len(audio) - offset < min_tail:
            break
        vectors.append(vectorize(audio[:-offset]))
    if not vectors:
        return np.empty((0, pr.n_features, pr.feature_size))
    return np.array(vectors)
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
    """Get all MFA devices associated with an account.

    :type user_name: string
    :param user_name: The username of the user

    :type marker: string
    :param marker: Use this only when paginating results; set it to the
        Marker element from the previous truncated response.

    :type max_items: int
    :param max_items: Use this only when paginating results, to cap the
        number of items returned per response.
    """
    params = {'UserName': user_name}
    # Include the pagination controls only when the caller supplied them.
    optional = {'Marker': marker, 'MaxItems': max_items}
    params.update((name, value) for name, value in optional.items() if value)
    return self.get_response('ListMFADevices', params, list_marker='MFADevices')
def save_plots(self, directory, format="png", recommended_only=False):
    """Save images of dose-response curve-fits for each model.

    Parameters
    ----------
    directory : str
        Directory where the image files will be saved.
    format : str, optional
        Image output format. Valid options include: png, pdf, svg, ps, eps
    recommended_only : bool, optional
        If True, only recommended models for each session are included. If
        no model is recommended, then a row with its ID will be included,
        but all fields will be null.

    Returns
    -------
    None
    """
    # Delegate to each session, using its position as the filename prefix
    # so outputs from different sessions never collide.
    for index, session in enumerate(self):
        session.save_plots(directory,
                           prefix=str(index),
                           format=format,
                           recommended_only=recommended_only)
def get_section_path(section):
    """Return a list with keys to access the section from root.

    :param section: A Section
    :type section: Section
    :returns: list of strings in the order to access the given section from root
    :raises: None
    """
    keys = []
    node = section
    # Climb `depth` ancestors, collecting names leaf-first, then reverse so
    # the root-most key comes first.
    for _ in range(section.depth):
        keys.append(node.name)
        node = node.parent
    keys.reverse()
    return keys
def version_option(f):
    """Largely a custom clone of click.version_option -- almost identical, but
    prints our special output.
    """
    def callback(ctx, param, value):
        # Mirrors click.decorators.version_option: do nothing while click is
        # only partially (resiliently) parsing the command line.
        if not value or ctx.resilient_parsing:
            return
        print_version()
        ctx.exit(0)

    decorator = click.option(
        "--version",
        is_flag=True,
        expose_value=False,
        is_eager=True,
        callback=callback,
        cls=HiddenOption,
    )
    return decorator(f)
def download_url(url, destination, retries=None, retry_delay=None, runner=None):
    """Download the given URL with wget to the provided path. The command is
    run via Fabric on the current remote machine. Therefore, the destination
    path should be for the remote machine.

    :param str url: URL to download onto the remote machine
    :param str destination: Path to download the URL to on the remote machine
    :param int retries: Max number of times to retry downloads after a failure
    :param float retry_delay: Number of seconds between download retries
    :param FabRunner runner: Optional runner to use for executing commands.
    :return: The results of the wget call
    """
    if runner is None:
        runner = FabRunner()
    command = "wget --quiet --output-document '{0}' '{1}'".format(destination, url)
    # Retry the whole download on failure, waiting retry_delay between tries.
    return try_repeatedly(lambda: runner.run(command),
                          max_retries=retries,
                          delay=retry_delay)
def delete_idx_status(self, rdf_class):
    """Removes all of the index status triples from the datastore

    Args:
        rdf_class: The class of items to remove the status from

    Returns:
        the result of the triplestore update query
    """
    # Deletes kds:esIndexTime / kds:esIndexError from every subject whose
    # rdf:type is the class or any of its subclasses.
    sparql_template = """
        DELETE
        {{
            ?s kds:esIndexTime ?esTime .
            ?s kds:esIndexError ?esError .
        }}
        WHERE
        {{
            VALUES ?rdftypes {{\n\t\t{} }} .
            ?s a ?rdftypes .
            OPTIONAL {{
                ?s kds:esIndexTime ?esTime
            }}
            OPTIONAL {{
                ?s kds:esIndexError ?esError
            }}
            FILTER(bound(?esTime)||bound(?esError))
        }}
        """
    rdf_types = [rdf_class.uri] + [item.uri for item in rdf_class.subclasses]
    sparql = sparql_template.format("\n\t\t".join(rdf_types))
    # FIX: Logger.warn is a deprecated alias of Logger.warning.
    log.warning("Deleting index status for %s", rdf_class.uri)
    return self.tstore_conn.update_query(sparql)
def decompose(df, period=365, lo_frac=0.6, lo_delta=0.01):
    """Create a seasonal-trend (with Loess, aka "STL") decomposition of observed time series data.

    This implementation is modeled after the ``statsmodels.tsa.seasonal_decompose`` method
    but substitutes a Lowess regression for a convolution in its trend estimation.

    This is an additive model, Y[t] = T[t] + S[t] + e[t]

    For more details on lo_frac and lo_delta, see:
    `statsmodels.nonparametric.smoothers_lowess.lowess()`

    Args:
        df (pandas.Dataframe): Time series of observed counts. This DataFrame must be continuous (no
            gaps or missing data), and include a ``pandas.DatetimeIndex``.
        period (int, optional): Most significant periodicity in the observed time series, in units of
            1 observation. Ex: to accomodate strong annual periodicity within years of daily
            observations, ``period=365``.
        lo_frac (float, optional): Fraction of data to use in fitting Lowess regression.
        lo_delta (float, optional): Fractional distance within which to use linear-interpolation
            instead of weighted regression. Using non-zero ``lo_delta`` significantly decreases
            computation time.

    Returns:
        `statsmodels.tsa.seasonal.DecomposeResult`: An object with DataFrame attributes for the
            seasonal, trend, and residual components, as well as the average seasonal cycle.
    """
    # use some existing pieces of statsmodels
    lowess = sm.nonparametric.lowess
    _pandas_wrapper, _ = _maybe_get_pandas_wrapper_freq(df)
    # get plain np array
    observed = np.asanyarray(df).squeeze()
    # calc trend, remove from observation
    # lo_delta is scaled by the series length: the lowess() API takes delta
    # in x-units, and x here is just the observation index 0..n-1.
    trend = lowess(observed, [x for x in range(len(observed))], frac=lo_frac, delta=lo_delta * len(observed), return_sorted=False)
    detrended = observed - trend
    # period must not be larger than size of series to avoid introducing NaNs
    period = min(period, len(observed))
    # calc one-period seasonality: mean of each phase position across periods
    # (NaN-safe via pd_nanmean, since period may not divide the length evenly)
    period_averages = np.array([pd_nanmean(detrended[i::period]) for i in range(period)])
    # 0-center the period avgs so the seasonal component sums to ~0
    period_averages -= np.mean(period_averages)
    # tile the one-period cycle to full length and truncate; the residual is
    # what neither trend nor seasonality explains (Y = T + S + e)
    seasonal = np.tile(period_averages, len(observed) // period + 1)[:len(observed)]
    resid = detrended - seasonal
    # convert the arrays back to appropriate dataframes, stuff them back into
    # the statsmodel object
    results = list(map(_pandas_wrapper, [seasonal, trend, resid, observed]))
    dr = DecomposeResult(seasonal=results[0], trend=results[1], resid=results[2], observed=results[3], period_averages=period_averages)
    return dr
def p_lconcatlist(self, p):
    # PLY parser rule: the string below is the grammar production (read via
    # __doc__ by the parser generator), NOT documentation -- do not edit it.
    'lconcatlist : lconcatlist COMMA lconcat _ one'
    # Append the newly parsed element to the tuple built so far.
    p[0] = p[1] + (p[3],)
    # Propagate the line number of the list's first symbol to the result.
    p.set_lineno(0, p.lineno(1))
def exit_standby(name, instance_ids, should_decrement_desired_capacity=False, region=None, key=None, keyid=None, profile=None):
    '''
    Exit desired instances from StandBy mode

    .. versionadded:: 2016.11.0

    CLI example::

        salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxx"]'

    :param name: name of the autoscale group
    :param instance_ids: list of instance ids to move out of StandBy
    :returns: True if no activity in the response failed; on error, a dict
        with either ``exists: False`` (group not found) or ``error``.

    NOTE(review): should_decrement_desired_capacity is accepted but never
    passed to the API call -- confirm whether that is intentional.
    '''
    conn = _get_conn_autoscaling_boto3(region=region, key=key, keyid=keyid, profile=profile)
    try:
        response = conn.exit_standby(InstanceIds=instance_ids, AutoScalingGroupName=name)
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        # A missing autoscale group is reported as existence info, not error.
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'exists': False}
        return {'error': err}
    # Success iff none of the scaling activities reported failure.
    return all(activity['StatusCode'] != 'Failed' for activity in response['Activities'])
def rsr(self):
    """A getter for the relative spectral response (rsr) curve."""
    curve = np.array([self.wave.value, self.throughput])
    # Transpose (2, N) -> (N, 2) so each row pairs a wavelength with its
    # throughput value.
    return curve.swapaxes(0, 1)
def update_copyright(path, year):
    """Update a file's copyright statement to include the given year."""
    with open(path, "r") as fobj:
        text = fobj.read().rstrip()
    match = COPYRIGHT_REGEX.search(text)
    start, end = match.start("years"), match.end("years")
    # Keep a single trailing space out of the span being replaced.
    if text[end - 1] == " ":
        end -= 1
    years = set(_parse_years(match.group("years"))) | {year}
    updated = text[:start] + _format_years(years) + text[end:]
    with open(path, "w") as fobj:
        print(updated, file=fobj)
def state_size(self):
    """Tuple of `tf.TensorShape`s indicating the size of state tensors."""
    # Hidden state keeps the spatial dims but swaps the channel count for
    # the cell's output channels.
    shape = self._input_shape[:-1] + (self._output_channels,)
    hidden = tf.TensorShape(shape)
    return (hidden, hidden)
def get_all():
    '''
    Return a list of all available services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Resolve the init.d directory once; the original repeated the
    # _GRAINMAP/__grains__ lookup for the isdir check and again for listdir.
    initd_dir = _GRAINMAP.get(__grains__.get('os'), '/etc/init.d')
    if not os.path.isdir(initd_dir):
        return []
    return sorted(os.listdir(initd_dir))
def _set_history(self, v, load=False):
    """Setter method for history, mapped from YANG variable /cpu_state/history (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_history is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_history() directly.

    YANG Description: CPU utilization histogram
    """
    # Generated pyangbind code: values carrying a union type marker are
    # coerced to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the schema's container type; TypeError/ValueError
        # means the supplied value is incompatible with the YANG schema.
        t = YANGDynClass(v, base=history.history, is_container='container', presence=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'RAS-process-cpu-history', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with the generated, schema-describing error payload.
        raise ValueError({'error-string': """history must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=history.history, is_container='container', presence=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'RAS-process-cpu-history', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-RAS-operational', defining_module='brocade-RAS-operational', yang_type='container', is_config=False)""", })
    self.__history = t
    # Trigger the parent's change-notification hook when present.
    if hasattr(self, '_set'):
        self._set()
def collection(self, path):
    """Return all items generated by :meth:`get_collection` as a list.

    :param path: path passed through to ``get_collection``
    :returns: list of every item the generator yields
    """
    # list() consumes the iterable directly; the manual append loop was an
    # unnecessary re-implementation of it.
    return list(self.get_collection(path))
def fetch(self, query_string="", common_query_options=None, limit_since=False):
    """Backend for the actual gerrit query.

    query_string:
        basic query terms, e.g., 'status:abandoned'
    common_query_options:
        [optional] rest of the query string; if omitted, the default
        one is used (limit by the current user and since option);
        if empty, nothing will be added to query_string
    limit_since:
        [optional] Boolean (defaults to False) post-process the results
        to eliminate items created after since option.

    NOTE(review): Python 2 code (uses ``basestring`` and ``u''`` literals).
    """
    work_list = []
    log.info(u"Searching for changes by {0}".format(self.user))
    log.debug('query_string = {0}, common_query_options = {1}'.format(query_string, common_query_options))
    self.since_date = self.get_gerrit_date(self.options.since)
    if common_query_options is None:
        # Calculate age from self.options.since
        # Amount of time that has expired since the change was last
        # updated with a review comment or new patch set.
        # Meaning that the last time we changed the review is
        # GREATER than the given age.
        # For age SMALLER we need -age:<time>
        common_query_options = '+owner:{0}'.format(self.user.login)
        if not limit_since:
            age = (TODAY - self.since_date).days
            common_query_options += '+-age:{0}d'.format(age)
        common_query_options += '+since:{0}+until:{1}'.format(self.get_gerrit_date(self.options.since), self.get_gerrit_date(self.options.until))
    if isinstance(common_query_options, basestring) and len(common_query_options) > 0:
        query_string += common_query_options
    log.debug('query_string = {0}'.format(query_string))
    log.debug('self.prefix = {0}'.format(self.prefix))
    log.debug('[fetch] self.base_url = {0}'.format(self.base_url))
    work_list = self.repo.search(query_string)
    if limit_since:
        # Post-filter: keep only changes created on/after the since date
        # (the query itself cannot express this restriction).
        tmplist = []
        log.debug('Limiting by since option')
        self.stats = []
        for chg in work_list:
            log.debug('chg = {0}'.format(chg))
            # 'created' is a timestamp string; the first 10 chars are the date.
            chg_created = self.get_gerrit_date(chg['created'][:10])
            log.debug('chg_created = {0}'.format(chg_created))
            if chg_created >= self.since_date:
                tmplist.append(chg)
        work_list = tmplist[:]
    log.debug(u"work_list = {0}".format(work_list))
    # Return the list of tick_data objects
    return [Change(ticket, prefix=self.prefix) for ticket in work_list]
def fetch_options(self):
    """Set and return the options for possible files to
    load, replace etc. The stored element will determine the options.
    The refobjinterface and typinterface are responsible for providing the options.

    :returns: the options
    :rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
    :raises: None
    """
    refobjinter = self.get_refobjinter()
    # Cache both option sets on the instance before returning.
    options, taskfileinfo_options = refobjinter.fetch_options(self.get_typ(), self.get_element())
    self._options = options
    self._taskfileinfo_options = taskfileinfo_options
    return self._options
def _flatten_plus_safe(tmp_dir, rollback_files):
    """Flatten names of files and create temporary file names."""
    tx_fpaths = []
    orig_files = []
    for entry in rollback_files:
        # Accept either a single filename or an iterable of filenames.
        fnames = [entry] if isinstance(entry, six.string_types) else entry
        for fname in fnames:
            tx_name = fname + '.tx'
            tx_fpaths.append(join(tmp_dir, tx_name) if tmp_dir else tx_name)
            orig_files.append(fname)
    return tx_fpaths, orig_files
def init(script=sys.argv[0], base='lib', append=True, ignore=['/', '/usr'], realpath=False, pythonpath=False, throw=False):
    """
    Parameters:

    * `script`: Path to script file. Default is currently running script file
    * `base`: Name of base module directory to add to sys.path. Default is "lib".
    * `append`: Append module directory to the end of sys.path, or insert at the beginning? Default is to append.
    * `ignore`: List of directories to ignore during the module search. Default is to ignore "/" and "/usr".
    * `realpath`: Should symlinks be resolved first? Default is False.
    * `pythonpath`: Should the modules directory be added to the PYTHONPATH environment variable? Default is False.
    * `throw`: Should an exception be thrown if no modules directory was found? Default is False.

    Returns:

    * The path to the modules directory if it was found, otherwise None.
    """
    if type(ignore) is str:
        ignore = [ignore]
    script = os.path.realpath(script) if realpath else os.path.abspath(script)
    # Walk up from the script's directory until a <base> subdirectory is
    # found in a non-ignored directory, or the filesystem root is reached.
    path = os.path.dirname(script)
    while os.path.dirname(path) != path and (path in ignore or not os.path.isdir(os.path.join(path, base))):
        path = os.path.dirname(path)
    modules_dir = os.path.join(path, base)
    if path not in ignore and os.path.isdir(modules_dir):
        if append:
            sys.path.append(modules_dir)
        else:
            sys.path.insert(1, modules_dir)
        if pythonpath:
            # BUG FIX: the original concatenated the new entry onto any
            # existing PYTHONPATH without a separator (and could leave a
            # trailing separator). Join the components explicitly instead.
            existing = os.environ.get('PYTHONPATH', '')
            if existing:
                parts = [existing, modules_dir] if append else [modules_dir, existing]
                os.environ['PYTHONPATH'] = os.pathsep.join(parts)
            else:
                os.environ['PYTHONPATH'] = modules_dir
        return modules_dir
    elif throw:
        # BUG FIX: the original applied '%' to a '{}'-style format string,
        # which raised TypeError instead of the intended error message.
        raise Exception("Could not find modules directory {} relative to {}".format(base, script))
    return None
def post_process(self, tagnum2name):
    """Map the tag name instead of tag number to the tag value.

    :param tagnum2name: dict mapping numeric Exif tag ids to readable names
    """
    for tag_number, value in self.raw_ifd.items():
        if tag_number in tagnum2name:
            key = tagnum2name[tag_number]
        else:
            # Unrecognized tag: warn and fall back to the numeric id as key.
            msg = 'Unrecognized Exif tag ({tag}).'.format(tag=tag_number)
            warnings.warn(msg, UserWarning)
            key = tag_number
        self.processed_ifd[key] = value
def x_build_targets_target(self, node):
    '''Process the target dependency DAG into an ancestry tree so we can look up
    which top-level library and test targets specific build actions correspond to.'''
    target_node = node
    name = self.get_child_data(target_node, tag='name', strip=True)
    path = self.get_child_data(target_node, tag='path', strip=True)
    jam_target = self.get_child_data(target_node, tag='jam-target', strip=True)
    #~ Map for jam targets to virtual targets.
    self.target[jam_target] = {'name': name, 'path': path}
    #~ Create the ancestry.
    # Walk the <dependency> siblings under <dependencies>, recording this
    # target as the parent of each dependency.
    dep_node = self.get_child(self.get_child(target_node, tag='dependencies'), tag='dependency')
    while dep_node:
        child = self.get_data(dep_node, strip=True)
        # Qualify the child (the part after '//') with this target's path to
        # reconstruct its jam target name, e.g. '<p/some/path>child-target'.
        child_jam_target = '<p%s>%s' % (path, child.split('//', 1)[1])
        self.parent[child_jam_target] = jam_target
        dep_node = self.get_sibling(dep_node.nextSibling, tag='dependency')
    return None
def natural_sorted(iterable):
    """Return human sorted list of strings.

    E.g. for sorting file names.

    >>> natural_sorted(['f1', 'f2', 'f10'])
    ['f1', 'f2', 'f10']
    """
    digit_runs = re.compile(r'(\d+)')

    def natural_key(value):
        # Split into alternating text/number chunks; numeric chunks compare
        # as ints so 'f10' sorts after 'f2'.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in digit_runs.split(value)]

    return sorted(iterable, key=natural_key)
def checkFuelPosition(obs, agent_host):
    '''Make sure our coal, if we have any, is in slot 0.'''
    # The furnace crafting commands (cooking the potato and the rabbit) take
    # the first available fuel item in the inventory. If that isn't the coal,
    # it could burn the wood we need for making the bowl -- so move any coal
    # found in slots 1..38 into slot 0.
    for slot in range(1, 39):
        item = obs.get('InventorySlot_' + str(slot) + '_item')
        if item == 'coal':
            agent_host.sendCommand("swapInventoryItems 0 " + str(slot))
            return
def _run_wes(args):
    """Run CWL using a Workflow Execution Service (WES) endpoint."""
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    main_file = _pack_cwl(main_file)
    if args.host and "stratus" in args.host:
        # Stratus endpoints use a dedicated submission path.
        _run_wes_stratus(args, main_file, json_file)
    else:
        cmd = ["wes-client", "--no-wait"]
        if args.host:
            cmd.extend(["--host", args.host])
        if args.auth:
            cmd.extend(["--auth", args.auth])
        cmd.extend([main_file, json_file])
        _run_tool(cmd)
def get_all_tables(self, dataset_id, project_id=None):
    """Retrieve a list of tables for the dataset.

    Parameters
    ----------
    dataset_id : str
        The dataset to retrieve table data for.
    project_id : str
        Unique ``str`` identifying the BigQuery project containing the dataset

    Returns
    -------
    A ``list`` with all table names
    """
    tables_data = self._get_all_tables_for_dataset(dataset_id, project_id)
    # Pull the tableId out of each entry, skipping malformed entries that
    # lack a tableReference or tableId.
    names = (entry.get('tableReference', {}).get('tableId')
             for entry in tables_data.get('tables', []))
    return [name for name in names if name]
def write_sources_file():
    """Write a sources.yaml file to current working dir."""
    file_content = ('schemes: '
                    'https://github.com/chriskempson/base16-schemes-source.git\n'
                    'templates: '
                    'https://github.com/chriskempson/base16-templates-source.git')
    # rel_to_cwd resolves the output path relative to the working directory.
    with open(rel_to_cwd('sources.yaml'), 'w') as file_:
        file_.write(file_content)
def get_config_dict(config):
    '''Build a configuration dict from a config module.

    Collects every public attribute of the module (names not starting with
    an underscore), excluding the ``os`` module commonly imported by config
    files.

    :param object config: configuration module
    :return: dict of configuration values
    :rtype: dict
    '''
    dst = {}
    for key, value in vars(config).items():
        # Skip private names and the `os` import. BUG FIX: the original did
        # key_list.remove('os'), which raised ValueError whenever the config
        # module did not actually import os.
        if key == 'os' or key.startswith('_'):
            continue
        dst[key] = value
    return dst
def dispatch(argdict):
    '''Call the command-specific function, depending on the command.'''
    # Commands map to module-level functions named do_<command>.
    handler = getattr(THIS_MODULE, 'do_' + argdict['command'])
    handler(argdict)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.