signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get(self, file_id, session=None):
    """Get a file from GridFS by ``"_id"``.

    Returns an instance of :class:`~gridfs.grid_file.GridOut`,
    which provides a file-like interface for reading.

    :Parameters:
      - `file_id`: ``"_id"`` of the file to get
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    grid_out = GridOut(self.__collection, file_id, session=session)
    # Touch the file document now so NoFile is raised here, instead of on
    # the first attribute access.
    grid_out._ensure_file()
    return grid_out
def _sanitize_numbers(uncleaned_numbers):
    """Convert strings to integers if possible.

    Non-convertible entries are kept unchanged; a new list is returned.
    """
    def _to_int(value):
        try:
            return int(value)
        except ValueError:
            return value

    return [_to_int(value) for value in uncleaned_numbers]
def calc_cat_clust_order(net, inst_rc):
    '''cluster category subset of data'''
    # NOTE(review): these imports are unused by the live code below (the
    # per-category sub-clustering that used them was commented out upstream);
    # they are kept so any import side effects are unchanged.
    from .__init__ import Network
    from copy import deepcopy
    from . import calc_clust, run_filter

    node_info = net.dat['node_info'][inst_rc]
    # Snapshot the keys first: the loop below adds new '*_index' keys.
    cat_keys = [k for k in list(node_info.keys()) if 'cat-' in k]
    for cat_name in cat_keys:
        dict_cat = node_info['dict_' + cat_name.replace('-', '_')]
        # Order the categories themselves, then list member nodes per category.
        ordered_cats = order_categories(dict_cat.keys())
        names_clust_list = []
        for cat in ordered_cats:
            names_clust_list.extend(dict_cat[cat])
        # Position of every row/col node within the category-ordered list;
        # this orders nodes by category only (no within-category clustering).
        final_order = [names_clust_list.index(node)
                       for node in net.dat['nodes'][inst_rc]]
        node_info[cat_name.replace('-', '_') + '_index'] = final_order
def notify(self, new_jobs_count):
    """We just queued new_jobs_count jobs on this queue; wake up the workers if needed."""
    if not self.use_notify():
        return
    # Not really useful to send more than 100 notifications (to be configured).
    count = min(new_jobs_count, 100)
    notify_key = redis_key("notify", self)
    redis = context.connections.redis
    redis.lpush(notify_key, *([1] * count))
    # Let the key expire after roughly twice the configured max latency.
    ttl = max(1, int(context.get_current_config()["max_latency"] * 2))
    redis.expire(notify_key, ttl)
def eval(self, expr):
    """Evaluate an expression.

    This does **not** add its argument (or its result) as an element of me;
    that is the responsibility of the code that created the object, so any
    user-supplied expression must be :meth:`Environment.rec_new`'d first.
    This (and wrappers around it) is the only entry point to expression
    evaluation that ordinary (non-extension) code should call.

    Args:
        expr (LispVal): The expression to evaluate.

    Returns:
        LispVal: The result of evaluating the expression.

    Raises:
        ~parthial.errs.LimitationError: If evaluating the expression would
            require more nesting or more steps than is permissible.
    """
    if self.depth >= self.max_depth:
        raise LimitationError('too much nesting')
    if self.steps >= self.max_steps:
        raise LimitationError('too many steps')
    self.depth += 1
    self.steps += 1
    result = expr.eval(self)
    # NOTE(review): depth is only restored on the success path, matching the
    # original code -- confirm that an aborted evaluation is never resumed.
    self.depth -= 1
    return result
def lex(filename):
    """Generates tokens from an nginx config file."""
    with io.open(filename, mode='r', encoding='utf-8') as handle:
        token_stream = _balance_braces(_lex_file_object(handle), filename)
        for token, line, quoted in token_stream:
            yield (token, line, quoted)
def get_env_key(obj, key=None):
    """Return environment variable key to use for lookups within a
    namespace represented by the package name.

    For example, any variables for predix.security.uaa are stored
    as PREDIX_SECURITY_UAA_KEY.

    :param obj: object whose ``__module__`` provides the namespace
    :param key: variable name within the namespace; if None, only the
        namespace prefix is returned (previously this crashed with
        AttributeError on ``key.upper()``)
    """
    prefix = obj.__module__.replace('.', '_').upper()
    if key is None:
        return prefix
    return '_'.join([prefix, key.upper()])
def _get_initialized_channels_for_service(self, org_id, service_id):
    '''return [channel]'''
    channels = self._get_initialized_channels_dict_for_service(org_id, service_id)
    return list(channels.values())
def list_from_metadata(cls, url, metadata):
    '''return a list of DatalakeRecords for the url and metadata'''
    key = cls._get_key(url)
    meta = Metadata(**metadata)
    create_time = cls._get_create_time(key)
    buckets = cls.get_time_buckets_from_metadata(meta)
    # One record per time bucket the file's interval spans.
    return [cls(url, meta, bucket, create_time, key.size) for bucket in buckets]
def _convert_to_namecheap(self, record):
    """Converts from lexicon format record to namecheap format record,
    suitable to sending through the api to namecheap."""
    name = record['name']
    # Drop a single trailing dot from a fully-qualified name.
    if name.endswith('.'):
        name = name[:-1]
    # Strip the domain (and the joining dot) to get the host's short name.
    short_name = name[:name.find(self.domain) - 1]
    return {
        'Type': record['type'],
        'Name': short_name,
        'TTL': record['ttl'],
        'Address': record['content'],
        'HostId': record['id'],
    }
def expand_args(**args_to_expand):
    """Expand the given lists into the length of the layers.

    This is a convenience so the user does not need to specify the complete
    list of parameters for model initialization: a single-element list is
    repeated to match ``len(args_to_expand['layers'])``.

    :param args_to_expand: keyword arguments; must include ``layers``
    :returns: the same dict, with short list values expanded in place
    """
    layers = args_to_expand['layers']
    # Iterate over a snapshot so assigning back into the dict while looping
    # is safe; the Python 2 iteritems() fallback is no longer needed.
    for key, val in list(args_to_expand.items()):
        if isinstance(val, list) and len(val) != len(layers):
            args_to_expand[key] = [val[0] for _ in layers]
    return args_to_expand
def main(argString=None):
    """The main function of the module.

    :param argString: the options.
    :type argString: list
    """
    # Parse and validate the options before running the merge.
    parsed = parseArgs(argString)
    checkArgs(parsed)
    merge_related_samples(parsed.ibs_related, parsed.out, parsed.no_status)
def coordinates(self, x, y):
    '''return coordinates of a pixel in the map'''
    st = self.state
    return st.mt.coord_from_area(x, y, st.lat, st.lon, st.width, st.ground_width)
def _to_dict(self):
    """Return a json dictionary representing this model."""
    serialized = {}
    # Only include attributes that exist and are not None.
    for attr in ('name', 'limit'):
        value = getattr(self, attr, None)
        if value is not None:
            serialized[attr] = value
    return serialized
def parse_qs(self, qs):
    """Parse query string, but enforce one instance of each variable.

    Return a dict with the variables on success.
    Return None on parse error (a variable given more than once).
    """
    # urllib2.urlparse.parse_qs (py2) is the same function as
    # urllib.parse.parse_qs (py3); import the one that exists.
    try:
        from urllib.parse import parse_qs as _parse_qs
    except ImportError:  # pragma: no cover - Python 2 fallback
        from urlparse import parse_qs as _parse_qs
    qs_state = _parse_qs(qs)
    ret = {}
    for qs_var, qs_value_list in qs_state.items():
        if len(qs_value_list) > 1:
            return None
        ret[qs_var] = qs_value_list[0]
    return ret
def prepData4Call(self, *args, **kwargs):
    """NAME:
       prepData4Call
    PURPOSE:
       prepare stream data for the __call__ method
    INPUT:
       __call__ inputs
    OUTPUT:
       (dOmega, dangle); wrt the progenitor; each [3, nobj]
    HISTORY:
       2013-12-04 - Written - Bovy (IAS)
    """
    # Calculate the actionAngle coordinates if they're not given as such.
    freqsAngles = self._parse_call_args(*args, **kwargs)
    nobj = freqsAngles.shape[1]
    dOmega = freqsAngles[:3, :] - numpy.tile(self._progenitor_Omega.T, (nobj, 1)).T
    dangle = freqsAngles[3:, :] - numpy.tile(self._progenitor_angle.T, (nobj, 1)).T
    # Assuming a single wrap, resolve large angle differences
    # (wraps should be marginalized over).
    dangle[dangle < -4.] += 2. * numpy.pi
    dangle[dangle > 4.] -= 2. * numpy.pi
    return (dOmega, dangle)
def _reformat(p, buf):
    """Apply format of ``p`` to data in 1-d array ``buf``."""
    if numpy.ndim(buf) != 1:
        raise ValueError("Buffer ``buf`` must be 1-d.")
    if hasattr(p, 'keys'):
        # Dictionary-like template: rebuild a BufferDict over buf.
        template = _gvar.BufferDict(p)
        if template.size != len(buf):
            raise ValueError("p, buf size mismatch: %d, %d" % (template.size, len(buf)))
        return _gvar.BufferDict(template, buf=buf)
    # Array-like template: reshape buf to p's shape.
    if numpy.size(p) != len(buf):
        raise ValueError("p, buf size mismatch: %d, %d" % (numpy.size(p), len(buf)))
    return numpy.array(buf).reshape(numpy.shape(p))
def _extract_links_from_asset_tags_in_text(self, text):
    """Scan the text and extract asset tags and links to corresponding files.

    @param text: Page text.
    @type text: str

    @return: @see CourseraOnDemand._extract_links_from_text
    """
    # Extract asset tags from instructions text.
    asset_tags_map = self._extract_asset_tags(text)
    # A dict iterates over its keys directly; six.iterkeys is unnecessary.
    ids = list(asset_tags_map)
    if not ids:
        return {}
    # Asset tags contain asset names and ids; another HTTP request is needed
    # to resolve each asset id to its URL.
    asset_urls = self._extract_asset_urls(ids)
    supplement_links = {}
    # Build supplement links, providing nice titles along the way.
    for asset in asset_urls:
        tag = asset_tags_map[asset['id']]
        title = clean_filename(tag['name'], self._unrestricted_filenames)
        extension = clean_filename(tag['extension'].strip(), self._unrestricted_filenames)
        url = asset['url'].strip()
        supplement_links.setdefault(extension, []).append((url, title))
    return supplement_links
def pop(self, idx=-1):
    """Remove and return item at *idx* (default last).

    Raises IndexError if the list is empty or the index is out of range.
    Negative indices are supported, as for slice indices.
    """
    # pylint: disable=arguments-differ
    if not self._len:
        raise IndexError('pop index out of range')
    lists = self._lists
    # Fast path: first element of the first sublist.
    if idx == 0:
        value = lists[0][0]
        self._delete(0, 0)
        return value
    # Fast path: last element of the last sublist.
    if idx == -1:
        pos = len(lists) - 1
        loc = len(lists[pos]) - 1
        value = lists[pos][loc]
        self._delete(pos, loc)
        return value
    # Fast path: positive index inside the first sublist.
    if 0 <= idx < len(lists[0]):
        value = lists[0][idx]
        self._delete(0, idx)
        return value
    # Fast path: negative index inside the last sublist.
    len_last = len(lists[-1])
    if -len_last < idx < 0:
        pos = len(lists) - 1
        loc = len_last + idx
        value = lists[pos][loc]
        self._delete(pos, loc)
        return value
    # General case: translate idx into a (sublist, offset) pair.
    pos, loc = self._pos(idx)
    value = lists[pos][loc]
    self._delete(pos, loc)
    return value
def _iadd_spmatrix(self, X, alpha=1.0):
    """Add a sparse matrix :math:`X` to :py:class:`cspmatrix`."""
    assert self.is_factor is False, "cannot add spmatrix to a cspmatrix factor"
    symb = self.symb
    snptr, snode = symb.snptr, symb.snode
    relptr = symb.relptr
    snrowidx, sncolptr = symb.snrowidx, symb.sncolptr
    blkptr = symb.blkptr
    blkval = self.blkval
    # Apply the fill-reducing permutation (if any) and keep the lower triangle.
    if symb.p is not None:
        Xl = tril(perm(symmetrize(X), symb.p))
    else:
        Xl = tril(X)
    cp, ri, val = Xl.CCS
    # Scatter X's columns into each supernodal block.
    for k in range(symb.Nsn):
        nn = snptr[k + 1] - snptr[k]        # supernode size
        na = relptr[k + 1] - relptr[k]      # size of the update part
        nj = nn + na                        # block column height
        rows = list(snrowidx[sncolptr[k]:sncolptr[k + 1]])
        # Copy columns from X into the block, column by column.
        for i in range(nn):
            j = snode[snptr[k] + i]
            offset = blkptr[k] + nj * i
            # Map row indices of column j into positions within the block,
            # then accumulate the (scaled) values.
            I = [offset + rows.index(idx) for idx in ri[cp[j]:cp[j + 1]]]
            blkval[I] += alpha * val[cp[j]:cp[j + 1]]
    return
def create_unsigned_tx(inputs, outputs, change_address=None, include_tosigntx=False, verify_tosigntx=False, min_confirmations=0, preference='high', coin_symbol='btc', api_key=None):
    '''Create a new transaction to sign. Doesn't ask for or involve private keys.

    Behind the scenes, blockcypher will:
      1) Fetch unspent outputs
      2) Decide which make the most sense to consume for the given transaction
      3) Return an unsigned transaction for you to sign

    min_confirmations is the minimum number of confirmations an unspent
    output must have in order to be included in a transaction.

    tosign_tx is the raw tx which can be decoded to verify the transaction
    you're signing matches what you want to sign; you can also verify
    sha256(sha256(tosign_tx)) == tosign. verify_tosigntx runs that
    verification for you, protecting against a malicious or compromised
    blockcypher server.

    Inputs is a list of either:
      - {'address': '1abcxyz...'} that will be included in the TX
      - {'pubkeys': [pubkey1, pubkey2, pubkey3], "script_type": "multisig-2-of-3"}
      - {'wallet_name': 'bar', 'wallet_token': 'yourtoken'} that was
        previously registered and will be used to choose which
        addresses/inputs are included in the TX
    Note that for consistency with the API `inputs` is always a list
    (currently a singleton list).

    Details here: http://dev.blockcypher.com/#generic_transactions
    '''
    # Lots of defensive checks.
    assert isinstance(inputs, list), inputs
    assert isinstance(outputs, list), outputs
    assert len(inputs) >= 1, inputs
    assert len(outputs) >= 1, outputs

    inputs_cleaned = []
    for inp in inputs:  # `input` is a reserved word
        if 'address' in inp:
            address = inp['address']
            assert is_valid_address_for_coinsymbol(b58_address=address, coin_symbol=coin_symbol, ), address
            inputs_cleaned.append({'addresses': [address, ], })
        elif 'pubkeys' in inp and inp.get('script_type', '').startswith('multisig-'):
            for pubkey in inp['pubkeys']:  # TODO: better pubkey test
                assert uses_only_hash_chars(pubkey), pubkey
            inputs_cleaned.append({'addresses': inp['pubkeys'], 'script_type': inp['script_type'], })
        elif 'wallet_name' in inp and 'wallet_token' in inp:
            # Registered-wallet input: pass through unchanged.
            inputs_cleaned.append(inp)
        else:
            raise Exception('Invalid Input: %s' % inp)

    outputs_cleaned = []
    sweep_funds = False
    for out in outputs:
        entry = {}
        assert 'value' in out, out
        assert isinstance(out['value'], int), out['value']
        if out['value'] == -1:
            # value == -1 means "sweep everything"; a change address makes
            # no sense in that case.
            sweep_funds = True
            assert not change_address, 'Change Address Supplied for Sweep TX'
        entry['value'] = out['value']
        if out.get('script_type') == 'null-data':
            # No address required for null-data outputs.
            assert out['value'] == 0
            assert 'script' in out, out
            entry['script_type'] = 'null-data'
            entry['script'] = out['script']
        else:
            # The API wants a singleton 'addresses' list; hide that detail
            # from the caller.
            assert 'address' in out, out
            assert is_valid_address_for_coinsymbol(b58_address=out['address'], coin_symbol=coin_symbol, )
            entry['addresses'] = [out['address']]
        outputs_cleaned.append(entry)

    if change_address:
        assert is_valid_address_for_coinsymbol(b58_address=change_address, coin_symbol=coin_symbol), change_address
    assert preference in ('high', 'medium', 'low', 'zero'), preference

    # Beginning of method code.
    url = make_url(coin_symbol, **dict(txs='new'))
    data = {'inputs': inputs_cleaned, 'outputs': outputs_cleaned, 'preference': preference, }
    if min_confirmations:
        data['confirmations'] = min_confirmations
    if change_address:
        data['change_address'] = change_address
    if include_tosigntx or verify_tosigntx:
        params = {'includeToSignTx': 'true'}
    else:
        params = {}
    # Nasty hack - remove when API updated.
    if 'wallet_token' in inputs[0]:
        params['token'] = inputs[0]['wallet_token']
    elif api_key:
        params['token'] = api_key
    else:
        raise Exception('No API Token Supplied')

    r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    unsigned_tx = get_valid_json(r)
    if verify_tosigntx:
        tx_is_correct, err_msg = verify_unsigned_tx(unsigned_tx=unsigned_tx, inputs=inputs, outputs=outputs, sweep_funds=sweep_funds, change_address=change_address, coin_symbol=coin_symbol, )
        if not tx_is_correct:
            print(unsigned_tx)  # for debug
            raise Exception('TX Verification Error: %s' % err_msg)
    return unsigned_tx
def cmd(send, msg, _):
    """Converts morse to ascii.

    Syntax: {command} <text>
    """
    codes = {
        '.----': '1', '-.--': 'y', '..-': 'u', '...': 's', '-.-.': 'c',
        '.-.-.': '+', '--..--': ',', '-.-': 'k', '.--.': 'p', '----.': '9',
        '-----': '0', ' ': ' ', '...--': '3', '-....-': '-', '...-..-': '$',
        '..---': '2', '.--.-.': '@', '-...-': '=', '-....': '6', '...-': 'v',
        '.----.': "'", '....': 'h', '.....': '5', '....-': '4', '.': 'e',
        '.-.-.-': '.', '-': 't', '.-..': 'l', '..': 'i', '.-': 'a',
        '-..-': 'x', '-...': 'b', '-.': 'n', '.-..-.': '"', '.--': 'w',
        '-.--.-': ')', '--...': '7', '.-.': 'r', '.---': 'j', '---..': '8',
        '--': 'm', '-.-.-.': ';', '-.-.--': '!', '-..': 'd', '-.--.': '(',
        '..-.': 'f', '---...': ':', '-..-.': '/', '..--.-': '_', '.-...': '&',
        '..--..': '?', '--.': 'g', '--..': 'z', '--.-': 'q', '---': 'o',
    }
    if not msg:
        send("demorse what?")
        return
    decoded = ""
    for word in msg.lower().split(" "):
        for token in word.split():
            # Unknown codes decode to '?'.
            decoded += codes.get(token, "?")
        decoded += " "
    send(decoded)
def channels_archive(self, room_id, **kwargs):
    """Archives a channel."""
    # The API helper expects the extra options bundled under 'kwargs'.
    endpoint = 'channels.archive'
    return self.__call_api_post(endpoint, roomId=room_id, kwargs=kwargs)
def set_special(index, color, iterm_name="h", alpha=100):
    """Convert a hex color to a special sequence.

    :param index: special color index (11 and 708 honor alpha)
    :param color: hex color string, e.g. "#ffffff"
    :param iterm_name: iTerm2 proprietary code letter (macOS only)
    :param alpha: opacity percentage; 100 (int or "100") means opaque
    """
    if OS == "Darwin" and iterm_name:
        return "\033]P%s%s\033\\" % (iterm_name, color.strip("#"))
    # Compare as a string so both the int default (100) and string values
    # ("100") mean "fully opaque"; previously `100 != "100"` was always
    # true, so the default argument always took the alpha branch.
    if index in [11, 708] and str(alpha) != "100":
        return "\033]%s;[%s]%s\033\\" % (index, alpha, color)
    return "\033]%s;%s\033\\" % (index, color)
def bubble_to_dot(bblfile: str, dotfile: str = None, render: bool = False, oriented: bool = False):
    """Write in dotfile a graph equivalent to those depicted in bubble file."""
    oriented_flag = bool(oriented)
    tree = BubbleTree.from_bubble_file(bblfile, oriented=oriented_flag)
    return tree_to_dot(tree, dotfile, render=render)
def set_sound_mode_dict(self, sound_mode_dict):
    """Set the matching dictionary used to match the raw sound mode."""
    error_msg = ("Syntax of sound mode dictionary not valid, "
                 "use: OrderedDict([('COMMAND', ['VALUE1','VALUE2'])])")
    # Validate shape: a dict of command -> list of raw-mode strings.
    if not isinstance(sound_mode_dict, dict):
        _LOGGER.error(error_msg)
        return False
    for values in sound_mode_dict.values():
        if not isinstance(values, list):
            _LOGGER.error(error_msg)
            return False
        for element in values:
            if not isinstance(element, str):
                _LOGGER.error(error_msg)
                return False
    self._sound_mode_dict = sound_mode_dict
    self._sm_match_dict = self.construct_sm_match_dict()
    return True
def run_command(self, cmd, sudo=False, capture=True, quiet=None, return_result=False):
    '''Wrapper for the global run_cmd, checking first for sudo and exiting
    on error if needed; stdout uses the parent process so it appears for
    the user.

    Parameters
    ==========
    cmd: the command to run
    sudo: does the command require sudo?
    capture: capture the command's output
    quiet: if set by the caller, overrides the client setting.
    return_result: always return the full result dict (default False).

    Returns the message (output) on success, otherwise the result dict.
    '''
    # First preference to the function argument, then the client setting.
    if quiet is None:
        quiet = self.quiet
    result = run_cmd(cmd, sudo=sudo, capture=capture, quiet=quiet)
    # If one line is returned, squash the dimension to a plain string.
    if len(result['message']) == 1:
        result['message'] = result['message'][0]
    # If the user wants the raw result, just return it.
    if return_result is True:
        return result
    # On success, return only the message; otherwise the full result.
    if result['return_code'] == 0:
        return result['message']
    return result
def get_by_id(self, institution_id, _options=None):
    '''Fetch a single institution by id.

    :param str institution_id:
    '''
    payload = {
        'institution_id': institution_id,
        'options': _options or {},
    }
    return self.client.post_public_key('/institutions/get_by_id', payload)
def getfullfilename(file_path):
    '''Get full filename (with extension)'''
    # NOTE(review): the deprecation message points at write_file(), which
    # looks unrelated to a filename getter -- confirm the intended replacement.
    warnings.warn("getfullfilename() is deprecated and will be removed in near future. Use chirptext.io.write_file() instead", DeprecationWarning)
    return os.path.basename(file_path) if file_path else ''
def is_line_in_file(filename: str, line: str) -> bool:
    """Detects whether a line is present within a file.

    Args:
        filename: file to check
        line: line to search for (as an exact match, without its newline)

    Returns:
        True if any line of the file equals ``line``.
    """
    assert "\n" not in line
    with open(filename, "r") as file:
        for fileline in file:
            # Lines yielded by file iteration keep their trailing newline;
            # strip it before comparing. (The original compared the raw
            # line, so it could only ever match a final line that had no
            # terminating newline.)
            if fileline.rstrip("\n") == line:
                return True
    return False
def _draw ( self , data ) :
"""Draw text""" | self . _cursor . clearSelection ( )
# Resume drawing from where the previous call left the cursor.
self . _cursor . setPosition ( self . _last_cursor_pos )
# A BEL (\x07) anywhere in the payload rings the terminal bell once,
# then all BELs are stripped from the text before drawing.
if '\x07' in data . txt :
print ( '\a' )
txt = data . txt . replace ( '\x07' , '' )
# Backspace (\x08) splits the text; each part is drawn and the cursor
# steps back one position between parts (see end of the loop below).
if '\x08' in txt :
parts = txt . split ( '\x08' )
else :
parts = [ txt ]
for i , part in enumerate ( parts ) :
if part :
# Collapse doubled carriage returns before per-character handling.
part = part . replace ( '\r\r' , '\r' )
if len ( part ) >= 80 * 24 * 8 : # big output , process it in one step ( \ r and \ n will not be handled )
self . _draw_chars ( data , part )
continue
to_draw = ''
# Buffer printable characters; flush the buffer whenever a control
# character (\n or \r) is reached.
for n , char in enumerate ( part ) :
if char == '\n' :
self . _draw_chars ( data , to_draw )
to_draw = ''
self . _linefeed ( )
elif char == '\r' :
self . _draw_chars ( data , to_draw )
to_draw = ''
# \r returns to line start: clear the rest of the line, then
# peek at the next character to decide about wrapping.
self . _erase_in_line ( 0 )
try :
nchar = part [ n + 1 ]
except IndexError :
nchar = None
# NOTE(review): bash-specific wrap at column 80 -- confirm why the
# extra linefeed is skipped when the next character is '\n'.
if self . _cursor . positionInBlock ( ) > 80 and self . flg_bash and nchar != '\n' :
self . _linefeed ( )
self . _cursor . movePosition ( self . _cursor . StartOfBlock )
self . _text_edit . setTextCursor ( self . _cursor )
else :
to_draw += char
# Flush whatever is left in the buffer after the last control char.
if to_draw :
self . _draw_chars ( data , to_draw )
# Between backspace-separated parts, move the cursor back one cell.
if i != len ( parts ) - 1 :
self . _cursor_back ( 1 )
# Remember the cursor state for the next _draw call.
self . _last_cursor_pos = self . _cursor . position ( )
self . _prefix_len = self . _cursor . positionInBlock ( )
self . _text_edit . setTextCursor ( self . _cursor ) |
def from_id(self, tiger, queue, state, task_id, load_executions=0):
    """Loads a task with the given ID from the given queue in the given state.

    An integer may be passed in load_executions to indicate how many
    executions should be loaded (starting from the latest).

    NOTE(review): despite the original docstring claiming None is returned
    for a missing task, the code raises TaskNotFound -- documented as such.
    """
    if load_executions:
        # Fetch the task data and its latest executions in one round trip.
        pipeline = tiger.connection.pipeline()
        pipeline.get(tiger._key('task', task_id))
        pipeline.lrange(tiger._key('task', task_id, 'executions'), -load_executions, -1)
        serialized_data, serialized_executions = pipeline.execute()
    else:
        serialized_data = tiger.connection.get(tiger._key('task', task_id))
        serialized_executions = []
    # XXX: No timestamp for now
    if not serialized_data:
        raise TaskNotFound('Task {} not found.'.format(task_id))
    data = json.loads(serialized_data)
    executions = [json.loads(e) for e in serialized_executions if e]
    return Task(tiger, queue=queue, _data=data, _state=state, _executions=executions)
def stop(name, call=None):
    """Stop a running machine.

    @param name: Machine to stop
    @type name: str
    @param call: Must be "action"
    @type call: str
    """
    if call != 'action':
        raise SaltCloudSystemExit('The instance action must be called with -a or --action.')
    log.info("Stopping machine: %s", name)
    vb_stop_vm(name)
    machine = vb_get_machine(name)
    # The name is passed separately to the caller; drop it from the dict.
    machine.pop("name")
    return treat_machine_dict(machine)
def operations(*operations):
    '''Decorator for marking Resource methods as HTTP operations.

    This decorator does a number of different things:
    - It transfers docstring and annotations from the decorated method onto
      the wrapper, so as to be "transparent" with regards to introspection.
    - It transforms the method into a classmethod.
    - It invokes the method within a try-except, so as to intercept and
      populate the Fail(<code>) conditions.
    '''
    def decorator(method):
        def wrapper(cls, request, start_response, **kwargs):
            result_cache = []
            try:
                yield from method(cls, request, **kwargs)
            except Respond as e:
                # Inject messages as taken from the signature's annotations.
                status = e.status
                msg = utils.parse_return_annotation(method)[status]['message']
                # NOTE(review): with true division only status 200 satisfies
                # this test; confirm 201-299 falling through is intended.
                if status / 100 == 2:  # All 2xx HTTP codes
                    e.description = msg
                    raise e
                else:  # HTTP errors --> use werkzeug exceptions
                    raise CODES_TO_EXCEPTIONS[status](msg)
        # Attach operation-specific attributes to the method.
        method.swagger_ops = operations
        method.signature = inspect.signature(method)
        method.source = inspect.getsource(method)
        method.path_vars = utils.extract_pathvars(method)
        # "Backport" the introspective attributes onto the wrapper.
        for attr in ('__name__', '__doc__', '__annotations__',
                     'swagger_ops', 'signature', 'source', 'path_vars'):
            setattr(wrapper, attr, getattr(method, attr))
        return classmethod(wrapper)
    return decorator
def optimal_parameters(reconstruction, fom, phantoms, data, initial=None, univariate=False):
    r"""Find the optimal parameters for a reconstruction method.

    The optimal :math:`\theta` minimizes the total figure of merit

    .. math::
        \theta = \arg\min_\theta \sum_i fom(R_\theta(data_i), phantom_i).

    Parameters
    ----------
    reconstruction : callable
        ``reconstruction(data, parameters)`` returning the reconstructed image.
    fom : callable
        ``fom(reconstructed_image, true_image)`` returning a scalar.
    phantoms : sequence
        True images.
    data : sequence
        The data to reconstruct from.
    initial : array-like or pair
        Initial guess for the parameters; required (array) in the
        multivariate case, optional (bracket pair) in the univariate case.
    univariate : bool, optional
        Whether to use a univariate solver.

    Returns
    -------
    parameters : 'numpy.ndarray'
        The optimal parameters for the reconstruction problem.
    """
    def objective(lam):
        # Total figure of merit over all phantom/data pairs (minimized below).
        return sum(fom(reconstruction(datai, lam), phantomi)
                   for phantomi, datai in zip(phantoms, data))

    # Pick a tolerance matching the resolution of the space's dtype.
    tol = np.finfo(phantoms[0].space.dtype).resolution * 10
    if univariate:
        # Faster scalar optimizer for the one-parameter case.
        result = scipy.optimize.minimize_scalar(
            objective, bracket=initial, tol=tol, bounds=None,
            options={'disp': False})
        return result.x
    # Gradient-free multivariate search.
    initial = np.asarray(initial)
    return scipy.optimize.fmin_powell(objective, initial, xtol=tol, ftol=tol, disp=False)
def within(self, lat, lon, radius):
    '''Convenience method to apply a $loc/$within/$center filter. Radius is in meters.'''
    geo_filter = filter_helpers.within_(lat, lon, radius)
    return self.filter(geo_filter)
def send_video_note(self, *args, **kwargs):
    """See :func:`send_video`"""
    # Delegate to the module-level send_video_note builder with the
    # instance's overrides merged into the keyword arguments.
    overrides = self._merge_overrides(**kwargs)
    return send_video_note(*args, **overrides).run()
def create_requests(self, request_to_create):
    """CreateRequests.

    [Preview API] Create a new symbol request.
    :param :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` request_to_create: The symbol request to create.
    :rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
    """
    body = self._serialize.body(request_to_create, 'Request')
    response = self._send(
        http_method='POST',
        location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
        version='5.0-preview.1',
        content=body,
    )
    return self._deserialize('Request', response)
def _d_helper(vec, vname, color, bw, titlesize, xt_labelsize, linewidth, markersize, credible_interval, point_estimate, hpd_markers, outline, shade, ax,):
    """Plot an individual dimension.

    Parameters
    ----------
    vec : array
        1D array from trace
    vname : str
        variable name
    color : str
        matplotlib color
    bw : float
        Bandwidth scaling factor. Should be larger than 0. The higher this
        number the smoother the KDE will be. Defaults to 4.5 which is
        essentially the same as the Scott's rule of thumb (the default rule
        used by SciPy).
    titlesize : float
        font size for title
    xt_labelsize : float
        fontsize for xticks
    linewidth : float
        Thickness of lines
    markersize : float
        Size of markers
    credible_interval : float
        Credible intervals. Defaults to 0.94
    point_estimate : str or None
        'mean' or 'median'
    hpd_markers : str or None
        Matplotlib marker style used to flag the HPD bounds on the x-axis;
        falsy disables the markers.
    outline : bool
        Whether to draw the density outline (continuous) or step histogram
        (discrete).
    shade : float
        Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to 0.
    ax : matplotlib axes
    """
    if vec.dtype.kind == "f":
        # Continuous variable: KDE over the sample restricted to the HPD.
        if credible_interval != 1:
            hpd_ = hpd(vec, credible_interval)
            new_vec = vec[(vec >= hpd_[0]) & (vec <= hpd_[1])]
        else:
            new_vec = vec
        density, xmin, xmax = _fast_kde(new_vec, bw=bw)
        # Rescale so the plotted mass reflects the credible interval.
        density *= credible_interval
        x = np.linspace(xmin, xmax, len(density))
        ymin = density[0]
        ymax = density[-1]
        if outline:
            ax.plot(x, density, color=color, lw=linewidth)
            # Short vertical ticks marking the interval endpoints.
            ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls="-", lw=linewidth)
            ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls="-", lw=linewidth)
        if shade:
            ax.fill_between(x, density, color=color, alpha=shade)
    else:
        # Discrete variable: integer-binned histogram over the HPD range.
        xmin, xmax = hpd(vec, credible_interval)
        bins = range(xmin, xmax + 2)
        if outline:
            ax.hist(vec, bins=bins, color=color, histtype="step", align="left")
        if shade:
            ax.hist(vec, bins=bins, color=color, alpha=shade)
    if hpd_markers:
        ax.plot(xmin, 0, hpd_markers, color=color, markeredgecolor="k", markersize=markersize)
        ax.plot(xmax, 0, hpd_markers, color=color, markeredgecolor="k", markersize=markersize)
    if point_estimate is not None:
        if point_estimate == "mean":
            est = np.mean(vec)
        elif point_estimate == "median":
            est = np.median(vec)
        # NOTE(review): any point_estimate other than 'mean'/'median' would
        # leave ``est`` unbound here -- presumably callers validate it.
        ax.plot(est, 0, "o", color=color, markeredgecolor="k", markersize=markersize)
    # Cosmetic cleanup: hide y ticks, open three spines, title = var name.
    ax.set_yticks([])
    ax.set_title(vname, fontsize=titlesize, wrap=True)
    for pos in ["left", "right", "top"]:
        ax.spines[pos].set_visible(False)
    ax.tick_params(labelsize=xt_labelsize)
def DownloadCollection(coll_path, target_path, token=None, overwrite=False, dump_client_info=False, flatten=False, max_threads=10):
    """Iterate through a Collection object downloading all files.

    Args:
      coll_path: Path to an AFF4 collection.
      target_path: Base directory to write to.
      token: Token for access.
      overwrite: If True, overwrite existing files.
      dump_client_info: If True, this will detect client paths, and dump a
        yaml version of the client object to the root path. This is useful
        for seeing the hostname/users of the machine the client id refers to.
      flatten: If True, produce a "files" flat folder with links to all the
        found files.
      max_threads: Use this many threads to do the downloads.
    """
    completed_clients = set()
    coll = _OpenCollectionPath(coll_path)
    if coll is None:
        logging.error("%s is not a valid collection. Typo? " "Are you sure something was written to it?", coll_path)
        return
    thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
    thread_pool.Start()
    # Extract the client id from the source urn. This code makes me
    # quite sad but there is just no concept of passing a client id in
    # the export tool (and this would be unsafe anyways since the user
    # could then download files from arbitrary machines easily). The
    # export tool is on the way to deprecation so we decided to do this
    # instead of fixing the obsolete code.
    try:
        collection_urn = coll.collection_id
    except AttributeError:
        collection_urn = coll.urn
    try:
        original_client_id = rdf_client.ClientURN(collection_urn.Split()[0])
    except IOError:
        original_client_id = None
    logging.info("Expecting to download %s files", len(coll))
    # Collections can include anything they want, but we only handle RDFURN and
    # StatEntry entries in this function.
    for grr_message in coll:
        source = None
        # If a raw message, work out the type from the payload.
        if isinstance(grr_message, rdf_flows.GrrMessage):
            source = grr_message.source
            grr_message = grr_message.payload
        # Map each supported payload type to the AFF4 urn to download.
        if isinstance(grr_message, rdfvalue.RDFURN):
            urn = grr_message
        elif isinstance(grr_message, rdf_client_fs.StatEntry):
            urn = rdfvalue.RDFURN(grr_message.AFF4Path(source or original_client_id))
        elif isinstance(grr_message, rdf_file_finder.FileFinderResult):
            urn = rdfvalue.RDFURN(grr_message.stat_entry.AFF4Path(source or original_client_id))
        elif isinstance(grr_message, collectors.ArtifactFilesDownloaderResult):
            if grr_message.HasField("downloaded_file"):
                urn = grr_message.downloaded_file.AFF4Path(source or original_client_id)
            else:
                continue
        elif isinstance(grr_message, rdfvalue.RDFBytes):
            try:
                os.makedirs(target_path)
            except OSError:
                # directory already exists (or is uncreatable; open will fail)
                pass
            try:
                # We just dump out bytes and carry on.
                client_id = source.Split()[0]
                with open(os.path.join(target_path, client_id), "wb") as fd:
                    # NOTE(review): str() on Python 3 would yield text for a
                    # "wb" file -- this path looks Python-2 only; confirm.
                    fd.write(str(grr_message))
            except AttributeError:
                # source was None / not a URN; nothing we can name the file by
                pass
            continue
        else:
            continue
        # Handle dumping client info, but only once per client.
        if dump_client_info:
            client_id = urn.Split()[0]
            re_match = aff4_grr.VFSGRRClient.CLIENT_ID_RE.match(client_id)
            if re_match and client_id not in completed_clients:
                args = (rdf_client.ClientURN(client_id), target_path, token, overwrite)
                thread_pool.AddTask(target=DumpClientYaml, args=args, name="ClientYamlDownloader")
                completed_clients.add(client_id)
        # Now queue downloading the actual files.
        args = (urn, target_path, token, overwrite)
        if flatten:
            target = CopyAndSymlinkAFF4ToLocal
        else:
            target = CopyAFF4ToLocal
        thread_pool.AddTask(target=target, args=args, name="Downloader")
    # Join and stop the threadpool.
    thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)
def _get_larger_chroms(ref_file):
    """Retrieve larger chromosomes, avoiding the smaller ones for plotting.

    Two successive 2-means splits on the ascending contig sizes peel off the
    haplotype/random contigs; every contig larger than the biggest member of
    the final small cluster is kept.

    :param ref_file: path to the reference, readable by ``ref.file_contigs``
    :returns: list of contig names to plot
    """
    from scipy.cluster.vq import kmeans, vq

    def _smallest_group(sizes):
        # Split sizes (sorted ascending) into 2 kmeans clusters and return
        # the members of the cluster containing the smallest element.
        centroids, _ = kmeans(np.array(sizes), 2)
        idx, _ = vq(np.array(sizes), centroids)
        group = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, sizes)))
        return [x[1] for x in group]

    # Read the reference contigs once instead of twice.
    contigs = list(ref.file_contigs(ref_file))
    all_sizes = sorted(float(c.size) for c in contigs)
    if len(all_sizes) > 5:
        # separate out smaller chromosomes and haplotypes with kmeans, then
        # split the small cluster once more to drop the haplotypes
        little_sizes = _smallest_group(all_sizes)
        little_sizes2 = _smallest_group(little_sizes)
        thresh = max(little_sizes2)
    else:
        thresh = 0
    return [c.name for c in contigs if c.size > thresh]
def reset(self):
    """Restore the foreground and background colors to the values captured
    when this object was initialised."""
    self._fgcolor, self._bgcolor = self.default_fgcolor, self.default_bgcolor
def relations_to(self, target, include_object=False):
    '''List all relations pointing at an object.

    Without ``include_object`` only the relation names are yielded;
    with it, (object, relation-name) pairs are yielded, skipping links
    that no longer carry an object ("dead" links).
    '''
    incoming = self._get_item_node(target).incoming
    if not include_object:
        yield from incoming
        return
    for relation, links in incoming.items():
        for link in links:
            if hasattr(link, 'obj'):  # filter dead links
                yield link.obj, relation
def _get_ids_from_name_private(self, name):
    """Get private images which match the given name."""
    matching_images = self.list_private_images(name=name)
    ids = [image['id'] for image in matching_images]
    return ids
def _support_diag_dump(self):
    '''Collect log info for debug.

    Logs the insights configuration, registration status, connection test
    result, the output of a set of system commands, and the free space in
    /var/tmp.  Purely diagnostic; returns nothing.
    '''
    # check insights config
    cfg_block = []
    pconn = InsightsConnection(self.config)
    logger.info('Insights version: %s', get_nvr())
    reg_check = registration_check(pconn)
    cfg_block.append('Registration check:')
    for key in reg_check:
        cfg_block.append(key + ': ' + str(reg_check[key]))
    lastupload = 'never'
    if os.path.isfile(constants.lastupload_file):
        with open(constants.lastupload_file) as upl_file:
            lastupload = upl_file.readline().strip()
    cfg_block.append('\nLast successful upload was ' + lastupload)
    cfg_block.append('auto_config: ' + str(self.config.auto_config))
    if self.config.proxy:
        # mask the credential portion of user:pass@host proxy URLs
        obfuscated_proxy = re.sub(r'(.*)(:)(.*)(@.*)', r'\1\2********\4', self.config.proxy)
    else:
        obfuscated_proxy = 'None'
    cfg_block.append('proxy: ' + obfuscated_proxy)
    logger.info('\n'.join(cfg_block))
    logger.info('python-requests: %s', requests.__version__)
    succ = pconn.test_connection()
    if succ == 0:
        logger.info('Connection test: PASS\n')
    else:
        logger.info('Connection test: FAIL\n')
    # run commands
    commands = ['uname -a',
                'cat /etc/redhat-release',
                'env',
                'sestatus',
                'subscription-manager identity',
                'systemctl cat insights-client.timer',
                'systemctl cat insights-client.service',
                'systemctl status insights-client.timer',
                'systemctl status insights-client.service']
    for cmd in commands:
        logger.info("Running command: %s", cmd)
        try:
            proc = Popen(shlex.split(cmd), shell=False, stdout=PIPE,
                         stderr=STDOUT, close_fds=True)
            stdout, stderr = proc.communicate()
        except OSError as o:
            if 'systemctl' not in cmd:  # suppress output for systemctl cmd failures
                logger.info('Error running command "%s": %s', cmd, o)
            # BUG FIX: ``stdout`` is unbound when Popen fails; skip the
            # output logging instead of raising NameError.
            continue
        except Exception as e:  # unknown error
            logger.info("Process failed: %s", e)
            continue
        logger.info("Process output: \n%s", stdout)
    # check available disk space for /var/tmp
    tmp_dir = '/var/tmp'
    dest_dir_stat = os.statvfs(tmp_dir)
    dest_dir_size = (dest_dir_stat.f_bavail * dest_dir_stat.f_frsize)
    logger.info('Available space in %s:\t%s bytes\t%.1f 1K-blocks\t%.1f MB',
                tmp_dir, dest_dir_size, dest_dir_size / 1024.0,
                (dest_dir_size / 1024.0) / 1024.0)
def dmail_delete(self, dmail_id):
    """Delete a dmail. You can only delete dmails you own (Requires login).

    Parameters:
        dmail_id (int): where dmail_id is the dmail id.
    """
    endpoint = 'dmails/{0}.json'.format(dmail_id)
    return self._get(endpoint, method='DELETE', auth=True)
def _init_map(self, record_types=None, **kwargs):
    """Initialize form map"""
    # Let the OSID base class populate the common fields first, then lay
    # the grade-system-entry specific defaults/ids on top.
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    self._my_map.update({
        'outputScore': self._output_score_default,
        'gradeSystemId': str(kwargs['grade_system_id']),
        'inputScoreEndRange': self._input_score_end_range_default,
        'inputScoreStartRange': self._input_score_start_range_default,
        'assignedGradebookIds': [str(kwargs['gradebook_id'])],
    })
def _format_firewall_stdout(cmd_ret):
    '''Helper function to format the stdout from the get_firewall_status function.

    cmd_ret
        The return dictionary that comes from a cmd.run_all call.
    '''
    ret_dict = {'success': True, 'rulesets': {}}
    for line in cmd_ret['stdout'].splitlines():
        if line.startswith('Name'):
            continue
        if line.startswith('---'):
            continue
        if not line.strip():
            # skip blank lines instead of crashing on an empty split
            continue
        ruleset_status = line.split()
        # BUG FIX: bool() on a non-empty string is always True, so the old
        # ``bool(ruleset_status[1])`` reported "false" rulesets as enabled.
        # Parse the true/false text explicitly instead.
        ret_dict['rulesets'][ruleset_status[0]] = ruleset_status[1].lower() == 'true'
    return ret_dict
def __is_valid_value_for_arg(self, arg, value, check_extension=True):
    """Check if value is allowed for arg.

    Some commands only allow a limited set of values. The method always
    returns True for arguments that do not declare such a set.

    :param arg: the argument's name
    :param value: the value to check
    :param check_extension: check if value requires an extension
    :return: True on succes, False otherwise
    """
    if "values" not in arg and "extension_values" not in arg:
        # No restriction declared: anything goes.
        return True
    lowered = value.lower()
    if lowered in arg.get("values", ()):
        return True
    extension = arg.get("extension_values", {}).get(lowered)
    if extension:
        if check_extension and extension not in RequireCommand.loaded_extensions:
            raise ExtensionNotLoaded(extension)
        return True
    return False
def export_vms(self, vms_names=None, standalone=False, export_dir='.', compress=False, init_file_name='LagoInitFile', out_format=YAMLOutFormatPlugin(), collect_only=False, with_threads=True,):
    """Export vm image disks and an init file that can recreate the
    environment.

    Args:
        vms_names (list of str): Names of the vms to export; if None,
            export all the vms in the env (default=None)
        standalone (bool): If false, export a layered image
            (default=False)
        export_dir (str): Dir to place the exported images and init file
        compress (bool): If True compress the images with xz
            (default=False)
        init_file_name (str): The name of the exported init file
            (default='LagoInitfile')
        out_format (:class:`lago.plugins.output.OutFormatPlugin`):
            The type of the exported init file (the default is yaml)
        collect_only (bool): If True, return only a mapping from vm name
            to the disks that will be exported. (default=False)
        with_threads (bool): If True, run the export in parallel
            (default=True)

    Returns:
        Unless collect_only == True, a mapping between vms' disks.
    """
    # Pure delegation: the virt env owns the vms and their disks.
    return self.virt_env.export_vms(
        vms_names,
        standalone,
        export_dir,
        compress,
        init_file_name,
        out_format,
        collect_only,
        with_threads,
    )
def get_source(fileobj):
    """Translate fileobj into file contents.

    fileobj is either a string or a dict. If it's a string, that's the
    file contents. If it's a dict, then the filename key contains
    the name of the file whose contents we are to use.

    If the dict contains a true value for the key delete_after_use,
    the file should be deleted once read.
    """
    if not isinstance(fileobj, dict):
        return fileobj
    try:
        with io.open(fileobj["filename"], encoding="utf-8", errors="ignore") as f:
            return f.read()
    finally:
        if fileobj.get('delete_after_use'):
            try:
                os.remove(fileobj["filename"])
            # BUG FIX: narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; deletion stays best-effort.
            except OSError:  # pragma: no cover
                pass
def check_mod_enabled(mod):
    '''Checks to see if the specific mod symlink is in /etc/apache2/mods-enabled.

    This will only be functional on Debian-based operating systems (Ubuntu,
    Mint, etc).

    CLI Examples:

    .. code-block:: bash

        salt '*' apache.check_mod_enabled status
        salt '*' apache.check_mod_enabled status.load
        salt '*' apache.check_mod_enabled status.conf
    '''
    # str.endswith accepts a tuple of suffixes; bare mod names get ".load".
    if mod.endswith(('.load', '.conf')):
        mod_file = mod
    else:
        mod_file = '{0}.load'.format(mod)
    return os.path.islink('/etc/apache2/mods-enabled/{0}'.format(mod_file))
def complement(self, alphabet):
    """Compute the complement of the DFA in place by flipping every
    state's acceptance flag.

    Args:
        alphabet (list): The input alphabet (unused here; kept for API
            compatibility)
    Returns:
        None
    """
    # Visit initial state(s) first, preserving the original traversal order.
    for state in sorted(self.states, key=attrgetter('initial'), reverse=True):
        # Idiom: flip the flag directly instead of an if/else toggle.
        state.final = not state.final
def rsp_process(rsp, sampling_rate=1000):
    """Automated processing of RSP signals.

    Parameters
    ----------
    rsp : list or array
        Respiratory (RSP) signal array.
    sampling_rate : int
        Sampling rate (samples/second).

    Returns
    -------
    processed_rsp : dict
        Dict containing processed RSP features.
        Contains the RSP raw signal, the filtered signal, the respiratory
        cycles onsets, and respiratory phases (inspirations and expirations).

    Example
    -------
    >>> import neurokit as nk
    >>> processed_rsp = nk.rsp_process(rsp_signal)

    Notes
    -----
    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)

    *Dependencies*

    - biosppy
    - numpy
    - pandas

    *See Also*

    - BioSPPY: https://github.com/PIA-Group/BioSPPy
    """
    processed_rsp = {"df": pd.DataFrame({"RSP_Raw": np.array(rsp)})}
    # Delegate filtering and rate estimation to biosppy.
    biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))
    processed_rsp["df"]["RSP_Filtered"] = biosppy_rsp["filtered"]
    # RSP Rate
    rsp_rate = biosppy_rsp["resp_rate"] * 60
    # Get RSP rate value (in cycles per minute)
    rsp_times = biosppy_rsp["resp_rate_ts"]
    # the time (in sec) of each rsp rate value
    rsp_times = np.round(rsp_times * sampling_rate).astype(int)
    # Convert to timepoints
    try:
        rsp_rate = interpolate(rsp_rate, rsp_times, sampling_rate)
        # Interpolation using 3rd order spline
        processed_rsp["df"]["RSP_Rate"] = rsp_rate
    except TypeError:
        print("NeuroKit Warning: rsp_process(): Sequence too short to compute respiratory rate.")
        processed_rsp["df"]["RSP_Rate"] = np.nan
    # RSP Cycles: onsets plus inspiration/expiration phase markers.
    rsp_cycles = rsp_find_cycles(biosppy_rsp["filtered"])
    processed_rsp["df"]["RSP_Inspiration"] = rsp_cycles["RSP_Inspiration"]
    processed_rsp["RSP"] = {}
    processed_rsp["RSP"]["Cycles_Onsets"] = rsp_cycles["RSP_Cycles_Onsets"]
    processed_rsp["RSP"]["Expiration_Onsets"] = rsp_cycles["RSP_Expiration_Onsets"]
    # Cycle lengths converted from samples to seconds.
    processed_rsp["RSP"]["Cycles_Length"] = rsp_cycles["RSP_Cycles_Length"] / sampling_rate
    # RSP Variability: dispersion statistics over the cycle lengths.
    rsp_diff = processed_rsp["RSP"]["Cycles_Length"]
    processed_rsp["RSP"]["Respiratory_Variability"] = {}
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_SD"] = np.std(rsp_diff)
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"] = np.sqrt(np.mean(rsp_diff ** 2))
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD_Log"] = np.log(processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"])
    return (processed_rsp)
def py_to_go_cookie(py_cookie):
    '''Convert a python cookie to the JSON-marshalable Go-style cookie form.'''
    # TODO(perhaps): HttpOnly, Creation, LastAccess, Updated.
    # Not done properly: CanonicalHost.
    go_cookie = {
        'Name': py_cookie.name,
        'Value': py_cookie.value,
        'Domain': py_cookie.domain,
        'HostOnly': not py_cookie.domain_specified,
        'Persistent': not py_cookie.discard,
        'Secure': py_cookie.secure,
        'CanonicalHost': py_cookie.domain,
    }
    if py_cookie.path_specified:
        go_cookie['Path'] = py_cookie.path
    expires = py_cookie.expires
    if expires is not None:
        when = datetime.datetime.fromtimestamp(expires)
        # Note: fromtimestamp bizarrely produces a time without
        # a time zone, so we need to use accept_naive.
        go_cookie['Expires'] = pyrfc3339.generate(when, accept_naive=True)
    return go_cookie
def fetchMore(self, parentIndex):  # TODO: Make LazyLoadRepoTreeModel?
    """Fetches any available data for the items with the parent specified by the parent index."""
    parentItem = self.getItem(parentIndex)
    # Nothing to do for a missing item or one that is already fully loaded.
    if not parentItem or not parentItem.canFetchChildren():
        return
    # TODO: implement InsertItems to optimize?
    for child in parentItem.fetchChildren():
        self.insertItem(child, parentIndex=parentIndex)
    # Check that Rti implementation correctly sets canFetchChildren
    assert not parentItem.canFetchChildren(), "not all children fetched: {}".format(parentItem)
def save_location(self, filename: str, location: PostLocation, mtime: datetime) -> None:
    """Save post location name and Google Maps link."""
    filename += '_location.txt'
    maps_link = "https://maps.google.com/maps?q={0},{1}&ll={0},{1}\n".format(location.lat, location.lng)
    location_string = location.name + "\n" + maps_link
    with open(filename, 'wb') as text_file:
        shutil.copyfileobj(BytesIO(location_string.encode()), text_file)
    # Stamp the file with the post's original modification time.
    os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
    self.context.log('geo', end=' ', flush=True)
def flatten_errors(cfg, res, levels=None, results=None):
    """An example function that will turn a nested dictionary of results
    (as returned by ``ConfigObj.validate``) into a flat list.

    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
    dictionary returned by ``validate``.

    (This is a recursive function, so you shouldn't use the ``levels`` or
    ``results`` arguments - they are used by the function.)

    Returns a list of keys that failed. Each member of the list is a tuple::

        ([list of sections...], key, result)

    If ``validate`` was called with ``preserve_errors=False`` (the default)
    then ``result`` will always be ``False``.

    *list of sections* is a flattened list of sections that the key was found
    in.

    If the section was missing (or a section was expected and a scalar
    provided - or vice-versa) then key will be ``None``.

    If the value (or section) was missing then ``result`` will be ``False``.

    If ``validate`` was called with ``preserve_errors=True`` and a value
    was present, but failed the check, then ``result`` will be the exception
    object returned. You can use this as a string that describes the failure.
    For example *The value "3" is of the wrong type*.
    """
    if levels is None:
        # Top-level invocation: start fresh accumulators.
        levels = []
        results = []
    if res == True:
        return results
    if res == False or isinstance(res, Exception):
        results.append((levels[:], None, res))
        if levels:
            levels.pop()
        return results
    for key, val in res.items():
        if val == True:
            continue
        if isinstance(cfg.get(key), dict):
            # Section: descend one level and recurse.
            levels.append(key)
            flatten_errors(cfg[key], val, levels, results)
        else:
            results.append((levels[:], key, val))
    # Go up one level
    if levels:
        levels.pop()
    return results
def _get_factors(self, element):
    """Get factors for categorical axes."""
    xdim, ydim = element.dimensions()[:2]
    xvals = element.dimension_values(0, False)
    yvals = element.dimension_values(1, False)
    factors = []
    for dim, vals in ((xdim, xvals), (ydim, yvals)):
        # String-typed arrays are already categorical labels; anything else
        # is rendered through the dimension's pretty-printer.
        if vals.dtype.kind in 'SU':
            factors.append([v for v in vals])
        else:
            factors.append([dim.pprint_value(v) for v in vals])
    coords = tuple(factors)
    return coords[::-1] if self.invert_axes else coords
def _find_geophysical_vars(self, ds, refresh=False):
    '''Return a list of geophysical variable names, caching the result in
    ``self._geophysical_vars``.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param bool refresh: if refresh is set to True, the cache is
        invalidated.
    :rtype: list
    :return: A list containing strings with geophysical variable names.
    '''
    cached = self._geophysical_vars.get(ds, None)
    if cached and refresh is False:
        return cached
    self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)
    return self._geophysical_vars[ds]
def list_subscriptions(self, service):
    """Asks for a list of all subscribed accounts and devices, along with their statuses."""
    payload = {'service': service}
    return self._perform_post_request(self.list_subscriptions_endpoint, payload, self.token_header)
def rank_for_in(self, leaderboard_name, member):
    '''Retrieve the rank for a member in the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @return the rank for a member in the leaderboard, or None if the rank
        cannot be determined.
    '''
    member_score = str(float(self.score_for_in(leaderboard_name, member)))
    # Count members strictly better than this score; the open '(' bound
    # excludes the member's own score, so rank = count + 1.
    if self.order == self.ASC:
        bounds = ('-inf', '(%s' % member_score)
    else:
        bounds = ('(%s' % member_score, '+inf')
    try:
        return self.redis_connection.zcount(leaderboard_name, *bounds) + 1
    # BUG FIX: narrowed from a bare ``except:`` (which also caught
    # KeyboardInterrupt/SystemExit) and deduplicated the two try blocks.
    except Exception:
        return None
def _sincedb_init(self):
    """Initializes the sincedb schema in an sqlite db"""
    if not self._sincedb_path:
        # sincedb persistence disabled
        return
    if not os.path.exists(self._sincedb_path):
        self._log_debug('initializing sincedb sqlite schema')
        conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
        try:
            conn.execute("""
            create table sincedb (
                fid text primary key,
                filename text,
                position integer default 1
            );
            """)
        finally:
            # BUG FIX: close the connection even if the DDL fails.
            conn.close()
def _get_redis_server(opts=None):
    '''Return the Redis server instance.

    The instance is cached in the module-level ``REDIS_SERVER`` global so
    subsequent calls reuse the same connection.
    '''
    global REDIS_SERVER
    if REDIS_SERVER:
        return REDIS_SERVER
    opts = opts or _get_redis_cache_opts()
    if opts['cluster_mode']:
        REDIS_SERVER = StrictRedisCluster(
            startup_nodes=opts['startup_nodes'],
            skip_full_coverage_check=opts['skip_full_coverage_check'])
    else:
        REDIS_SERVER = redis.StrictRedis(
            opts['host'],
            opts['port'],
            unix_socket_path=opts['unix_socket_path'],
            db=opts['db'],
            password=opts['password'])
    return REDIS_SERVER
def _operatorOperands(tokenlist):
    "generator to extract operators and operands in pairs"
    it = iter(tokenlist)
    while True:
        try:
            # BUG FIX: use the builtin ``next(it)`` instead of the
            # Python-2-only ``it.next()`` method, which raises
            # AttributeError on Python 3.
            yield (next(it), next(it))
        except StopIteration:
            # Odd trailing token (or exhausted input): stop cleanly.
            return
def get_field_resolver(self, field_resolver: GraphQLFieldResolver) -> GraphQLFieldResolver:
    """Wrap the provided resolver with the middleware.

    Returns a function that chains the middleware functions with the provided
    resolver function.
    """
    if self._middleware_resolvers is None:
        # No middleware configured: hand the resolver back untouched.
        return field_resolver
    if field_resolver not in self._cached_resolvers:
        # Fold the middleware chain around the resolver, innermost first.
        chained = field_resolver
        for next_fn in self._middleware_resolvers:
            chained = partial(next_fn, chained)
        self._cached_resolvers[field_resolver] = chained
    return self._cached_resolvers[field_resolver]
def evolve(self, rho: Density) -> Density:
    """Apply the action of this gate upon a density"""
    # TODO: implement without explicit channel creation?
    return self.aschannel().evolve(rho)
def read_epub(name, options=None):
    """Creates new instance of EpubBook with the content defined in the input file.

    >>> book = ebooklib.read_epub('book.epub')

    :Args:
      - name: full path to the input file
      - options: extra options as dictionary (optional)

    :Returns:
      Instance of EpubBook.
    """
    epub_reader = EpubReader(name, options)
    book = epub_reader.load()
    # process() post-processes the already-loaded book in place.
    epub_reader.process()
    return book
def _list_subnets_by_identifier(self, identifier):
    """Returns a list of IDs of the subnet matching the identifier.

    :param string identifier: The identifier to look up (any "/netmask"
        suffix is dropped before the lookup)
    :returns: List of matching IDs
    """
    base_identifier, _, _ = identifier.partition('/')
    matches = self.list_subnets(identifier=base_identifier, mask='id')
    return [subnet['id'] for subnet in matches]
def parse_lit(self, lines):
    '''Parse a string line-by-line delineating comments and code.

    :returns: A list of boolean/list-of-string pairs. True designates a
              comment; False designates code.
    '''
    comment_char = '#'
    # TODO: move this into a directive option
    comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))

    def is_doc_line(line):
        return bool(comment.match(line))

    sections = []
    for is_doc, group in itertools.groupby(lines, is_doc_line):
        if is_doc:
            # Strip the comment marker and trailing newlines from doc lines.
            stripped = [comment.sub('', line).rstrip('\r\n') for line in group]
        else:
            stripped = [line.rstrip('\r\n') for line in group]
        sections.append((is_doc, stripped))
    return sections
def formatMessage(self, record: logging.LogRecord) -> str:
    """Convert the already filled log record to a string."""
    level_color = "0"
    text_color = "0"
    fmt = ""
    if record.levelno <= logging.DEBUG:
        # Debug and below: plain gray using the stock format.
        fmt = "\033[0;37m" + logging.BASIC_FORMAT + "\033[0m"
    elif record.levelno <= logging.INFO:
        level_color = "1;36"
        if self.GREEN_RE.search(record.message.lower()):
            text_color = "1;32"
    elif record.levelno <= logging.WARNING:
        level_color = "1;33"
    elif record.levelno <= logging.CRITICAL:
        level_color = "1;31"
    if not fmt:
        fmt = ("\033[" + level_color + "m%(levelname)s\033[0m:%(rthread)s:"
               "%(name)s:\033[" + text_color + "m%(message)s\033[0m")
    fmt = _fest + fmt
    record.rthread = reduce_thread_id(record.thread)
    return fmt % record.__dict__
def scale_axes_from_data(self):
    """Restrict data limits for Y-axis based on what you can see"""
    # get tight limits for X-axis
    if self.args.xmin is None:
        self.args.xmin = min(spec.xspan[0] for spec in self.spectra)
    if self.args.xmax is None:
        self.args.xmax = max(spec.xspan[1] for spec in self.spectra)
    # autoscale view for Y-axis over the visible X range only
    visible = [spec.crop(self.args.xmin, self.args.xmax) for spec in self.spectra]
    ymin = min(spec.value.min() for spec in visible)
    ymax = max(spec.value.max() for spec in visible)
    axes = self.plot.gca()
    axes.yaxis.set_data_interval(ymin, ymax, ignore=True)
    axes.autoscale_view(scalex=False)
def init_app(self, app):
    """Setup the logging handlers, level and formatters.

    Level (DEBUG, INFO, CRITICAL, etc) is determined by the
    app.config['FLASK_LOG_LEVEL'] setting, and defaults to
    ``None``/``logging.NOTSET``.

    :param app: Flask application whose config supplies FLASK_LOG_LEVEL
    :returns: the numeric log level configured (or ``logging.NOTSET``)
    :raises ValueError: if FLASK_LOG_LEVEL names an unknown level
    """
    config_log_level = app.config.get('FLASK_LOG_LEVEL', None)
    # Set up format for default logging
    hostname = platform.node().split('.')[0]
    formatter = ('[%(asctime)s] %(levelname)s %(process)d [%(name)s] ' '%(filename)s:%(lineno)d - ' '[{hostname}] - %(message)s').format(hostname=hostname)
    config_log_int = None
    set_level = None
    if config_log_level:
        # Translate the level name (e.g. "debug") to its numeric value.
        config_log_int = getattr(logging, config_log_level.upper(), None)
        if not isinstance(config_log_int, int):
            raise ValueError('Invalid log level: {0}'.format(config_log_level))
        set_level = config_log_int
    # Set to NotSet if we still aren't set yet
    if not set_level:
        set_level = config_log_int = logging.NOTSET
    self.log_level = set_level
    # Setup basic StreamHandler logging with format and level (do
    # setup in case we are main, or change root logger if we aren't.
    logging.basicConfig(format=formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(set_level)
    # Get everything ready to setup the syslog handlers: prefer the local
    # syslog socket, fall back to UDP localhost:514.
    address = None
    if os.path.exists('/dev/log'):
        address = '/dev/log'
    elif os.path.exists('/var/run/syslog'):
        address = '/var/run/syslog'
    else:
        address = ('127.0.0.1', 514)
    # Add syslog handler before adding formatters
    root_logger.addHandler(SysLogHandler(address=address, facility=SysLogHandler.LOG_LOCAL0))
    self.set_formatter(formatter)
    return config_log_int
def apply_computation(cls, state: BaseState, message: Message, transaction_context: BaseTransactionContext) -> 'BaseComputation':
    """Perform the computation that would be triggered by the VM message.

    :param state: the chain state the computation runs against
    :param message: the VM message being applied
    :param transaction_context: context of the enclosing transaction
    :returns: the finished computation object
    """
    with cls(state, message, transaction_context) as computation:
        # Early exit on pre-compiles: run the native implementation and skip
        # bytecode interpretation entirely.
        if message.code_address in computation.precompiles:
            computation.precompiles[message.code_address](computation)
            return computation
        # Cache the flag so the debug check is cheap inside the hot loop.
        show_debug2 = computation.logger.show_debug2
        for opcode in computation.code:
            opcode_fn = computation.get_opcode_fn(opcode)
            if show_debug2:
                # pc has already advanced past this opcode, hence the -1.
                computation.logger.debug2("OPCODE: 0x%x (%s) | pc: %s", opcode, opcode_fn.mnemonic, max(0, computation.code.pc - 1),)
            try:
                opcode_fn(computation=computation)
            except Halt:
                # Halt (e.g. STOP/RETURN) ends bytecode execution cleanly.
                break
        return computation
def can_fetch_pool(self, request: Request):
    '''Return whether the request can be fetched based on the pool.'''
    url_info = request.url_info
    user_agent = request.fields.get('User-agent', '')
    # Without a cached robots.txt parser we cannot answer; the caller
    # needs to fetch and register the robots.txt first.
    if not self._robots_txt_pool.has_parser(url_info):
        raise NotInPoolError()
    return self._robots_txt_pool.can_fetch(url_info, user_agent)
def load_slice(self, state, start, end):
    """Return the memory objects overlapping with the provided slice.

    :param state: state the load is performed for (unused in this method)
    :param start: the start address
    :param end: the end address (non-inclusive)
    :returns: list of (starting_addr, memory_object) tuples
    """
    items = []
    # Reject requests entirely outside this page's address range.
    if start > self._page_addr + self._page_size or end < self._page_addr:
        l.warning("Calling load_slice on the wrong page.")
        return items
    # Clamp the requested range to the page bounds before scanning.
    for addr in range(max(start, self._page_addr), min(end, self._page_addr + self._page_size)):
        i = addr - self._page_addr
        mo = self._storage[i]
        if mo is None and hasattr(self, "from_dbg"):
            # Debugger-backed page: lazily pull the concrete byte from
            # the live target and cache it as a memory object.
            byte_val = get_debugger().get_byte(addr)
            mo = SimMemoryObject(claripy.BVV(byte_val, 8), addr)
            self._storage[i] = mo
        # Record each distinct memory object only once, keyed by the
        # first address at which it appears inside the slice.
        if mo is not None and (not items or items[-1][1] is not mo):
            items.append((addr, mo))
    return items
def expectation(self, observables, statistics, lag_multiple=1,
                observables_mean_free=False, statistics_mean_free=False):
    r"""Compute future expectation of observable or covariance using the approximated Koopman operator.

    Parameters
    ----------
    observables : np.ndarray((input_dimension, n_observables))
        Coefficients that express one or multiple observables in
        the basis of the input features.
    statistics : np.ndarray((input_dimension, n_statistics)), optional
        Coefficients that express one or multiple statistics in
        the basis of the input features.
        This parameter can be None. In that case, this method
        returns the future expectation value of the observable(s).
    lag_multiple : int
        If > 1, extrapolate to a multiple of the estimator's lag
        time by assuming Markovianity of the approximated Koopman
        operator.
    observables_mean_free : bool, default=False
        If true, coefficients in `observables` refer to the input
        features with feature means removed.
        If false, coefficients in `observables` refer to the
        unmodified input features.
    statistics_mean_free : bool, default=False
        If true, coefficients in `statistics` refer to the input
        features with feature means removed.
        If false, coefficients in `statistics` refer to the
        unmodified input features.

    Notes
    -----
    A "future expectation" of an observable g is the average of g
    computed over a time window that has the same total length as the
    input data from which the Koopman operator was estimated but is
    shifted by lag_multiple * tau time steps into the future (where
    tau is the lag time).

    It is computed with the equation:

    .. math::

        \mathbb{E}[g]_{\rho_{n}} = \mathbf{q}^{T} \mathbf{P}^{n-1} \mathbf{e}_{1}

    where

    .. math::

        P_{ij} = \sigma_{i} \langle \psi_{i}, \phi_{j} \rangle_{\rho_{1}}

    and

    .. math::

        q_{i} = \langle g, \phi_{i} \rangle_{\rho_{1}}

    and :math:`\mathbf{e}_{1}` is the first canonical unit vector.

    A model prediction of time-lagged covariances between the
    observable f and the statistic g at a lag-time of lag_multiple * tau
    is computed with the equation:

    .. math::

        \mathrm{cov}[g, \, f; n\tau] = \mathbf{q}^{T} \mathbf{P}^{n-1} \boldsymbol{\Sigma} \mathbf{r}

    where :math:`r_{i} = \langle \psi_{i}, f \rangle_{\rho_{0}}` and
    :math:`\boldsymbol{\Sigma} = \mathrm{diag}(\boldsymbol{\sigma})`.
    """
    # TODO: implement the case lag_multiple=0
    dim = self.dimension()
    # Augmented diagonal of singular values; the leading 1.0 accounts
    # for the constant function.
    S = np.diag(np.concatenate(([1.0], self.singular_values[0:dim])))
    V = self.V[:, 0:dim]
    U = self.U[:, 0:dim]
    m_0 = self.mean_0
    m_t = self.mean_t
    assert lag_multiple >= 1, 'lag_multiple = 0 not implemented'
    if lag_multiple == 1:
        P = S
    else:
        # Build the (dim+1)x(dim+1) propagator in the singular-function
        # basis and take its matrix power to extrapolate the Koopman
        # operator over lag_multiple steps (Markovianity assumption).
        p = np.zeros((dim + 1, dim + 1))
        p[0, 0] = 1.0
        p[1:, 0] = U.T.dot(m_t - m_0)
        p[1:, 1:] = U.T.dot(self.Ctt).dot(V)
        P = np.linalg.matrix_power(S.dot(p), lag_multiple - 1).dot(S)
    # Coefficients of the observables in the singular-function basis.
    # The leading column carries the mean contribution unless the
    # observables are declared mean free.
    Q = np.zeros((observables.shape[1], dim + 1))
    if not observables_mean_free:
        Q[:, 0] = observables.T.dot(m_t)
    Q[:, 1:] = observables.T.dot(self.Ctt).dot(V)
    if statistics is not None:
        # Coefficients of the statistics in the singular-function basis.
        R = np.zeros((statistics.shape[1], dim + 1))
        if not statistics_mean_free:
            R[:, 0] = statistics.T.dot(m_0)
        R[:, 1:] = statistics.T.dot(self.C00).dot(U)
    if statistics is not None:
        # compute lagged covariance
        return Q.dot(P).dot(R.T)
        # TODO: discuss whether we want to return this or the transpose
        # TODO: from MSMs one might expect the first index to refer to
        # the statistics, here it is the other way round
    else:
        # compute future expectation
        return Q.dot(P)[:, 0]
def dump_submission_data(self):
    """Dumps the current submission data to the submission file."""
    # Renew the dashboard config so the persisted state is current.
    self.submission_data["dashboard_config"] = self.dashboard.get_persistent_config()
    # Write the submission data to the output file as pretty-printed JSON.
    self._outputs["submission"].dump(self.submission_data, formatter="json", indent=4)
def _get_cache_name ( function ) :
"""returns a name for the module ' s cache db .""" | module_name = _inspect . getfile ( function )
module_name = _os . path . abspath ( module_name )
cache_name = module_name
# fix for ' < string > ' or ' < stdin > ' in exec or interpreter usage .
cache_name = cache_name . replace ( '<' , '_lt_' )
cache_name = cache_name . replace ( '>' , '_gt_' )
tmpdir = _os . getenv ( 'TMPDIR' ) or _os . getenv ( 'TEMP' ) or _os . getenv ( 'TMP' )
if tmpdir :
cache_name = tmpdir + '/filecache_' + cache_name . replace ( _os . sep , '@' )
cache_name += '.cache'
return cache_name |
def add_permission_for_apigateway(self, function_name, region_name, account_id, rest_api_id, random_id=None):
    # type: (str, str, str, str, Optional[str]) -> None
    """Authorize API gateway to invoke a lambda function if needed.

    This method builds the API gateway source ARN and delegates to
    ``self._add_lambda_permission_if_needed``, which first checks
    whether API gateway already has permission to call the lambda
    function and only adds the permission when it is missing.
    """
    source_arn = self._build_source_arn_str(region_name, account_id, rest_api_id)
    # NOTE(review): function_name is forwarded as function_arn — the
    # helper presumably accepts a name where an ARN is expected; confirm.
    self._add_lambda_permission_if_needed(
        source_arn=source_arn,
        function_arn=function_name,
        service_name='apigateway',
    )
def load_saved_records(self, status, records):
    """Load ALDB records from a set of saved records.

    :param status: an ALDBStatus value, or anything ALDBStatus() can
        coerce (e.g. its integer value).
    :param records: mapping of memory address -> record dict with keys
        'control_flags', 'group', 'address', 'data1', 'data2', 'data3'.
    """
    if isinstance(status, ALDBStatus):
        self._status = status
    else:
        # Coerce raw values (ints/names) into the enum.
        self._status = ALDBStatus(status)
    for mem_addr in records:
        rec = records[mem_addr]
        control_flags = int(rec.get('control_flags', 0))
        group = int(rec.get('group', 0))
        rec_addr = rec.get('address', '000000')
        data1 = int(rec.get('data1', 0))
        data2 = int(rec.get('data2', 0))
        data3 = int(rec.get('data3', 0))
        self[int(mem_addr)] = ALDBRecord(int(mem_addr), control_flags, group, rec_addr, data1, data2, data3)
    if self._status == ALDBStatus.LOADED:
        # Remember the highest memory address as the ALDB entry point.
        keys = list(self._records.keys())
        keys.sort(reverse=True)
        first_key = keys[0]
        self._mem_addr = first_key
def _set_xml_from_keys(self, root, item, **kwargs):
    """Create SubElements of root with kwargs.

    Args:
        root: Element to add SubElements to.
        item: Tuple key/value pair from self.data_keys to add.
        kwargs:
            For each item in self.data_keys, if it has a
            corresponding kwarg, create a SubElement at root with
            the kwarg's value.

            Int and bool values will be cast to string. (Int 10,
            bool False become string values "10" and "false").

            Dicts will be recursively added to their key's Element.
    """
    key, val = item
    # Reuse an existing child element when present, otherwise create it.
    target_key = root.find(key)
    if target_key is None:
        target_key = ElementTree.SubElement(root, key)
    if isinstance(val, dict):
        # Recurse: each entry becomes a child of this key's element;
        # the container element itself gets no text.
        for dict_item in val.items():
            self._set_xml_from_keys(target_key, dict_item, **kwargs)
        return
    # Convert kwarg data to the appropriate string.
    if key in kwargs:
        kwarg = kwargs[key]
        if isinstance(kwarg, bool):
            # bool must be checked before int: bool is an int subclass.
            kwargs[key] = str(kwargs[key]).lower()
        elif kwarg is None:
            kwargs[key] = ""
        elif isinstance(kwarg, int):
            kwargs[key] = str(kwargs[key])
        elif isinstance(kwarg, JSSObject):
            # Reference JSS objects by their name attribute.
            kwargs[key] = kwargs[key].name
    # Fall back to the template default when no kwarg was supplied.
    target_key.text = kwargs.get(key, val)
def surrogate_escape(error):
    """Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.

    Maps an undecodable byte to a lone surrogate code point in the
    range U+DC00..U+DCFF (byte value + 0xDC00), mirroring PEP 383.
    """
    chars = error.object[error.start:error.end]
    # The codec error machinery is expected to hand over a single byte.
    assert len(chars) == 1
    val = ord(chars)
    val += 0xdc00
    return __builtin__.unichr(val), error.end
def strip_prefixes(g: Graph):
    """Remove the @prefix declaration lines from the graph's turtle serialization for aesthetics."""
    turtle = g.serialize(format="turtle").decode()
    without_prefixes = re.sub(r'^@prefix .* .\n', '', turtle, flags=re.MULTILINE)
    return without_prefixes.strip()
def mask(self, dims=None, base=None, fill='deeppink', stroke='black', background=None):
    """Create a mask image with colored regions.

    Parameters
    ----------
    dims : tuple, optional, default = None
        Dimensions of embedding image,
        will be ignored if background image is provided.
    base : array-like, optional, default = None
        Base image, can provide a 2d or 3d array,
        if unspecified will be white.
    fill : str or array-like, optional, default = 'deeppink'
        String color specifier, or RGB value
    stroke : str or array-like, optional, default = 'black'
        String color specifier, or RGB value
    background : str or array-like, optional, default = None
        String color specifier, or RGB value
    """
    fill = getcolor(fill)
    stroke = getcolor(stroke)
    background = getcolor(background)
    if dims is None and base is None:
        # No embedding requested: shift coordinates to the bounding-box
        # origin so the mask is as small as possible.
        region = one(self.coordinates - self.bbox[0:2])
    else:
        region = self
    base = getbase(base=base, dims=dims, extent=self.extent, background=background)
    if fill is not None:
        # Paint every region coordinate in the fill color, one RGB
        # channel at a time.
        for channel in range(3):
            inds = asarray([[c[0], c[1], channel] for c in region.coordinates])
            base[inds.T.tolist()] = fill[channel]
    if stroke is not None:
        mn = [0, 0]
        mx = [base.shape[0], base.shape[1]]
        edge = region.outline(0, 1).coordinates
        # Drop outline points that fall outside the base image bounds.
        edge = [e for e in edge if all(e >= mn) and all(e < mx)]
        if len(edge) > 0:
            for channel in range(3):
                inds = asarray([[c[0], c[1], channel] for c in edge])
                base[inds.T.tolist()] = stroke[channel]
    return base
def remove_header_search_paths(self, paths, target_name=None, configuration_name=None):
    """Removes the given search paths from the HEADER_SEARCH_PATHS section of the target on the configurations.

    :param paths: A string or array of strings
    :param target_name: Target name or list of target names to remove the flag from or None for every target
    :param configuration_name: Configuration name to remove the flag from or None for every configuration
    :return: void
    """
    # Thin wrapper over the generic search-path removal helper.
    self.remove_search_paths(XCBuildConfigurationFlags.HEADER_SEARCH_PATHS, paths, target_name, configuration_name)
def add_param(self, param_name, layer_index, blob_index):
    """Add a param to the .params file.

    :param param_name: key under which the converted blob is stored
    :param layer_index: index of the source caffe layer in self.layers
    :param blob_index: index of the blob within that layer
    """
    blobs = self.layers[layer_index].blobs
    # Convert the caffe blob proto into an MXNet NDArray for storage.
    self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index]))
def text_filter(regex_base, value):
    """Helper method to regex replace images with captions in different markups."""
    regex = regex_base % {
        're_cap': r'[a-zA-Z0-9\.\,:;/_ \(\)\-\!\?"]+',
        're_img': r'[a-zA-Z0-9\.:/_\-\% ]+',
    }
    for match in re.findall(regex, value):
        image = match[1]
        # Strip MEDIA_URL so the thumbnailer receives a storage-relative path.
        if image.startswith(settings.MEDIA_URL):
            image = image[len(settings.MEDIA_URL):]
        thumbnail = get_thumbnail(image, str(sorl_settings.THUMBNAIL_FILTER_WIDTH))
        value = value.replace(match[1], thumbnail.url)
    return value
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts relevant install history entries.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        top_level (dict[str, object]): plist top-level key.
    """
    for entry in top_level:
        datetime_value = entry.get('date', None)
        package_identifiers = entry.get('packageIdentifiers', [])
        # Skip incomplete entries; both fields are required for an event.
        if not datetime_value or not package_identifiers:
            continue
        display_name = entry.get('displayName', '<UNKNOWN>')
        display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
        process_name = entry.get('processName', '<PROCESS_NAME>')
        package_identifiers = ', '.join(package_identifiers)
        event_data = plist_event.PlistTimeEventData()
        event_data.desc = (
            'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
            '{3:s}.').format(
                display_name, display_version, process_name,
                package_identifiers)
        event_data.key = ''
        event_data.root = '/item'
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def user_record(uid, type=0):
    """Fetch a user's play records (login required).

    :param uid: the user's id, obtainable via login or other endpoints
    :param type: (optional) record scope; 0 fetches all records,
        1 fetches weekData
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_RECORD'
    bot.data = {'type': type, 'uid': uid, "csrf_token": ""}
    bot.send()
    return bot.response
def agg_grid(grid, agg=None):
    """Summarize each cell of a 2d grid with a single value.

    Many functions return a 2d list with a complex data type in each
    cell. For instance, grids representing environments have a set of
    resources, while reading in multiple data files at once will yield
    a list containing the values for that cell from each file. In order
    to visualize these data types it is helpful to summarize them with
    a single number, e.g. taking the length of a resource set to count
    resource types, or the mode of a list to find the most common
    phenotype in a cell.

    grid - 2d list whose cells are to be summarized.
    agg  - callable applied to every cell. If None, ``string_avg`` is
           used when cells are non-empty lists of strings and ``mode``
           otherwise. (The original docstring claimed ``len`` was the
           default, which did not match the code.)

    Returns a new grid; the input grid is not modified.
    """
    grid = deepcopy(grid)
    if agg is None:
        # Inspect the first cell to pick a sensible default aggregator.
        # Guard against empty grids/rows/cells before indexing.
        first_cell = grid[0][0] if grid and grid[0] else None
        if isinstance(first_cell, list) and first_cell \
                and isinstance(first_cell[0], str):
            agg = string_avg
        else:
            agg = mode
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            grid[i][j] = agg(grid[i][j])
    return grid
def reload_webservers():
    """Reload apache2 and nginx on the remote host (Fabric, Python 2).

    Failures are tolerated via ``warn_only`` so a bad reload does not
    abort the deployment; always returns True.
    """
    if env.verbosity:
        print env.host, "RELOADING apache2"
    with settings(warn_only=True):
        a = sudo("/etc/init.d/apache2 reload")
        if env.verbosity:
            print '', a
    if env.verbosity:
        # Reload used to fail on Ubuntu but at least in 10.04 it works
        print env.host, "RELOADING nginx"
    with settings(warn_only=True):
        # nginx cannot be reloaded when it is not running, so check its
        # status first and fall back to starting it.
        s = run("/etc/init.d/nginx status")
        if 'running' in s:
            n = sudo("/etc/init.d/nginx reload")
        else:
            n = sudo("/etc/init.d/nginx start")
        if env.verbosity:
            print ' *', n
    return True
def reload_config(self, async=True, verbose=False):
    '''Initiate a config reload. This may take a while on large installations.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
    this method is Python 2 only as written; renaming the parameter
    would break keyword-argument callers.

    :param async: request an asynchronous reload when the server API
        supports it (>= 4.5.0); silently downgraded to synchronous
        otherwise.
    :param verbose: passed through to the HTTP POST helper.
    '''
    # If we're using an API version older than 4.5.0, don't use async
    # NOTE(review): float() parsing breaks for versions like '4.10' or
    # '4.5.0' — confirm the server's api_version format.
    api_version = float(self.api_version()['api_version'])
    if api_version < 4.5:
        async = False
    url = '{}/{}{}'.format(self.rest_url, 'reload', '?asynchronous=1' if async else '')
    return self.__auth_req_post(url, verbose=verbose)
def _all_dims ( x , default_dims = None ) :
"""Returns a list of dims in x or default _ dims if the rank is unknown .""" | if x . get_shape ( ) . ndims is not None :
return list ( xrange ( x . get_shape ( ) . ndims ) )
else :
return default_dims |
def from_master_password(cls, password, network=BitcoinMainNet):
    """Generate a new key from a master password.

    This password is hashed via a single round of sha256 and is highly
    breakable, but it's the standard brainwallet approach.

    See `PrivateKey.from_master_password_slow` for a slightly more
    secure generation method (which will still be subject to a rainbow
    table attack :\\)
    """
    digest = sha256(ensure_bytes(password)).hexdigest()
    return cls.from_hex_key(digest, network)
def latinize(mapping, bind, values):
    """Transliterate each given string value into the latin alphabet.

    Non-string values are yielded unchanged; ``mapping`` and ``bind``
    are part of the transform interface and are not used here.
    """
    for value in values:
        if isinstance(value, six.string_types):
            value = transliterate(value)
        yield value
def build_content_handler(parent, filter_func):
    """Build a `~xml.sax.handler.ContentHandler` with a given filter.

    :param parent: content handler class to subclass
    :param filter_func: element filter forwarded to the parent
        constructor alongside the document
    :returns: the new handler class, registered via
        ``ligo.lw.lsctables.use_in``
    """
    from ligo.lw.lsctables import use_in

    class _ContentHandler(parent):  # pylint: disable=too-few-public-methods
        def __init__(self, document):
            # Bind filter_func at class-creation time: SAX instantiates
            # the handler with only the document argument.
            super(_ContentHandler, self).__init__(document, filter_func)

    return use_in(_ContentHandler)
def _onerror(self, result):
    """To execute on execution failure.

    :param cdumay_result.Result result: Execution result
    :return: Execution result
    :rtype: cdumay_result.Result
    """
    self._set_status("FAILED", result)
    # Log with structured extras (kmsg/kresult) so downstream log
    # processors can reconstruct the originating message and result.
    logger.error(
        "{}.Failed: {}[{}]: {}".format(self.__class__.__name__, self.__class__.path, self.uuid, result),
        extra=dict(
            kmsg=Message(self.uuid, entrypoint=self.__class__.path, params=self.params).dump(),
            kresult=ResultSchema().dump(result) if result else dict()
        )
    )
    # Give subclasses a hook to customize failure handling.
    return self.onerror(result)
def handle_default_args(args):
    """Apply argument handling shared by all commands.

    Currently this prepends ``args.pythonpath`` (when the attribute
    exists and is non-empty) to ``sys.path`` so user modules can be
    imported.
    """
    pythonpath = getattr(args, 'pythonpath', None)
    if pythonpath:
        sys.path.insert(0, pythonpath)
def stop_program(self, turn_off_load=True):
    """Stops running programmed test sequence.

    :param turn_off_load: when True, also clear the internal
        ``load_on`` flag if the load was on.
    :return: None
    """
    # Build and transmit the stop-program command frame.
    self.__set_buffer_start(self.CMD_STOP_PROG)
    self.__set_checksum()
    self.__send_buffer()
    if turn_off_load and self.load_on:
        # NOTE(review): only the internal flag is updated here —
        # presumably stopping the program turns the device's load off
        # as a side effect; confirm against the device protocol.
        self.load_on = False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.