signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_composition_search_session(self, proxy):
    """Get a composition search session.

    arg proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.CompositionSearchSession) - a
        CompositionSearchSession
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_composition_search() is false
    compliance: optional - This method must be implemented if
        supports_composition_search() is true.
    """
    if not self.supports_composition_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # NOTE(review): originally intended to become OperationFailed();
        # currently re-raised unchanged.
        raise
    converted_proxy = self._convert_proxy(proxy)
    try:
        search_session = sessions.CompositionSearchSession(
            converted_proxy, runtime=self._runtime)
    except AttributeError:
        # NOTE(review): originally intended to become OperationFailed();
        # currently re-raised unchanged.
        raise
    return search_session
def _process_status(self, status):
    """Process the latest status update and wake any waiters."""
    screen_id = status.get(ATTR_SCREEN_ID)
    self._screen_id = screen_id
    # Signal that a fresh status has been processed.
    self.status_update_event.set()
def pick(self, connections):
    """Pick the connection with the earliest backoff time.

    A connection without a backoff time sorts as ``datetime.min``, so the
    first connection keeps being picked for as long as it has no backoff
    time. Otherwise, the connections are tried in a round robin fashion.

    Args:
        connections (:obj:`list`): List of
            :class:`~bigchaindb_driver.connection.Connection` instances.

    Returns:
        The connection with the earliest backoff time.
    """
    if len(connections) == 1:
        return connections[0]

    def backoff_key(conn):
        # Treat "no backoff" as the earliest possible time.
        return datetime.min if conn.backoff_time is None else conn.backoff_time

    # Pass the sequence itself; the original unpacked it with ``*``, which
    # is unidiomatic and fails with a confusing TypeError on empty input.
    return min(connections, key=backoff_key)
def _inherit_parent_kwargs ( self , kwargs ) :
"""Extract any necessary attributes from parent serializer to
propagate down to child serializer .""" | if not self . parent or not self . _is_dynamic :
return kwargs
if 'request_fields' not in kwargs : # If ' request _ fields ' isn ' t explicitly set , pull it from the
# parent serializer .
request_fields = self . _get_request_fields_from_parent ( )
if request_fields is None : # Default to ' id _ only ' for nested serializers .
request_fields = True
kwargs [ 'request_fields' ] = request_fields
if self . embed and kwargs . get ( 'request_fields' ) is True : # If ' embed ' then make sure we fetch the full object .
kwargs [ 'request_fields' ] = { }
if hasattr ( self . parent , 'sideloading' ) :
kwargs [ 'sideloading' ] = self . parent . sideloading
if hasattr ( self . parent , 'debug' ) :
kwargs [ 'debug' ] = self . parent . debug
return kwargs |
def version(i):
    """Report the module version.

    Input:  {}
    Output: the dict returned by get_version(), including 'version_str'.
    """
    out_mode = i.get('out', '')
    result = get_version({})
    if result['return'] > 0:
        return result
    version_str = result['version_str']
    if out_mode == 'con':
        # Console output requested: print the version with a 'V' prefix.
        out('V' + version_str)
    return result
def decode_str(s, free=False):
    """Decode a SymbolicStr into a Python unicode string.

    Invalid UTF-8 sequences are replaced; when ``free`` is true the
    underlying native string is released afterwards.
    """
    try:
        if s.len == 0:
            return u""
        raw = ffi.unpack(s.data, s.len)
        return raw.decode("utf-8", "replace")
    finally:
        if free:
            lib.semaphore_str_free(ffi.addressof(s))
def get_readable(self):
    '''Get a human-readable representation of the url (as unicode string,
    IRI according to RFC 3987).'''
    if self.query:
        pairs = [u'{}={}'.format(urlquote(k), urlquote(v))
                 for k, v in six.iteritems(self.query)]
        query = u'?' + u'&'.join(pairs)
    else:
        query = ''
    hash_part = u'' if self.fragment is None else u'#' + self.fragment
    path, query, hash_part = uri_to_iri_parts(self.path, query, hash_part)
    if not self.host:
        # Relative reference: no scheme/authority component.
        return path + query + hash_part
    # NOTE(review): self.port is assumed to already be a string here.
    port = u':' + self.port if self.port else u''
    return u''.join((self.scheme, '://', self.host, port, path, query, hash_part))
def filter_matrix_columns(A, theta):
    """Filter each column of A with a drop tolerance.

    i.e., drop all entries in column k where
        abs(A[i, k]) < tol * max(abs(A[:, k]))

    Parameters
    ----------
    A : sparse_matrix
    theta : float
        In range [0, 1); defines the drop-tolerance used to filter the
        columns of A

    Returns
    -------
    A_filter : sparse_matrix
        Each column has been filtered by dropping all entries where
        abs(A[i, k]) < tol * max(abs(A[:, k]))

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.utils import filter_matrix_columns
    >>> from scipy import array
    >>> from scipy.sparse import csr_matrix
    >>> A = csr_matrix(array([[0.24, 1., 0.],
    ...                       [-0.5, 1., -0.5],
    ...                       [0., 0.49, 1.],
    ...                       [0., 0., -0.5]]))
    >>> filter_matrix_columns(A, 0.5).todense()
    matrix([[ 0. ,  1. ,  0. ],
            [-0.5,  1. , -0.5],
            [ 0. ,  0. ,  1. ],
            [ 0. ,  0. , -0.5]])
    """
    if not isspmatrix(A):
        raise ValueError("Sparse matrix input needed")
    if isspmatrix_bsr(A):
        # Remember the block size so the result can be converted back.
        blocksize = A.blocksize
    Aformat = A.format
    if (theta < 0) or (theta >= 1.0):
        raise ValueError("theta must be in [0,1)")
    # Apply drop-tolerance to each column of A, which is most easily
    # accessed by converting to CSC. We apply the drop-tolerance with
    # amg_core.classical_strength_of_connection(), which ignores
    # diagonal entries, thus necessitating the trick where we add
    # A.shape[1] to each of the column indices so no entry ever looks
    # like a diagonal to the kernel.
    A = A.copy().tocsc()
    A_filter = A.copy()
    A.indices += A.shape[1]
    A_filter.indices += A.shape[1]
    # classical_strength_of_connection takes an absolute value internally
    pyamg.amg_core.classical_strength_of_connection_abs(A.shape[1], theta, A.indptr, A.indices, A.data, A_filter.indptr, A_filter.indices, A_filter.data)
    # Undo the index offset and rebuild a compact CSC matrix from the
    # surviving (filtered) entries only.
    A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1]
    A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]], A_filter.indices[:A_filter.indptr[-1]], A_filter.indptr), shape=A_filter.shape)
    del A
    # Convert back to the caller's original sparse format.
    if Aformat == 'bsr':
        A_filter = A_filter.tobsr(blocksize)
    else:
        A_filter = A_filter.asformat(Aformat)
    return A_filter
def add_version_info(matrix, version):
    """Add the version information to the matrix.

    For versions < 7 this is a no-op.
    ISO/IEC 18004:2015(E) -- 7.10 Version information (page 58)
    """
    # module 0 = least significant bit
    # module 17 = most significant bit
    # Figure 27 - Version information positioning (page 58)
    #   Lower left           Upper right
    #   0  3  6  9 12 15     0  1  2
    #   1  4  7 10 13 16     3  4  5
    #   2  5  8 11 14 17     6  7  8
    #                        9 10 11
    #                       12 13 14
    #                       15 16 17
    if version < 7:
        return
    version_info = consts.VERSION_INFO[version - 7]
    for i in range(6):
        # Extract three consecutive bits of the 18-bit version info
        # for column/row i (bits i*3, i*3+1, i*3+2).
        bit1 = (version_info >> (i * 3)) & 0x01
        bit2 = (version_info >> ((i * 3) + 1)) & 0x01
        bit3 = (version_info >> ((i * 3) + 2)) & 0x01
        # Lower left block: rows -11..-9, columns 0..5.
        matrix[-11][i] = bit1
        matrix[-10][i] = bit2
        matrix[-9][i] = bit3
        # Upper right block: the transposed placement, rows 0..5,
        # columns -11..-9.
        matrix[i][-11] = bit1
        matrix[i][-10] = bit2
        matrix[i][-9] = bit3
def uninitialize_ui(self):
    """Uninitialize the Component ui.

    Always raises: this Component's ui is not allowed to be uninitialized.
    """
    message = "{0} | '{1}' Component ui cannot be uninitialized!".format(
        self.__class__.__name__, self.name)
    raise foundations.exceptions.ProgrammingError(message)
def rmse(params1, params2):
    r"""Compute the root-mean-squared error between two models.

    Both parameter vectors are mean-centered first, so a constant offset
    between the two models does not contribute to the error.

    Parameters
    ----------
    params1 : array_like
        Parameters of the first model.
    params2 : array_like
        Parameters of the second model.

    Returns
    -------
    error : float
        Root-mean-squared error.

    Raises
    ------
    ValueError
        If the two parameter vectors differ in length.
    """
    # Use a real exception instead of ``assert``: asserts are stripped
    # when Python runs with -O.
    if len(params1) != len(params2):
        raise ValueError("params1 and params2 must have the same length")
    params1 = np.asarray(params1) - np.mean(params1)
    params2 = np.asarray(params2) - np.mean(params2)
    sqrt_n = math.sqrt(len(params1))
    return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0):
    """Return df/dx and df/dy of the function (integral of NFW).

    The elliptical deflection is computed by rotating the coordinates
    into the ellipse frame, rescaling the axes by sqrt(1 -/+ e),
    evaluating the spherical profile there, and rotating the resulting
    deflections back.
    """
    # Translate the ellipticity components into position angle and
    # axis ratio.
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    x_shift = x - center_x
    y_shift = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    e = abs(1 - q)
    # Rotate into the major/minor-axis frame and squeeze/stretch the axes.
    x_ = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e)
    y_ = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e)
    # Deflections of the underlying spherical profile at the mapped point.
    f_x_prim, f_y_prim = self.spherical.derivatives(x_, y_, sigma0, Rs)
    f_x_prim *= np.sqrt(1 - e)
    f_y_prim *= np.sqrt(1 + e)
    # Rotate the deflection field back into the original frame.
    f_x = cos_phi * f_x_prim - sin_phi * f_y_prim
    f_y = sin_phi * f_x_prim + cos_phi * f_y_prim
    return f_x, f_y
def parity_discover_next_available_nonce(web3: Web3, address: AddressHex, ) -> Nonce:
    """Return the next available nonce for ``address`` (Parity-only RPC)."""
    encoded_nonce = web3.manager.request_blocking('parity_nextNonce', [address])
    # The endpoint returns a hex string, e.g. '0x1f'.
    return Nonce(int(encoded_nonce, 16))
def main():
    """
    NAME
        dayplot_magic.py

    DESCRIPTION
        makes 'day plots' (Day et al. 1977) and squareness/coercivity plots,
        plots the 'linear mixing' curve from Dunlop and Carter-Stiglitz
        (2006), and squareness/coercivity of remanence (Neel, 1955) plots
        after Tauxe et al. (2002)

    SYNTAX
        dayplot_magic.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f: specify input hysteresis file, default is specimens.txt
        -fmt [svg,png,jpg] format for output plots, default svg
        -sav saves plots and quits quietly
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    dir_path = pmag.get_named_arg('-WD', '.')
    fmt = pmag.get_named_arg('-fmt', 'svg')
    save_plots = False
    interactive = True
    if '-sav' in sys.argv:
        # Quiet mode: save figures and skip interactive display.
        save_plots = True
        interactive = False
    infile = pmag.get_named_arg("-f", "specimens.txt")
    ipmag.dayplot_magic(dir_path, infile, save=save_plots, fmt=fmt, interactive=interactive)
def _process_converter ( self , f , filt = None ) :
"""Take a conversion function and possibly recreate the frame .""" | if filt is None :
filt = lambda col , c : True
needs_new_obj = False
new_obj = dict ( )
for i , ( col , c ) in enumerate ( self . obj . iteritems ( ) ) :
if filt ( col , c ) :
new_data , result = f ( col , c )
if result :
c = new_data
needs_new_obj = True
new_obj [ i ] = c
if needs_new_obj : # possibly handle dup columns
new_obj = DataFrame ( new_obj , index = self . obj . index )
new_obj . columns = self . obj . columns
self . obj = new_obj |
def sanitize_words(self, words):
    """Guarantee that all textual symbols are unicode.

    Note:
        We do not convert numbers, only strings to unicode.
        We assume that the strings are encoded in utf-8.
    """
    def to_unicode(w):
        # Python 2: promote encoded byte strings to unicode; leave
        # everything else (numbers, already-unicode text) untouched.
        if isinstance(w, string_types) and not isinstance(w, unicode):
            return unicode(w, encoding="utf-8")
        return w
    return [to_unicode(w) for w in words]
def canonical_request(self):
    """The AWS SigV4 canonical request given parameters from an HTTP request.

    This process is outlined here:
    http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html

    The canonical request is:
        request_method + '\\n' +
        canonical_uri_path + '\\n' +
        canonical_query_string + '\\n' +
        canonical_headers + '\\n' +
        signed_header_names + '\\n' +
        sha256(body).hexdigest()
    """
    signed = self.signed_headers
    # One "key:value\n" line per signed header, already newline-terminated.
    header_lines = "".join("%s:%s\n" % item for item in iteritems(signed))
    header_keys = ";".join(iterkeys(self.signed_headers))
    parts = (
        self.request_method,
        self.canonical_uri_path,
        self.canonical_query_string,
        header_lines,
        header_keys,
        sha256(self.body).hexdigest(),
    )
    return "\n".join(parts)
def compile_geo(d):
    """Compile the top-level Geography dictionary.

    :param d: raw geography data
    :return: GeoJSON-style dict (single Feature or a FeatureCollection)
    """
    logger_excel.info("enter compile_geo")
    compiled = OrderedDict()
    # Get max number of sites, or number of coordinate points given.
    num_loc = _get_num_locations(d)
    if num_loc > 1:
        # More than one location: wrap every parsed site in a collection.
        compiled["type"] = "FeatureCollection"
        compiled["features"] = [_parse_geo_locations(d, idx)
                                for idx in range(0, num_loc)]
    elif num_loc == 1:
        # Exactly one location: emit it directly.
        compiled = _parse_geo_location(d)
    logger_excel.info("exit compile_geo")
    return compiled
def serialize(self, now=None):
    """Serialize this amdSec and all children to an lxml Element.

    :param str now: Default value for CREATED in children if none set
    :return: amdSec Element with all children appended
    """
    if self._tree is not None:
        # Parsed from an existing tree: reuse it untouched.
        return self._tree
    root = etree.Element(utils.lxmlns("mets") + self.tag, ID=self.id_string)
    self.subsections.sort()
    for subsection in self.subsections:
        root.append(subsection.serialize(now))
    return root
def rlmb_base_stochastic_discrete():
    """Base setting with stochastic discrete model."""
    hparams = rlmb_base()
    hparams.learning_rate_bump = 1.0
    hparams.grayscale = False
    # Use the stochastic-discrete next-frame model together with its
    # matching hparams set of the same name.
    hparams.generative_model = "next_frame_basic_stochastic_discrete"
    hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
    # The parameters below are the same as base, but repeated for easier
    # reading.
    hparams.ppo_epoch_length = 50
    hparams.simulated_rollout_length = 50
    hparams.simulated_batch_size = 16
    return hparams
def yesno(self, s, yesno_callback, casual=False):
    """Ask a question and prepare to receive a yes-or-no answer.

    The prompt is written immediately; the callback and its "casual"
    flag are stored until the answer arrives.
    """
    self.write(s)
    self.yesno_callback = yesno_callback
    self.yesno_casual = casual
def descr_prototype(self, buf):
    """Describe the prototype ("head") of the function.

    Appends one line of LLVM IR text to ``buf``.
    """
    # Functions with a body are "define"d; external ones are "declare"d.
    state = "define" if self.blocks else "declare"
    ret = self.return_value
    args = ", ".join(str(a) for a in self.args)
    name = self.get_reference()
    attrs = self.attributes
    if any(self.args):
        # Variadic marker follows the named arguments with a comma.
        vararg = ', ...' if self.ftype.var_arg else ''
    else:
        # No named arguments: no leading comma before the marker.
        vararg = '...' if self.ftype.var_arg else ''
    linkage = self.linkage
    cconv = self.calling_convention
    # Join only the non-empty optional components to avoid double spaces.
    prefix = " ".join(str(x) for x in [state, linkage, cconv, ret] if x)
    metadata = self._stringify_metadata()
    prototype = "{prefix} {name}({args}{vararg}) {attrs}{metadata}\n".format(prefix=prefix, name=name, args=args, vararg=vararg, attrs=attrs, metadata=metadata)
    buf.append(prototype)
def execute(self, query_string, params=None):
    """Execute a query and return the resulting cursor.

    :query_string: the parameterized query string
    :params: can be either a tuple or a dictionary, and must match the
        parameterization style of the query
    :return: a cursor object
    """
    cursor = self.connection.cursor()
    logger.info("SQL: %s (%s)", query_string, params)
    self.last_query = (query_string, params)
    started = time.time()
    cursor.execute(query_string, params or self.core.empty_params)
    elapsed_ms = (time.time() - started) * 1000
    logger.info("RUNTIME: %.2f ms", elapsed_ms)
    self._update_cursor_stats(cursor)
    return cursor
def _log ( self , message ) :
"""Log a debug message prefixed with order book name .
: param message : Debug message .
: type message : str | unicode""" | self . _logger . debug ( "{}: {}" . format ( self . name , message ) ) |
def getAllNodeUids(self):
    '''getAllNodeUids - Return every unique internal ID from
    getAllChildNodeUids, plus this tag's own uid.

    @return set<uuid.UUID> A set of uuid objects
    '''
    # Copy the children's uids into a fresh set, then add our own.
    uids = set(self.getAllChildNodeUids())
    uids.add(self.uid)
    return uids
def uptodate(name, software=True, drivers=False, skip_hidden=False, skip_mandatory=False, skip_reboot=True, categories=None, severities=None, ):
    '''
    Ensure Microsoft Updates that match the passed criteria are installed.
    Updates will be downloaded if needed.

    This state allows you to update a system without specifying a specific
    update to apply. All matching updates will be installed.

    Args:
        name (str):
            The name has no functional value and is only used as a
            tracking reference
        software (bool):
            Include software updates in the results (default is True)
        drivers (bool):
            Include driver updates in the results (default is False)
        skip_hidden (bool):
            Skip updates that have been hidden. Default is False.
        skip_mandatory (bool):
            Skip mandatory updates. Default is False.
        skip_reboot (bool):
            Skip updates that require a reboot. Default is True.
        categories (list):
            Specify the categories to list. Must be passed as a list. All
            categories returned by default. Categories include:
            Critical Updates, Definition Updates, Drivers (set
            drivers=True), Feature Packs, Security Updates, Update
            Rollups, Updates, Windows 7, Windows 8.1, Windows 8.1
            drivers, Windows 8.1 and later drivers, Windows Defender
        severities (list):
            Specify the severities to include. Must be passed as a list.
            All severities returned by default. Severities include:
            Critical, Important

    Returns:
        dict: A dictionary containing the results of the update

    CLI Example:

    .. code-block:: yaml

        # Update the system using the state defaults
        update_system:
          wua.uptodate

        # Update the drivers
        update_drivers:
          wua.uptodate:
            - software: False
            - drivers: True
            - skip_reboot: False

        # Apply all critical updates
        update_critical:
          wua.uptodate:
            - severities:
              - Critical
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    wua = salt.utils.win_update.WindowsUpdateAgent()
    # Query the agent for not-yet-installed updates matching the filters.
    available_updates = wua.available(skip_hidden=skip_hidden, skip_installed=True, skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, software=software, drivers=drivers, categories=categories, severities=severities)
    # No updates found
    if available_updates.count() == 0:
        ret['comment'] = 'No updates found'
        return ret
    updates = list(available_updates.list().keys())
    # Search for updates
    install_list = wua.search(updates)
    # List of updates to download
    download = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsDownloaded):
            download.updates.Add(item)
    # List of updates to install
    install = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.data.is_true(item.IsInstalled):
            install.updates.Add(item)
    # Return comment of changes if test.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Updates will be installed:'
        for update in install.updates:
            ret['comment'] += '\n'
            ret['comment'] += ': '.join([update.Identity.UpdateID, update.Title])
        return ret
    # Download updates
    wua.download(download)
    # Install updates
    wua.install(install)
    # Refresh windows update info
    wua.refresh()
    post_info = wua.updates().list()
    # Verify the installation
    for item in install.list():
        if not salt.utils.data.is_true(post_info[item]['Installed']):
            ret['changes']['failed'] = {item: {'Title': post_info[item]['Title'][:40] + '...', 'KBs': post_info[item]['KBs']}}
            ret['result'] = False
        else:
            ret['changes']['installed'] = {item: {'Title': post_info[item]['Title'][:40] + '...', 'NeedsReboot': post_info[item]['NeedsReboot'], 'KBs': post_info[item]['KBs']}}
    if ret['changes'].get('failed', False):
        ret['comment'] = 'Updates failed'
    else:
        ret['comment'] = 'Updates installed successfully'
    return ret
def total_clicks(self, url):
    """Total clicks implementation for Bit.ly.

    Args:
        url: the URL you want to get the total clicks count for

    Returns:
        An int containing the total clicks count, or 0 if the value in
        the API response cannot be parsed

    Raises:
        BadAPIResponseException: If the API returns an error as response
    """
    url = self.clean_url(url)
    clicks_url = f'{self.api_url}v3/link/clicks'
    params = {'link': url, 'access_token': self.api_key, 'format': 'txt'}
    response = self._get(clicks_url, params=params)
    if not response.ok:
        raise BadAPIResponseException(response.content)
    try:
        total_clicks = int(response.text)
    except (KeyError, TypeError, ValueError) as e:
        # ``int()`` raises ValueError on malformed text; the original
        # except tuple missed it and let that propagate.
        logger.warning('Bad value from total_clicks response: %s', e)
        return 0
    return total_clicks
def parse_dm_header(f, outdata=None):
    """Read or write the start of a DM file.

    We check for some magic values and then treat the next entry as a
    tag_root. If ``outdata`` is supplied, we write instead of read, using
    the dictionary ``outdata`` as a source. Hopefully
    ``parse_dm_header(newf, outdata=parse_dm_header(f))`` copies f to newf.
    """
    # filesize is sizeondisk - 16. But we have 8 bytes of zero at the end
    # of the file.
    if outdata is not None:
        # this means we're WRITING to the file
        if verbose:
            print("write_dm_header start", f.tell())
        # file_size (-1) is a placeholder, patched below once the real
        # end of the tag data is known.
        ver, file_size, endianness = 3, -1, 1
        put_into_file(f, "> l l l", ver, file_size, endianness)
        start = f.tell()
        parse_dm_tag_root(f, outdata)
        end = f.tell()
        # start is end of 3-long header. We want to write the 2nd long.
        f.seek(start - 8)
        # the real file size. We started counting after the 12-byte
        # version, fs, end and we need to subtract 16 total:
        put_into_file(f, "> l", end - start + 4)
        f.seek(end)
        # Trailing 8 zero bytes terminate the file.
        enda, endb = 0, 0
        put_into_file(f, "> l l", enda, endb)
        if verbose:
            print("write_dm_header end", f.tell())
    else:
        if verbose:
            print("read_dm_header start", f.tell())
        ver = get_from_file(f, "> l")
        assert ver in [3, 4], "Version must be 3 or 4, not %s" % ver
        # argh. why a global?
        # The size field width depends on the DM version: unsigned long
        # for v3, unsigned long long for v4.
        global size_type, version
        if ver == 3:
            size_type = 'L'
            # may be Q?
            version = 3
        if ver == 4:
            size_type = 'Q'
            # may be Q?
            version = 4
        file_size, endianness = get_from_file(f, ">%c l" % size_type)
        assert endianness == 1, "Endianness must be 1, not %s" % endianness
        start = f.tell()
        ret = parse_dm_tag_root(f, outdata)
        end = f.tell()
        # print("fs", file_size, end - start, (end - start) % 8)
        # mfm 2013-07-11 the file_size value is not always end - start,
        # sometimes there seems to be an extra 4 bytes, other times not.
        # Let's just ignore it for the moment
        # assert(file_size == end - start)
        enda, endb = get_from_file(f, "> l l")
        assert (enda == endb == 0)
        if verbose:
            print("read_dm_header end", f.tell())
        return ret
def get_event_description(event_type, event_code):
    """Retrieve the human-readable description of an LRR event.

    :param event_type: Base LRR event type. Use LRR_EVENT_TYPE.*
    :type event_type: int
    :param event_code: LRR event code
    :type event_code: int

    :returns: string description, 'Unknown' when the type or code is not
        recognized
    """
    lookup_map = LRR_TYPE_MAP.get(event_type, None)
    if lookup_map is None:
        # Unrecognized event type.
        return 'Unknown'
    return lookup_map.get(event_code, 'Unknown')
def save_book(self, book_form, *args, **kwargs):
    """Pass through to provider BookAdminSession.update_book"""
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.update_bin
    # Route to update or create depending on the form's intent.
    if book_form.is_for_update():
        handler = self.update_book
    else:
        handler = self.create_book
    return handler(book_form, *args, **kwargs)
def get_parameter_values(self):
    """Return a dictionary of variables with `type` :class:`CFNType`.

    Returns:
        dict: variables that need to be submitted as CloudFormation
        Parameters. Will be a dictionary of <parameter name>:
        <parameter value>.
    """
    parameters = {}
    for var_name, variable in self.get_variables().items():
        try:
            parameters[var_name] = variable.to_parameter_value()
        except AttributeError:
            # Not a CFN parameter variable; skip it.
            continue
    return parameters
def uptime_check_config_path(cls, project, uptime_check_config):
    """Return a fully-qualified uptime_check_config string."""
    template = "projects/{project}/uptimeCheckConfigs/{uptime_check_config}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        uptime_check_config=uptime_check_config,
    )
def serve(request, server):
    """Twisted Web adapter. It has two arguments:

    #. ``request`` is a Twisted Web request object,
    #. ``server`` is a pyws server object.

    The Twisted request is transformed into a pyws request object and fed
    to the server; the response's ``Content-Type`` header and HTTP status
    code are set on the Twisted request, and the response text returned.
    """
    # GET requests carry no body worth reading.
    body = request.content.read() if not request.method == 'GET' else ''
    pyws_request = Request('/'.join(request.postpath), body, request.args, request.args, {})
    response = server.process_request(pyws_request)
    request.setHeader('Content-Type', response.content_type)
    request.setResponseCode(get_http_response_code_num(response))
    return response.text
def fw_create(self, data, fw_name=None, cache=False):
    """Top level FW create function."""
    LOG.debug("FW create %s", data)
    try:
        self._fw_create(fw_name, data, cache)
    except Exception as exc:
        # Best-effort entry point: log the failure instead of
        # propagating it to the caller.
        LOG.error("Exception in fw_create %s", str(exc))
def start_service(addr, n):
    """Start an echo service, serve exactly ``n`` requests, and print
    throughput stats."""
    service = Service(addr)
    started = time.time()
    for _ in range(n):
        # Echo each request straight back to the sender.
        message = service.socket.recv()
        service.socket.send(message)
    service.socket.close()
    elapsed = time.time() - started
    print('Raw REP service stats:')
    util.print_stats(n, elapsed)
    return
def clear_components(self):
    """Clear all of the registered components."""
    # Drop any overlays first, then unregister every component.
    ComponentRegistry._component_overlays = {}
    for component_key in self.list_components():
        self.remove_component(component_key)
def simple_layer_stack(include_encdec_attention, num_layers=6, d_ff=2048, num_heads=8, d_kv=128, dropout_rate=0.1):
    """Create a layer stack.

    Args:
        include_encdec_attention: a boolean
        num_layers: an integer
        d_ff: an integer
        num_heads: an integer
        d_kv: an integer
        dropout_rate: a float

    Returns:
        a LayerStack
    """
    layers = []
    for _ in xrange(num_layers):
        # Each block: self-attention, optional encoder-decoder attention,
        # then the feed-forward sublayer.
        layers.append(transformer_layers.SelfAttention(
            num_heads=num_heads, key_value_size=d_kv,
            attention_kwargs={"dropout_rate": dropout_rate}))
        if include_encdec_attention:
            layers.append(transformer_layers.EncDecAttention(
                num_heads=num_heads, key_value_size=d_kv,
                attention_kwargs={"dropout_rate": dropout_rate}))
        layers.append(transformer_layers.DenseReluDense(
            hidden_size=d_ff, dropout_rate=dropout_rate))
    return transformer.LayerStack(layers)
def prepare_url(self, url, params):
    """Prepares the given HTTP URL.

    Normalizes, IDNA-encodes and re-quotes ``url``, merges ``params``
    into the query string and stores the result on ``self.url``.
    """
    url = to_native_string(url)
    # Don't do any URL preparation for non-HTTP schemes like `mailto`,
    # `data` etc to work around exceptions from `url_parse`, which
    # handles RFC 3986 only.
    if ':' in url and not url.lower().startswith('http'):
        self.url = url
        return
    # Support for unicode domain names and paths.
    scheme, auth, host, port, path, query, fragment = parse_url(url)
    if not scheme:
        raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
                            "Perhaps you meant http://{0}?".format(url))
    if not host:
        raise InvalidURL("Invalid URL %r: No host supplied" % url)
    # Only want to apply IDNA to the hostname
    try:
        host = host.encode('idna').decode('utf-8')
    except UnicodeError:
        raise InvalidURL('URL has an invalid label.')
    # Carefully reconstruct the network location
    netloc = auth or ''
    if netloc:
        netloc += '@'
    netloc += host
    if port:
        netloc += ':' + str(port)
    # Bare domains aren't valid URLs.
    if not path:
        path = '/'
    if is_py2:
        # Python 2: urlunparse can choke on mixed str/unicode, so every
        # component is encoded to UTF-8 bytes first.
        if isinstance(scheme, str):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, str):
            netloc = netloc.encode('utf-8')
        if isinstance(path, str):
            path = path.encode('utf-8')
        if isinstance(query, str):
            query = query.encode('utf-8')
        if isinstance(fragment, str):
            fragment = fragment.encode('utf-8')
    enc_params = self._encode_params(params)
    if enc_params:
        # Append the encoded extra params to any existing query string.
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params
    url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
    self.url = url
def filter_instance(self, inst, plist):
    """Remove properties from an instance that aren't in the PropertyList.

    inst  -- The pywbem.CIMInstance
    plist -- The property list, or None (meaning "keep everything").
             The list items must be all lowercase.
    """
    if plist is None:
        return
    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live ``keys()`` view raises RuntimeError on Python 3.
    for pname in list(inst.properties.keys()):
        if pname.lower() not in plist and pname:
            # Key properties referenced by the instance path are kept.
            if inst.path is not None and pname in inst.path.keybindings:
                continue
            del inst.properties[pname]
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(event.data_type))
    event_values = event.CopyToDict()
    file_reference = event_values.get('file_reference', None)
    if file_reference:
        # Render the 64-bit file reference as "<low 48 bits>-<high 16
        # bits>" (presumably MFT entry and sequence number -- confirm).
        event_values['file_reference'] = '{0:d}-{1:d}'.format(file_reference & 0xffffffffffff, file_reference >> 48)
    parent_file_reference = event_values.get('parent_file_reference', None)
    if parent_file_reference:
        event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)
    # Expand the reason/source bitmasks into comma-separated descriptions,
    # in deterministic (sorted-by-bitmask) order.
    update_reason_flags = event_values.get('update_reason_flags', 0)
    update_reasons = []
    for bitmask, description in sorted(self._USN_REASON_FLAGS.items()):
        if bitmask & update_reason_flags:
            update_reasons.append(description)
    event_values['update_reason'] = ', '.join(update_reasons)
    update_source_flags = event_values.get('update_source_flags', 0)
    update_sources = []
    for bitmask, description in sorted(self._USN_SOURCE_FLAGS.items()):
        if bitmask & update_source_flags:
            update_sources.append(description)
    event_values['update_source'] = ', '.join(update_sources)
    return self._ConditionalFormatMessages(event_values)
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """Return the nth occurrence of a weekday in the given month.

    dayofweek == 0 means Sunday; whichweek == 5 means "last instance".
    Returns None only if no matching day exists (cannot happen for
    whichweek <= 4).
    """
    first = datetime.datetime(year, month, 1, hour, minute)
    # Day-of-month of the FIRST occurrence of ``dayofweek``. This works
    # for both ISO (1-7) and Microsoft-style (0-6) weekday numbers,
    # because 7 % 7 == 0.
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    # Try the requested occurrence first, then fall back one week at a
    # time so whichweek == 5 resolves to the last instance in the month.
    # The original added (whichweek - n) weeks, which skipped the first
    # occurrence entirely and returned a date one week too late.
    for n in range(whichweek):
        dt = weekdayone + (whichweek - n - 1) * ONEWEEK
        if dt.month == month:
            return dt
def furtherArgsProcessing(args):
    """Convert ``args`` into a plain dict and deal with incongruities
    that argparse couldn't handle."""
    if isinstance(args, str):
        tokens = args.strip().split(' ')
        if tokens[0] == 'cyther':
            # Allow a full command line that starts with the program name.
            del tokens[0]
        args = parser.parse_args(tokens).__dict__
    elif isinstance(args, argparse.Namespace):
        args = args.__dict__
    elif isinstance(args, dict):
        pass
    else:
        raise CytherError("Args must be a instance of str or argparse.Namespace, not '{}'".format(str(type(args))))
    if args['watch']:
        # Watch mode implies timestamps, stat tracking and arg printing.
        args['timestamp'] = True
        args['watch_stats'] = {'counter': 0, 'errors': 0, 'compiles': 0, 'polls': 0}
        args['print_args'] = True
    return args
def get_nodes_by_qualified_name(self, graph, qualified_name_selector):
    """Yield all nodes in the graph that match the qualified_name_selector.

    :param str qualified_name_selector: The selector or node name
    """
    # Split the dotted selector into its component parts once, up front.
    selector_parts = qualified_name_selector.split(".")
    package_names = get_package_names(graph)
    for node, real_node in self.parsed_nodes(graph):
        if _node_is_match(selector_parts, package_names, real_node.fqn):
            yield node
def auth(self, transport, account_name, password):
    """Authenticates using username and password."""
    auth_token = AuthToken()
    auth_token.account_name = account_name
    # Build the <account by="name"> SOAP element.
    account_elem = SOAPpy.Types.stringType(
        data=account_name, attrs={sconstant.A_BY: sconstant.V_NAME})
    params = {sconstant.E_ACCOUNT: account_elem,
              sconstant.E_PASSWORD: password}
    self.log.debug('Authenticating account %s' % account_name)
    try:
        res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
                               sconstant.AuthRequest, params, auth_token)
    except SoapException as exc:
        # Wrap transport-level SOAP faults in our auth-specific error.
        raise AuthException(unicode(exc), exc)
    auth_token.token = res.authToken
    # sessionId is optional in the server response.
    if hasattr(res, 'sessionId'):
        auth_token.session_id = res.sessionId
    self.log.info('Authenticated account %s, session id %s'
                  % (account_name, auth_token.session_id))
    return auth_token
def check_pending_labels(ast):
    """Iteratively traverses the node looking for ID with no class set,
    marks them as labels, and check they've been declared.
    This way we avoid stack overflow for high line-numbered listings.
    """
    result = True
    seen = set()
    stack = [ast]
    while stack:
        node = stack.pop()
        # Avoid recursive infinite-loop on shared / cyclic references.
        if node is None or node in seen:
            continue
        seen.add(node)
        stack.extend(node.children)
        # Only untyped VAR nodes are label candidates.
        if node.token != 'VAR' or node.class_ is not CLASS.unknown:
            continue
        entry = global_.SYMBOL_TABLE.get_entry(node.name)
        if entry is None or entry.class_ is CLASS.unknown:
            syntax_error(node.lineno,
                         'Undeclared identifier "%s"' % node.name)
        else:
            assert entry.class_ == CLASS.label
            node.to_label(node)
        result = result and entry is not None
    return result
def get_prep_value(self, value):
    """Convert value to JSON string before save"""
    try:
        serialized = json.dumps(value, cls=DjangoJSONEncoder)
    except Exception as err:
        # Surface serialization failures as validation errors.
        raise ValidationError(str(err))
    else:
        return serialized
def deconstruct(name):
    '''Deconstruct a queue-name to a set of arguments'''
    # Normalize the raw name to unicode in the configured charset.
    name = coerce_unicode(name, _c.FSQ_CHARSET)
    new_arg = sep = u''
    args = []
    # can't get delimiter, if string is empty
    if 1 > len(name):
        # NOTE(review): the adjacent literals render as "...delimiterfrom:"
        # -- a space appears to be missing between them; confirm wording.
        raise FSQMalformedEntryError(errno.EINVAL, u'cannot derive delimiter'
                                     u'from: {0}'.format(name))
    # The first character of the name declares the delimiter; validate it
    # against the configured encode sequence and charset.
    delimiter, encodeseq = delimiter_encodeseq(name[0], _c.FSQ_ENCODE,
                                               _c.FSQ_CHARSET)
    # edge case, no args
    if 1 == len(name):
        return delimiter, args
    # normal case
    encoding_trg = sep
    for c in name[1:]:
        # An in-progress escape accumulates up to 3 chars; once full,
        # reset the accumulator so the next char is examined normally.
        if 3 == len(encoding_trg):
            encoding_trg = sep
        # While inside an escape sequence, consume characters verbatim so
        # that an encoded delimiter is not treated as a separator.
        if c == encodeseq or len(encoding_trg):
            encoding_trg = sep.join([encoding_trg, c])
        elif c == delimiter:
            # at delimiter, append and reset working arg
            args.append(decode(new_arg, delimiter=delimiter,
                               encodeseq=encodeseq))
            new_arg = sep
            continue
        # Escape characters also flow into the working argument.
        new_arg = sep.join([new_arg, c])
    # append our last arg
    args.append(decode(new_arg, delimiter=delimiter, encodeseq=encodeseq))
    return delimiter, args
def set_document_unit(self, unit):
    """Use specified unit for width and height of generated SVG file.

    See ``SVG_UNIT_*`` enumerated values for a list of available unit
    values that can be used here.

    This function can be called at any time before generating the SVG file.
    However to minimize the risk of ambiguities it's recommended to call it
    before any drawing operations have been performed on the given surface,
    to make it clearer what the unit used in the drawing operations is.
    The simplest way to do this is to call this function immediately after
    creating the SVG surface.

    Note if this function is never called, the default unit for SVG
    documents generated by cairo will be "pt". This is for historical
    reasons.

    :param unit: SVG unit.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*
    """
    cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
    # Cairo reports errors lazily; raise any pending surface error now.
    self._check_status()
def get_password(prompt='Password: ', confirm=False):
    """Prompt the user for a password and return it.

    <Arguments>
      prompt:
        The text of the password prompt that is displayed to the user.
      confirm:
        Boolean indicating whether the user should be prompted for the
        password a second time.  The two entered passwords must match,
        otherwise the user is prompted again.

    <Exceptions>
      None.

    <Side Effects>
      None.

    <Returns>
      The password entered by the user.
    """
    # Validate argument formats; raises
    # securesystemslib.exceptions.FormatError on mismatch.
    securesystemslib.formats.TEXT_SCHEMA.check_match(prompt)
    securesystemslib.formats.BOOLEAN_SCHEMA.check_match(confirm)
    while True:
        # getpass() prompts without echoing the user's input.
        entered = getpass.getpass(prompt, sys.stderr)
        if not confirm:
            return entered
        if entered == getpass.getpass('Confirm: ', sys.stderr):
            return entered
        print('Mismatch; try again.')
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
    """Return list of all dirs and files inside given dir.

    Also can filter contents to return only dirs or files.

    Args:
    - dir_name: Which directory we need to scan (relative)
    - get_dirs: Return dirs list
    - get_files: Return files list
    - hide_ignored: Exclude files and dirs with initial underscore
    """
    # When neither filter is requested, return both kinds.
    if get_dirs is None and get_files is None:
        get_dirs = get_files = True
    source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)
    entries = []
    for entry in os.listdir(source_dir):
        if hide_ignored and entry.startswith('_'):
            continue
        is_dir = os.path.isdir(os.path.join(source_dir, entry))
        if (get_dirs and is_dir) or (get_files and not is_dir):
            entries.append(entry)
    return entries
def _cmp_by_local_origin ( path1 , path2 ) :
"""Select locally originating path as best path .
Locally originating routes are network routes , redistributed routes ,
or aggregated routes . For now we are going to prefer routes received
through a Flexinet - Peer as locally originating route compared to routes
received from a BGP peer .
Returns None if given paths have same source .""" | # If both paths are from same sources we cannot compare them here .
if path1 . source == path2 . source :
return None
# Here we consider prefix from NC as locally originating static route .
# Hence it is preferred .
if path1 . source is None :
return path1
if path2 . source is None :
return path2
return None |
def lookup_default(self, name):
    """Looks up the default for a parameter name.  This by default
    looks into the :attr:`default_map` if available.
    """
    if self.default_map is None:
        # No map configured: no default available.
        return None
    value = self.default_map.get(name)
    # Callables are factories: invoke to obtain the actual default.
    return value() if callable(value) else value
def resolve_address(endpoint_type=PUBLIC, override=True):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network.  If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured
    net split if one is configured, or a Juju 2.0 extra-binding has been used.

    :param endpoint_type: Network endpoing type
    :param override: Accept hostname overrides or not
    """
    resolved_address = None
    if override:
        # Explicit per-endpoint hostname overrides take precedence.
        resolved_address = _get_address_override(endpoint_type)
        if resolved_address:
            return resolved_address
    vips = config('vip')
    if vips:
        # 'vip' config is a space-separated list of addresses.
        vips = vips.split()
    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    binding = ADDRESS_MAP[endpoint_type]['binding']
    clustered = is_clustered()
    if clustered and vips:
        if net_addr:
            # Pick the vip that lives on the configured network split.
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
        else:
            # NOTE: endeavour to check vips against network space
            # bindings
            try:
                bound_cidr = resolve_network_cidr(
                    network_get_primary_address(binding))
                for vip in vips:
                    if is_address_in_network(bound_cidr, vip):
                        resolved_address = vip
                        break
            except (NotImplementedError, NoNetworkBinding):
                # If no net-splits configured and no support for extra
                # bindings/network spaces so we expect a single vip
                resolved_address = vips[0]
    else:
        if config('prefer-ipv6'):
            # Exclude vips so a shared address is never returned here.
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)
        if net_addr:
            resolved_address = get_address_in_network(net_addr, fallback_addr)
        else:
            # NOTE: only try to use extra bindings if legacy network
            # configuration is not in use
            try:
                resolved_address = network_get_primary_address(binding)
            except (NotImplementedError, NoNetworkBinding):
                resolved_address = fallback_addr
    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))
    return resolved_address
def get_parent_id(element):
    """returns the ID of the parent of the given element"""
    # Prefer an explicit 'parent' attribute when present.
    if 'parent' in element.attrib:
        return element.attrib['parent']
    # Otherwise fall back to the XML parent's namespaced 'id' attribute.
    return element.getparent().attrib[add_ns('id')]
def frequency(data, output='spectraldensity', scaling='power', sides='one',
              taper=None, halfbandwidth=3, NW=None, duration=None,
              overlap=0.5, step=None, detrend='linear', n_fft=None,
              log_trans=False, centend='mean'):
    """Compute the
    power spectral density (PSD, output='spectraldensity', scaling='power'), or
    energy spectral density (ESD, output='spectraldensity', scaling='energy') or
    the complex fourier transform (output='complex', sides='two')

    Parameters
    ----------
    data : instance of ChanTime
        one of the datatypes
    detrend : str
        None (no detrending), 'constant' (remove mean), 'linear' (remove
        linear trend)
    output : str
        'spectraldensity' or 'csd' or 'complex'
        'spectraldensity' meaning the autospectrum or auto-spectral density,
        a special case of 'csd' (cross-spectral density), where the signal is
        cross-correlated with itself.  If 'csd', both channels in data are
        used as input
    sides : str
        'one' or 'two', where 'two' implies negative frequencies
    scaling : str
        'power' (units: V**2/Hz), 'energy' (units: V**2), 'fieldtrip',
        'chronux'
    taper : str
        Taper to use, commonly used tapers are 'boxcar', 'hann', 'dpss'
    halfbandwidth : int
        (only if taper='dpss') Half bandwidth (in Hz), frequency smoothing
        will be from +halfbandwidth to -halfbandwidth
    NW : int
        (only if taper='dpss') Normalized half bandwidth
        (NW = halfbandwidth * dur).  Number of DPSS tapers is 2 * NW - 1.
        If specified, NW takes precedence over halfbandwidth
    duration : float, in s
        If not None, it divides the signal in epochs of this length (in
        seconds) and then average over the PSD / ESD (not the complex result)
    overlap : float, between 0 and 1
        The amount of overlap between epochs (0.5 = 50%, 0.95 = almost
        complete overlap).
    step : float, in s
        step in seconds between epochs (alternative to overlap)
    n_fft : int
        Length of FFT, in samples.  If less than input axis, input is
        cropped.  If longer than input axis, input is padded with zeros.
        If None, FFT length set to axis length.
    log_trans : bool
        If True, spectral values will be natural log-transformed.  The
        transformation is applied before averaging (or taking the median).
    centend : str
        (only if duration is not None).  Central tendency measure to use,
        either mean (arithmetic) or median.

    Returns
    -------
    instance of ChanFreq
        If output='complex', there is an additional dimension ('taper')
        which is useful for 'dpss' but it's also present for all the other
        tapers.

    Raises
    ------
    TypeError
        If the data does not have a 'time' axis.  It might work in the
        future on other axes, but I cannot imagine how.
    ValueError
        If you use duration (to create multiple epochs) and
        output='complex', because it does not average the complex output of
        multiple epochs.

    Notes
    -----
    See extensive notes at wonambi.trans.frequency._fft

    It uses sampling frequency as specified in s_freq, it does not
    recompute the sampling frequency based on the time axis.

    Use of log or median for Welch's method is included based on
    recommendations from Izhikevich et al., bioRxiv, 2018.
    """
    if output not in ('spectraldensity', 'complex', 'csd'):
        # BUG fix: the second literal was missing its f-prefix, so the
        # offending value was rendered literally as "{output}".
        raise TypeError(f'output can be "spectraldensity", "complex" or '
                        f'"csd", not "{output}"')
    if 'time' not in data.list_of_axes:
        raise TypeError('\'time\' is not in the axis ' + str(data.list_of_axes))
    if len(data.list_of_axes) != data.index_of('time') + 1:
        raise TypeError('\'time\' should be the last axis')
    # this might be improved
    if duration is not None and output == 'complex':
        raise ValueError('cannot average the complex spectrum over '
                         'multiple epochs')
    if output == 'csd' and data.number_of('chan') != 2:
        raise ValueError('CSD can only be computed between two channels')
    if duration is not None:
        # Welch-style epoching: segment length and hop in samples.
        nperseg = int(duration * data.s_freq)
        if step is not None:
            nstep = int(step * data.s_freq)
        else:
            nstep = nperseg - int(overlap * nperseg)
    freq = ChanFreq()
    freq.attr = deepcopy(data.attr)
    freq.s_freq = data.s_freq
    freq.start_time = data.start_time
    freq.axis['chan'] = copy(data.axis['chan'])
    freq.axis['freq'] = empty(data.number_of('trial'), dtype='O')
    if output == 'complex':
        freq.axis['taper'] = empty(data.number_of('trial'), dtype='O')
    freq.data = empty(data.number_of('trial'), dtype='O')
    for i in range(data.number_of('trial')):
        x = data(trial=i)
        if duration is not None:
            x = _create_subepochs(x, nperseg, nstep)
        f, Sxx = _fft(x, s_freq=data.s_freq, detrend=detrend, taper=taper,
                      output=output, sides=sides, scaling=scaling,
                      halfbandwidth=halfbandwidth, NW=NW, n_fft=n_fft)
        # Log-transform before averaging across epochs, if requested.
        if log_trans:
            Sxx = log(Sxx)
        if duration is not None:
            if centend == 'mean':
                Sxx = Sxx.mean(axis=-2)
            elif centend == 'median':
                Sxx = median(Sxx, axis=-2)
            else:
                raise ValueError('Invalid central tendency measure. '
                                 'Use mean or median.')
        freq.axis['freq'][i] = f
        if output == 'complex':
            freq.axis['taper'][i] = arange(Sxx.shape[-1])
        if output == 'csd':
            # Collapse the two input channels into a single combined label.
            newchan = ' * '.join(freq.axis['chan'][i])
            freq.axis['chan'][i] = asarray([newchan], dtype='U')
        freq.data[i] = Sxx
    return freq
def export(group, bucket, prefix, start, end, role, poll_period=120,
           session=None, name="", region=None):
    """Export a given log group to s3.

    :param group: CloudWatch log group name to export.
    :param bucket: Destination S3 bucket.
    :param prefix: Key prefix within the bucket (log group name appended).
    :param start: Start date (datetime or parseable string).
    :param end: End date (datetime or parseable string); defaults to now.
    :param role: IAM role used to create the session when none is given.
    :param poll_period: Seconds to wait when the export-task limit is hit.
    :param session: Optional pre-built boto session.
    :param name: Label used in log messages.
    :param region: AWS region for the session.
    """
    start = start and isinstance(start, six.string_types) and parse(start) or start
    # BUG fix: the original tested ``isinstance(start, ...)`` here, so a
    # string ``end`` was never parsed.
    end = (end and isinstance(end, six.string_types)
           and parse(end) or end or datetime.now())
    # Normalize both boundaries from local time to UTC.
    start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
    end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
    if session is None:
        session = get_session(role, region)
    client = session.client('logs')
    # Resolve the group name to its full describe-log-groups record.
    paginator = client.get_paginator('describe_log_groups')
    for p in paginator.paginate():
        found = False
        for _group in p['logGroups']:
            if _group['logGroupName'] == group:
                group = _group
                found = True
                break
        if found:
            break
    if not found:
        raise ValueError("Log group %s not found." % group)
    if prefix:
        prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
    else:
        prefix = group['logGroupName']
    named_group = "%s:%s" % (name, group['logGroupName'])
    log.info(
        "Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
        named_group, start.strftime('%Y/%m/%d'), end.strftime('%Y/%m/%d'),
        bucket, prefix, group['storedBytes'])
    t = time.time()
    # One export task per calendar day in the requested range.
    days = [(start + timedelta(i)).replace(minute=0, hour=0, second=0,
                                           microsecond=0)
            for i in range((end - start).days)]
    day_count = len(days)
    s3 = boto3.Session().client('s3')
    # Skip days that already have exported keys in the bucket.
    days = filter_extant_exports(s3, bucket, prefix, days, start, end)
    log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
             named_group, day_count, len(days),
             days[0] if days else '', days[-1] if days else '')
    t = time.time()
    retry = get_retry(('SlowDown',))
    for idx, d in enumerate(days):
        date = d.replace(minute=0, microsecond=0, hour=0)
        export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
        params = {
            'taskName': "%s-%s" % ("c7n-log-exporter",
                                   date.strftime("%Y-%m-%d")),
            'logGroupName': group['logGroupName'],
            'fromTime': int(time.mktime(
                date.replace(minute=0, microsecond=0,
                             hour=0).timetuple()) * 1000),
            'to': int(time.mktime(
                date.replace(minute=59, hour=23,
                             microsecond=0).timetuple()) * 1000),
            'destination': bucket,
            'destinationPrefix': export_prefix}
        # if stream_prefix:
        #     params['logStreamPrefix'] = stream_prefix
        # Ensure the marker key exists so progress can be tagged onto it.
        try:
            s3.head_object(Bucket=bucket, Key=prefix)
        except ClientError as e:
            if e.response['Error']['Code'] != '404':  # Not Found
                raise
            s3.put_object(Bucket=bucket, Key=prefix, Body=json.dumps({}),
                          ACL="bucket-owner-full-control",
                          ServerSideEncryption="AES256")
        t = time.time()
        counter = 0
        while True:
            counter += 1
            try:
                result = client.create_export_task(**params)
            except ClientError as e:
                if e.response['Error']['Code'] == 'LimitExceededException':
                    # Only one export task may run at a time; poll until free.
                    time.sleep(poll_period)
                    # log every 30m of export waiting
                    if counter % 6 == 0:
                        log.debug(
                            "group:%s day:%s waiting for %0.2f minutes",
                            named_group, d.strftime('%Y-%m-%d'),
                            (counter * poll_period) / 60.0)
                    continue
                raise
            # Record progress on the marker key (retrying on throttle).
            retry(s3.put_object_tagging, Bucket=bucket, Key=prefix,
                  Tagging={'TagSet': [{'Key': 'LastExport',
                                       'Value': d.isoformat()}]})
            break
        log.info(
            "Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
            time.time() - t, named_group, d.strftime("%Y-%m-%d"), bucket,
            params['destinationPrefix'], result['taskId'])
    log.info(
        ("Exported log group:%s time:%0.2f days:%d start:%s"
         " end:%s bucket:%s prefix:%s"),
        named_group, time.time() - t, len(days), start.strftime('%Y/%m/%d'),
        end.strftime('%Y/%m/%d'), bucket, prefix)
def iec2bytes(size_spec, only_positive=True):
    """Convert a size specification, optionally containing a scaling
    unit in IEC notation, to a number of bytes.

    Parameters:
        size_spec (str): Number, optionally followed by a unit.
        only_positive (bool): Allow only positive values?

    Return:
        Numeric bytes size.

    Raises:
        ValueError: Unknown unit specifiers, or bad leading integer.
    """
    scale = 1
    try:
        # Plain numeric values pass through unchanged.
        size = int(0 + size_spec)
    except (TypeError, ValueError):
        spec = size_spec.strip().lower()
        # Match either the full IEC suffix ("kib") or its initial ("k").
        for exp, iec_unit in enumerate(IEC_UNITS[1:], 1):
            iec_unit = iec_unit.lower()
            if spec.endswith(iec_unit):
                spec, scale = spec[:-len(iec_unit)], 2 ** (10 * exp)
                break
            if spec.endswith(iec_unit[0]):
                spec, scale = spec[:-1], 2 ** (10 * exp)
                break
        else:
            # No scaling unit: allow a bare "b" bytes suffix.
            if spec.endswith('b'):
                spec = spec[:-1]
        try:
            stripped = spec.strip()
            size = float(stripped) if '.' in spec else int(stripped, base=0)
        except (TypeError, ValueError) as cause:
            raise ValueError('Invalid bytes size specification {!r}: {}'
                             .format(size_spec, cause))
    if only_positive and size < 0:
        raise ValueError('Invalid negative bytes size specification {!r}'
                         .format(size_spec))
    return int(size * scale)
def move_node(self, parent, from_index, to_index):
    """Moves given parent child to given index.

    :param parent: Parent node whose child is moved.
    :param to_index: Index to.
    :type to_index: int
    :param from_index: Index from.
    :type from_index: int
    :return: Method success.
    :rtype: bool
    """
    # TODO: Should be refactored once this ticket is fixed:
    # https://bugreports.qt-project.org/browse/PYSIDE-78
    # Reject out-of-range source or destination indexes.
    if not from_index >= 0 or not from_index < parent.children_count() or not to_index >= 0 or not to_index < parent.children_count():
        return False
    parent_index = self.get_node_index(parent)
    # Detach the child being moved, notifying attached views.
    self.beginRemoveRows(parent_index, from_index, from_index)
    child = parent.remove_child(from_index)
    self.endRemoveRows()
    # Detach every sibling from the end down to the target position.
    start_index = parent.children_count() - 1
    end_index = to_index - 1
    tail = []
    for i in range(start_index, end_index, -1):
        self.beginRemoveRows(parent_index, i, i)
        tail.append(parent.remove_child(i))
        self.endRemoveRows()
    # Re-append the moved child followed by the detached siblings in
    # their original relative order, one row at a time.
    tail = list(reversed(tail))
    tail.insert(0, child)
    for node in tail:
        row = parent.children_count()
        self.beginInsertRows(parent_index, row, row)
        parent.add_child(node)
        self.endInsertRows()
    return True
def get_true_sponsors(self):
    """Get the sponsors for the scheduled activity, taking into account
    activity defaults and overrides.
    """
    overridden = self.sponsors.all()
    # Fall back to the activity's default sponsors when no override exists.
    return overridden if len(overridden) > 0 else self.activity.sponsors.all()
def format_bar(self):
    """Builds the progress bar"""
    pct = floor(round(self.progress / self.size, 2) * 100)
    # Number of filled cells; the bar itself is self._barsize wide.
    filled = floor(pct * .33)
    bar = "".join(["‒"] * filled + ["↦"] +
                  [" "] * (self._barsize - filled - 1))
    sub = self.format_parent_bar() if self.parent_bar else ""
    message = "Loading{} ={}{} ({}%)".format(sub, bar, "☉", pct)
    # Trailing padding so a shrinking message overwrites leftovers.
    return message.ljust(len(message) + 5)
def start():
    '''Start the server loop'''
    from . import app
    # Build the cherrypy application, API options and config from opts.
    root, apiopts, conf = app.get_app(__opts__)
    if not apiopts.get('disable_ssl', False):
        # SSL is enabled: both certificate and key options are mandatory.
        if 'ssl_crt' not in apiopts or 'ssl_key' not in apiopts:
            logger.error("Not starting '%s'. Options 'ssl_crt' and "
                         "'ssl_key' are required if SSL is not disabled.",
                         __name__)
            return None
        # Validate the certificate/key files before handing to cherrypy.
        verify_certs(apiopts['ssl_crt'], apiopts['ssl_key'])
        cherrypy.server.ssl_module = 'builtin'
        cherrypy.server.ssl_certificate = apiopts['ssl_crt']
        cherrypy.server.ssl_private_key = apiopts['ssl_key']
        if 'ssl_chain' in apiopts.keys():
            cherrypy.server.ssl_certificate_chain = apiopts['ssl_chain']
    # Blocks, serving the app at the configured URL root.
    cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), conf)
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    # Refuse events this formatter was not registered for.
    if event.data_type != self.DATA_TYPE:
        raise errors.WrongFormatter(
            'Unsupported data type: {0:s}.'.format(event.data_type))
    return self._ConditionalFormatMessages(event.CopyToDict())
def prev_img(self, loop=True):
    """Go to the previous image in the channel."""
    channel = self.get_current_channel()
    if channel is not None:
        channel.prev_image()
        return True
    # Without a channel there is nothing to step through.
    self.show_error("Please create a channel.", raisetab=True)
def avro_name(url):  # type: (AnyStr) -> AnyStr
    """Turn a URL into an Avro-safe name.

    If the URL has no fragment, return this plain URL.  Otherwise return
    the part of the fragment after the last slash, or the whole fragment
    when it contains no slash.
    """
    frag = urllib.parse.urldefrag(url)[1]
    if frag == '':
        return url
    # rpartition yields the whole fragment when no '/' is present.
    return frag.rpartition('/')[2]
def _release_info():
    """Check latest fastfood release info from PyPI.

    Returns the parsed JSON package metadata as a dict.
    """
    # NOTE(review): ``urllib.Request`` / ``urllib.urlopen`` do not exist on
    # the stdlib ``urllib`` module in either Python 2 (urllib2) or
    # Python 3 (urllib.request); this presumably relies on a module-level
    # import alias -- confirm the import at the top of the file.
    pypi_url = 'http://pypi.python.org/pypi/fastfood/json'
    headers = {'Accept': 'application/json', }
    request = urllib.Request(pypi_url, headers=headers)
    response = urllib.urlopen(request).read().decode('utf_8')
    data = json.loads(response)
    return data
def read_mesh(fname):
    """Read mesh data from file.

    Parameters
    ----------
    fname : str
        File name to read.  Format will be inferred from the filename.
        Currently only '.obj' and '.obj.gz' are supported.

    Returns
    -------
    vertices : array
        Vertices.
    faces : array | None
        Triangle face definitions.
    normals : array
        Normals for the mesh.
    texcoords : array | None
        Texture coordinates.
    """
    # Check format; for '.gz' use the inner extension (e.g. '.obj.gz').
    fmt = op.splitext(fname)[1].lower()
    if fmt == '.gz':
        fmt = op.splitext(op.splitext(fname)[0])[1].lower()
    # BUG fix: was ``fmt in ('.obj')`` -- a substring test against the
    # string '.obj', which would also accept e.g. '.o'.
    if fmt == '.obj':
        return WavefrontReader.read(fname)
    elif not fmt:
        # BUG fix: was ``elif not format:`` which tested the always-truthy
        # builtin, making this branch unreachable.
        raise ValueError('read_mesh could not determine format.')
    else:
        raise ValueError('read_mesh does not understand format %s.' % fmt)
def get_objects(self):
    """Return a list of all content objects in this distribution.

    :rtype: list of :class:`boto.cloudfront.object.Object`
    :return: The content objects
    """
    # The bucket is iterable over its keys; materialize them as a list.
    return [key for key in self._get_bucket()]
def exists(self, file_path, check_link=False):
    """Return true if a path points to an existing file system object.

    Args:
        file_path: The path to examine.
        check_link: If True, the presence of a symlink at ``file_path``
            itself is sufficient, without resolving its target.

    Returns:
        (bool) True if the corresponding object exists.

    Raises:
        TypeError: if file_path is None.
    """
    if check_link and self.islink(file_path):
        return True
    file_path = make_string_path(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    # The null device exists except on the fake Windows filesystem.
    if file_path == self.dev_null.name:
        return not self.is_windows_fs
    try:
        # A trailing separator can only refer to a directory; treated
        # as non-existing here.
        if self.is_filepath_ending_with_separator(file_path):
            return False
        # Resolve symlinks / normalize; failures mean non-existence.
        file_path = self.resolve_path(file_path)
    except (IOError, OSError):
        return False
    if file_path == self.root.name:
        return True
    # Walk the fake directory tree component by component.
    path_components = self._path_components(file_path)
    current_dir = self.root
    for component in path_components:
        current_dir = self._directory_content(current_dir, component)[1]
        if not current_dir:
            return False
    return True
def toArray(self):
    """Return an numpy.ndarray

    >>> m = DenseMatrix(2, 2, range(4))
    >>> m.toArray()
    array([[ 0.,  2.],
           [ 1.,  3.]])
    """
    shape = (self.numRows, self.numCols)
    if self.isTransposed:
        # Row-major storage: reshape, then force a Fortran-ordered copy.
        return np.asfortranarray(self.values.reshape(shape))
    # Column-major storage: interpret the values directly in Fortran order.
    return self.values.reshape(shape, order='F')
def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0,
                         min_vacuum_size=10.0, max_normal_search=None,
                         center_slab=True, selective_dynamics=False,
                         undercoord_threshold=0.09):
    """This method constructs the adsorbate site finder from a bulk
    structure and a miller index, which allows the surface sites
    to be determined from the difference in bulk and slab coordination,
    as opposed to the height threshold.

    Args:
        structure (Structure): structure from which slab
            input to the ASF is constructed
        miller_index (3-tuple or list): miller index to be used
        min_slab_size (float): min slab size for slab generation
        min_vacuum_size (float): min vacuum size for slab generation
        max_normal_search (int): max normal search for slab generation
        center_slab (bool): whether to center slab in slab generation
        selective_dynamics (bool): whether to assign surface sites
            to selective dynamics
        undercoord_threshold (float): threshold of "undercoordination"
            to use for the assignment of surface sites.  Default is
            0.09, for which surface sites will be designated if they
            are 9% less coordinated than their bulk counterpart
    """
    # TODO: for some reason this works poorly with primitive cells
    # may want to switch the coordination algorithm eventually
    # Coordination number of every bulk site via Voronoi analysis.
    vnn_bulk = VoronoiNN(tol=0.05)
    bulk_coords = [len(vnn_bulk.get_nn(structure, n))
                   for n in range(len(structure))]
    struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords})
    slabs = generate_all_slabs(struct, max_index=max(miller_index),
                               min_slab_size=min_slab_size,
                               min_vacuum_size=min_vacuum_size,
                               max_normal_search=max_normal_search,
                               center_slab=center_slab)
    slab_dict = {slab.miller_index: slab for slab in slabs}
    if miller_index not in slab_dict:
        raise ValueError("Miller index not in slab dict")
    this_slab = slab_dict[miller_index]
    vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)
    surf_props, undercoords = [], []
    # Project each site onto the miller direction; only sites above the
    # average projection can belong to the (top) surface.
    this_mi_vec = get_mi_vec(this_slab)
    mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
    average_mi_mag = np.average(mi_mags)
    for n, site in enumerate(this_slab):
        bulk_coord = this_slab.site_properties['bulk_coordinations'][n]
        slab_coord = len(vnn_surface.get_nn(this_slab, n))
        mi_mag = np.dot(this_mi_vec, site.coords)
        # Fractional loss of coordination relative to the bulk.
        undercoord = (bulk_coord - slab_coord) / bulk_coord
        undercoords += [undercoord]
        if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
            surf_props += ['surface']
        else:
            surf_props += ['subsurface']
    new_site_properties = {'surface_properties': surf_props,
                           'undercoords': undercoords}
    new_slab = this_slab.copy(site_properties=new_site_properties)
    return cls(new_slab, selective_dynamics)
def raw_snapshot_data(self, name):
    """GET /:login/machines/:id/snapshots/:name

    :param name: identifier for snapshot
    :type name: :py:class:`basestring`

    :rtype: :py:class:`dict`

    Used internally to get a raw dict of a single machine snapshot.
    """
    # Response status is ignored; only the decoded body is returned.
    body, _ = self.datacenter.request(
        'GET', '{}/snapshots/{}'.format(self.path, name))
    return body
def canonicalize_id(reference_id):
    """Returns the canonicalized form of the provided reference_id.

    WikiLeaks provides some malformed cable identifiers.  If the provided
    `reference_id` is not valid, this method returns the valid reference
    identifier equivalent.  If the reference identifier is valid, the
    reference id is returned unchanged.

    Note: The returned canonicalized identifier may not be a valid WikiLeaks
    identifier anymore.  In most cases the returned canonical form is
    identical to the WikiLeaks identifier, but for malformed cable
    identifiers like "09SECTION01OF03SANJOSE525" it is not (becomes
    "09SANJOSE525").

    `reference_id`
        The cable identifier to canonicalize
    """
    # Substitute known-bad identifiers with their corrected equivalent.
    replacement = (MALFORMED_CABLE_IDS.get(reference_id, None)
                   or INVALID_CABLE_IDS.get(reference_id, None))
    if replacement:
        reference_id = replacement
    match = _C14N_PATTERN.match(reference_id)
    if not match:
        return reference_id
    # Canonicalize the origin portion in place.
    origin = match.group(1)
    return reference_id.replace(origin, canonicalize_origin(origin))
def _update_data(self, data=None):
    '''Update the data in this object.

    :param data: dict of field values; 'custom_fields' is mapped to a
        Custom_Fields object, datetime/date fields are parsed, and all
        other keys are set as attributes (via the cache check).
    '''
    # BUG fix: the default was the shared mutable ``{}``.
    if data is None:
        data = {}
    # Store the changes to prevent this update from affecting it
    pending_changes = self._changes or {}
    try:
        del self._changes
    except AttributeError:
        # BUG fix: was a bare ``except:``; only a missing instance
        # attribute is expected here.
        pass
    # Map custom fields into our custom fields object
    try:
        custom_field_data = data.pop('custom_fields')
    except KeyError:
        pass
    else:
        self.custom_fields = Custom_Fields(custom_field_data)
    # Map all other dictionary data to object attributes
    for key, value in data.iteritems():
        lookup_key = self._field_type.get(key, key)
        # if it's a datetime object, turn into proper DT object
        if lookup_key == 'datetime' or lookup_key == 'date':
            self.__dict__[key] = datetime_parse(value)
        else:
            # Check to see if there's cache data for this item.
            # Will return an object if it's recognized as one.
            self.__dict__[key] = self._redmine.check_cache(lookup_key, value)
    # Set the changes dict to track all changes from here on out
    self._changes = pending_changes
def build(self, requestor_private_key=None, requestor_certificate=None, other_certificates=None):
    """Validates the request information, constructs the ASN.1 structure
    and then optionally signs it.

    The requestor_private_key, requestor_certificate and other_certificates
    params are all optional and only necessary if the request needs to be
    signed.  Signing a request is uncommon for OCSP requests related to web
    TLS connections.

    :param requestor_private_key:
        An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
        object for the private key to sign the request with

    :param requestor_certificate:
        An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
        object of the certificate associated with the private key

    :param other_certificates:
        A list of asn1crypto.x509.Certificate or
        oscrypto.asymmetric.Certificate objects that may be useful for the
        OCSP server to verify the request signature.  Intermediate
        certificates would be specified here.

    :return:
        An asn1crypto.ocsp.OCSPRequest object of the request
    """
    def _make_extension(name, value):
        # Wrap a name/value pair in the asn1crypto extension dict shape;
        # every extension emitted here is non-critical.
        return {'extn_id': name, 'critical': False, 'extn_value': value}
    tbs_request_extensions = []
    request_extensions = []
    has_nonce = False
    # Copy explicitly-set TBS-request extensions, remembering whether the
    # caller already supplied a nonce.
    for name, value in self._tbs_request_extensions.items():
        if name == 'nonce':
            has_nonce = True
        tbs_request_extensions.append(_make_extension(name, value))
    # Auto-generate a random 16-byte nonce when requested and not supplied.
    if self._nonce and not has_nonce:
        tbs_request_extensions.append(_make_extension('nonce', util.rand_bytes(16)))
    if not tbs_request_extensions:
        tbs_request_extensions = None
    for name, value in self._request_extensions.items():
        request_extensions.append(_make_extension(name, value))
    if not request_extensions:
        request_extensions = None
    # getattr() with the hash algorithm name yields the issuer name/key
    # hashed with that algorithm (asn1crypto exposes these as properties).
    tbs_request = ocsp.TBSRequest({'request_list': [{'req_cert': {'hash_algorithm': {'algorithm': self._key_hash_algo}, 'issuer_name_hash': getattr(self._certificate.issuer, self._key_hash_algo), 'issuer_key_hash': getattr(self._issuer.public_key, self._key_hash_algo), 'serial_number': self._certificate.serial_number, }, 'single_request_extensions': request_extensions}], 'request_extensions': tbs_request_extensions})
    signature = None
    # Only build a signature when any signing-related argument was given.
    if requestor_private_key or requestor_certificate or other_certificates:
        # Accept either oscrypto or asn1crypto key/certificate objects.
        is_oscrypto = isinstance(requestor_private_key, asymmetric.PrivateKey)
        if not isinstance(requestor_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
            raise TypeError(_pretty_message('''
requestor_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''', _type_name(requestor_private_key)))
        cert_is_oscrypto = isinstance(requestor_certificate, asymmetric.Certificate)
        if not isinstance(requestor_certificate, x509.Certificate) and not cert_is_oscrypto:
            raise TypeError(_pretty_message('''
requestor_certificate must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''', _type_name(requestor_certificate)))
        if other_certificates is not None and not isinstance(other_certificates, list):
            raise TypeError(_pretty_message('''
other_certificates must be a list of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate objects, not %s
''', _type_name(other_certificates)))
        if cert_is_oscrypto:
            # Unwrap to the underlying asn1crypto certificate.
            requestor_certificate = requestor_certificate.asn1
        # Identify the requestor by the signing certificate's subject DN.
        tbs_request['requestor_name'] = x509.GeneralName(name='directory_name', value=requestor_certificate.subject)
        certificates = [requestor_certificate]
        # NOTE(review): if other_certificates is None while a requestor
        # certificate is provided, this loop raises TypeError -- callers
        # appear expected to pass a (possibly empty) list when signing;
        # confirm upstream.
        for other_certificate in other_certificates:
            other_cert_is_oscrypto = isinstance(other_certificate, asymmetric.Certificate)
            if not isinstance(other_certificate, x509.Certificate) and not other_cert_is_oscrypto:
                raise TypeError(_pretty_message('''
other_certificate must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''', _type_name(other_certificate)))
            if other_cert_is_oscrypto:
                other_certificate = other_certificate.asn1
            certificates.append(other_certificate)
        signature_algo = requestor_private_key.algorithm
        if signature_algo == 'ec':
            # The signature algorithm identifier uses "ecdsa", not "ec".
            signature_algo = 'ecdsa'
        signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
        # Select the oscrypto signing function matching the key type.
        if requestor_private_key.algorithm == 'rsa':
            sign_func = asymmetric.rsa_pkcs1v15_sign
        elif requestor_private_key.algorithm == 'dsa':
            sign_func = asymmetric.dsa_sign
        elif requestor_private_key.algorithm == 'ec':
            sign_func = asymmetric.ecdsa_sign
        if not is_oscrypto:
            # oscrypto signing functions require an oscrypto key object.
            requestor_private_key = asymmetric.load_private_key(requestor_private_key)
        signature_bytes = sign_func(requestor_private_key, tbs_request.dump(), self._hash_algo)
        signature = ocsp.Signature({'signature_algorithm': {'algorithm': signature_algorithm_id}, 'signature': signature_bytes, 'certs': certificates})
    return ocsp.OCSPRequest({'tbs_request': tbs_request, 'optional_signature': signature})
def set_motion_detect(self, enable):
    """Enable or disable motion detection for this camera."""
    if not enable:
        return api.request_motion_detection_disable(self.sync.blink, self.network_id, self.camera_id)
    return api.request_motion_detection_enable(self.sync.blink, self.network_id, self.camera_id)
def parse_napoleon_doc(doc, style):
    """Split a napoleon-style docstring into its named sections.

    Parameters
    ----------
    doc : Union[str, None]
        The docstring to parse.
    style : str
        'google' or 'numpy'

    Returns
    -------
    OrderedDict[str, Union[None, str]]
        Mapping from section name to section body (None when absent).
    """
    section_names = ["Short Summary", "Attributes", "Methods", "Warning",
                     "Note", "Parameters", "Other Parameters",
                     "Keyword Arguments", "Returns", "Yields", "Raises",
                     "Warns", "See Also", "References", "Todo",
                     "Example", "Examples"]
    aliases = {"Args": "Parameters", "Arguments": "Parameters",
               "Keyword Args": "Keyword Arguments", "Return": "Returns",
               "Warnings": "Warning", "Yield": "Yields"}
    sections = OrderedDict((name, None) for name in section_names)
    if not doc:
        return sections
    assert style in ("google", "numpy")
    line_iter = iter(cleandoc(doc).splitlines())
    current = "Short Summary"
    buffered = []
    while True:
        try:
            line = next(line_iter).rstrip()
        except StopIteration:
            # End of docstring: flush whatever was collected last.
            sections[aliases.get(current, current)] = "\n".join(buffered)
            break
        # Google headers end with a colon; numpy headers stand alone.
        candidate = line if style == "numpy" else (line[:-1] if line.endswith(":") else line)
        if candidate and (candidate in sections or candidate in aliases):
            # Flush the previous section before starting the new one.
            sections[aliases.get(current, current)] = "\n".join(buffered).rstrip() if buffered else None
            buffered = []
            current = candidate
            if style == "numpy":
                # Skip the dashed underline that follows a numpy header.
                try:
                    next(line_iter)
                except StopIteration:
                    sections[aliases.get(current, current)] = "\n".join(buffered)
                    break
        else:
            buffered.append(line)
    return sections
def sort(args):
    """%prog sort fastafile

    Sort a list of sequences and output with sorted IDs, etc.
    """
    # Note: this docstring doubles as the CLI usage string.
    p = OptionParser(sort.__doc__)
    p.add_option("--sizes", default=False, action="store_true",
                 help="Sort by decreasing size [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())
    fastafile, = args
    sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
    f = Fasta(fastafile, index=False)
    fw = must_open(sortedfastafile, "w")
    if opts.sizes:
        # Decreasing size, ties broken alphabetically by name.
        sized = sorted(f.itersizes(), key=lambda pair: (-pair[1], pair[0]))
        logging.debug("Sort by size: max: {0}, min: {1}".format(sized[0], sized[-1]))
        names = [name for name, size in sized]
    else:
        names = sorted(f.iterkeys())
    for name in names:
        SeqIO.write([f[name]], fw, "fasta")
    logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
    fw.close()
    return sortedfastafile
def marshmallow_loader(schema_class):
    """Build a Flask JSON loader that deserializes via ``schema_class``."""
    def json_loader():
        # Deserialize the request body, passing any resolved PID through
        # the schema context.
        payload = request.get_json()
        context = {}
        pid_data = request.view_args.get('pid_value')
        if pid_data:
            pid, _ = pid_data.data
            context['pid'] = pid
        result = schema_class(context=context).load(payload)
        if result.errors:
            raise MarshmallowErrors(result.errors)
        return result.data
    return json_loader
def cced(self, user, include=None):
    """Retrieve the tickets this user is cc'd into.

    :param user: User object or id
    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    """
    endpoint = self.endpoint.cced
    return self._query_zendesk(endpoint, 'ticket', id=user, include=include)
def update_resource_assignments(self, id_or_uri, resource_assignments, timeout=-1):
    """Modify scope membership by adding or removing resource assignments.

    Args:
        id_or_uri: Either the resource ID or the resource URI.
        resource_assignments (dict): Dict with a list of resource URIs to
            add and a list of resource URIs to remove.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView; it only
            stops waiting for its completion.

    Returns:
        dict: Updated resource.
    """
    uri = "{}/resource-assignments".format(self._client.build_uri(id_or_uri))
    return self._client.patch_request(uri, resource_assignments, timeout=timeout,
                                      custom_headers={'Content-Type': 'application/json'})
def pitremove_example():
    """Run a TauDEM function, taking pitremove as an example.

    Compares the max, min, and average of the raw DEM and the filled DEM.
    The result will be::

        RawDEM: Max: 284.07, Min: 139.11, Mean: 203.92
        FilledDEM: Max: 284.07, Min: 139.11, Mean: 203.93
    """
    dem = '../tests/data/Jamaica_dem.tif'
    workspace = '../tests/data/tmp_results'
    filled_name = 'dem_pitremoved.tif'
    # No explicit TauDEM/MPI install paths; rely on the defaults.
    TauDEM.pitremove(2, dem, filled_name, workspace, mpiexedir=None, exedir=None)
    raw_raster = RasterUtilClass.read_raster(dem)
    filled_raster = RasterUtilClass.read_raster(workspace + os.sep + filled_name)
    for label, raster in (('RawDEM', raw_raster), ('FilledDEM', filled_raster)):
        print('%s: Max: %.2f, Min: %.2f, Mean: %.2f'
              % (label, raster.get_max(), raster.get_min(), raster.get_average()))
def update(self, **kwargs):
    """Return a new command with the given fields replaced.

    :rtype: Command
    """
    # Start from the current fields and overlay any overrides.
    fields = {'script': self.script, 'output': self.output}
    fields.update(kwargs)
    return Command(**fields)
def update(self, _attributes=None, **attributes):
    """Perform an update on all the related models.

    :param attributes: The attributes
    :type attributes: dict

    :rtype: int
    """
    if _attributes is not None:
        attributes.update(_attributes)
    related = self._related
    if related.uses_timestamps():
        # Touch the related model's updated_at column as part of the update.
        attributes[self.get_related_updated_at()] = related.fresh_timestamp()
    return self._query.update(attributes)
def merge(self, other, inplace=None, overwrite_vars=frozenset(), compat='no_conflicts', join='outer'):
    """Merge the arrays of two datasets into a single dataset.

    This method generally does not allow for overriding data, with the
    exception of attributes, which are ignored on the second dataset.
    Variables with the same name are checked for conflicts via the equals
    or identical methods.

    Parameters
    ----------
    other : Dataset or castable to Dataset
        Dataset or variables to merge with this dataset.
    inplace : bool, optional
        If True, merge the other dataset into this dataset in-place.
        Otherwise, return a new dataset object.
    overwrite_vars : str or sequence, optional
        If provided, update variables of these name(s) without checking
        for conflicts in this dataset.
    compat : {'broadcast_equals', 'equals', 'identical', 'no_conflicts'}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:

        - 'broadcast_equals': all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - 'equals': all values and dimensions must be the same.
        - 'identical': all values, dimensions and attributes must be the
          same.
        - 'no_conflicts': only values which are not null in both datasets
          must be equal. The returned dataset then contains the
          combination of all non-null values.
    join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        Method for joining ``self`` and ``other`` along shared dimensions:

        - 'outer': use the union of the indexes
        - 'inner': use the intersection of the indexes
        - 'left': use indexes from ``self``
        - 'right': use indexes from ``other``
        - 'exact': error instead of aligning non-equal indexes

    Returns
    -------
    merged : Dataset
        Merged dataset.

    Raises
    ------
    MergeError
        If any variables conflict (see ``compat``).
    """
    inplace = _check_inplace(inplace)
    merge_result = dataset_merge_method(self, other, overwrite_vars=overwrite_vars,
                                        compat=compat, join=join)
    variables, coord_names, dims = merge_result
    return self._replace_vars_and_dims(variables, coord_names, dims, inplace=inplace)
def SetValue(self, identifier, value):
    """Sets a value by identifier.

    Args:
        identifier (str): case insensitive unique identifier for the value.
        value (object): value.

    Raises:
        TypeError: if the identifier is not a string type.
    """
    if not isinstance(identifier, py2to3.STRING_TYPES):
        raise TypeError('Identifier not a string type.')
    # Identifiers are case-insensitive; store them lowercased.
    self._values[identifier.lower()] = value
def parse_reports(self):
    """Find RSeQC junction_annotation reports and parse their data.

    Populates ``self.junction_annotation_data`` (sample name -> parsed
    counts and derived percentages), writes the parsed table and adds a
    bar-graph section when any samples were found.

    :return: number of samples found (after ignore-sample filtering).
    """
    self.junction_annotation_data = dict()
    regexes = {
        'total_splicing_events': r"^Total splicing Events:\s*(\d+)$",
        'known_splicing_events': r"^Known Splicing Events:\s*(\d+)$",
        'partial_novel_splicing_events': r"^Partial Novel Splicing Events:\s*(\d+)$",
        'novel_splicing_events': r"^Novel Splicing Events:\s*(\d+)$",
        'total_splicing_junctions': r"^Total splicing Junctions:\s*(\d+)$",
        'known_splicing_junctions': r"^Known Splicing Junctions:\s*(\d+)$",
        'partial_novel_splicing_junctions': r"^Partial Novel Splicing Junctions:\s*(\d+)$",
        'novel_splicing_junctions': r"^Novel Splicing Junctions:\s*(\d+)$",
    }

    def _add_percentages(d, total_key, count_keys):
        # Derive <key>_pct fields from the raw counts.  Skip when the
        # total is missing or zero to avoid a ZeroDivisionError on
        # empty/degenerate reports (the original code would crash).
        total = float(d.get(total_key, 0))
        if total == 0:
            return
        for k in count_keys:
            if k in d:
                d[k + '_pct'] = (float(d[k]) / total) * 100.0

    # Go through files and parse data using regexes.
    for f in self.find_log_files('rseqc/junction_annotation'):
        d = dict()
        for k, r in regexes.items():
            r_search = re.search(r, f['f'], re.MULTILINE)
            if r_search:
                d[k] = int(r_search.group(1))
        _add_percentages(d, 'total_splicing_events',
                         ['known_splicing_events',
                          'partial_novel_splicing_events',
                          'novel_splicing_events'])
        _add_percentages(d, 'total_splicing_junctions',
                         ['known_splicing_junctions',
                          'partial_novel_splicing_junctions',
                          'novel_splicing_junctions'])
        if len(d) > 0:
            if f['s_name'] in self.junction_annotation_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='junction_annotation')
            self.junction_annotation_data[f['s_name']] = d

    # Filter to strip out ignored sample names.
    self.junction_annotation_data = self.ignore_samples(self.junction_annotation_data)
    if len(self.junction_annotation_data) > 0:
        # Write parsed data to file.
        self.write_data_file(self.junction_annotation_data, 'multiqc_rseqc_junction_annotation')
        # Plot junction annotations.
        keys = [OrderedDict(), OrderedDict()]
        keys[0]['known_splicing_junctions'] = {'name': 'Known Splicing Junctions'}
        keys[0]['partial_novel_splicing_junctions'] = {'name': 'Partial Novel Splicing Junctions'}
        keys[0]['novel_splicing_junctions'] = {'name': 'Novel Splicing Junctions'}
        keys[1]['known_splicing_events'] = {'name': 'Known Splicing Events'}
        keys[1]['partial_novel_splicing_events'] = {'name': 'Partial Novel Splicing Events'}
        keys[1]['novel_splicing_events'] = {'name': 'Novel Splicing Events'}
        pconfig = {
            'id': 'rseqc_junction_annotation_junctions_plot',
            'title': 'RSeQC: Splicing Junctions',
            'ylab': '% Junctions',
            'cpswitch_c_active': False,
            'data_labels': ['Junctions', 'Events'],
        }
        self.add_section(
            name='Junction Annotation',
            anchor='rseqc_junction_annotation',
            description='<a href="http://rseqc.sourceforge.net/#junction-annotation-py" target="_blank">Junction annotation</a>'
                        " compares detected splice junctions to"
                        " a reference gene model. An RNA read can be spliced 2"
                        " or more times, each time is called a splicing event.",
            plot=bargraph.plot([self.junction_annotation_data, self.junction_annotation_data], keys, pconfig),
        )
    # Return number of samples found.
    return len(self.junction_annotation_data)
def featured_event_query(self, **kwargs):
    """Query the Yelp Featured Event API.

    documentation: https://www.yelp.com/developers/documentation/v3/featured_event

    required parameters:
        * one of either:
            * location - text specifying a location to search for
            * latitude and longitude
    """
    has_location = bool(kwargs.get('location'))
    has_coordinates = bool(kwargs.get('latitude')) and bool(kwargs.get('longitude'))
    if not (has_location or has_coordinates):
        raise ValueError(
            'A valid location (parameter "location") or latitude/longitude combination '
            '(parameters "latitude" and "longitude") must be provided.'
        )
    return self._query(FEATURED_EVENT_API_URL, **kwargs)
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc).

    :param home_dir: root directory of the virtual environment.
    :return: tuple of (home_dir, lib_dir, inc_dir, bin_dir), adjusted for
        the current platform/interpreter (Windows, Jython, PyPy, POSIX).
    """
    home_dir = os.path.abspath(home_dir)
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if is_win:
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            import ctypes
            GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir) + 1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                # Python 2 has unicode(); on Python 3 fall back to str.
                u = unicode
            except NameError:
                u = str
            ret = GetShortPathName(u(home_dir), buf, size)
            if not ret:
                # Short-name lookup failed (e.g. 8.3 names disabled);
                # give up rather than create a broken environment.
                print('Error: the path "%s" has a space in it' % home_dir)
                print('We could not determine the short pathname for it.')
                print('Exiting.')
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        # PyPy keeps its libraries directly in the environment root.
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        # ABI flags (e.g. "m") are part of the include dir name on CPython 3.
        inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir
def vgadata():
    """Get data about the graphics card.

    Parses ``lspci -m`` output and returns a dict with a "Graphics" key
    (vendor and device strings) when a VGA-compatible controller is
    found; an empty dict otherwise.
    """
    # lspci lives in different places across distros.
    if os.path.isfile('/sbin/lspci'):
        lspci = '/sbin/lspci'
    else:
        lspci = '/usr/bin/lspci'
    pdata = {}
    f = os.popen(lspci + ' -m')
    try:
        for line in f:
            # -m output is quote-delimited: class "..." vendor "..." device "..."
            fields = line.split("\"")
            if len(fields) < 6:
                # Skip malformed/short lines instead of raising IndexError
                # (e.g. warning lines emitted by lspci).
                continue
            if fields[1].strip() == "VGA compatible controller":
                pdata["Graphics"] = fields[3] + " " + fields[5]
    finally:
        # Close the pipe even if parsing fails (the original leaked it
        # on exception).
        f.close()
    return pdata
def add_gene_family_to_graph(self, family_id):
    """Make an association between a group of genes and a grouping class.

    We assume the genes in the association are part of the supplied
    ``family_id``, and that the genes have already been declared as
    classes elsewhere.  The ``family_id`` is added as an individual of
    type DATA:gene_family.

    Triples::

        <family_id> a EDAM-DATA:gene_family
        <family_id> RO:has_member <gene1>
        <family_id> RO:has_member <gene2>

    :param family_id:
    :return:
    """
    family = Family(self.graph)
    # The genes themselves are assumed to have been added as classes
    # previously; only the family individual is created here.
    self.model.addIndividualToGraph(family_id, None, self.globaltt['gene_family'])
    for member in (self.sub, self.obj):
        family.addMember(family_id, member)
    return
def clean(self):
    """Validate that the crop rectangle lies fully within the source photo."""
    data = self.cleaned_data
    photo = data['photo']
    left = data['crop_left']
    top = data['crop_top']
    right = left + data['crop_width']
    bottom = top + data['crop_height']
    out_of_bounds = (left > photo.width or top > photo.height
                     or right > photo.width or bottom > photo.height)
    if out_of_bounds:
        raise ValidationError(ugettext("The specified crop coordinates do not fit into the source photo."))
    return data
def select(sel, truecase, falsecase):
    """Multiplexer returning falsecase for select == 0, otherwise truecase.

    :param WireVector sel: used as the select input to the multiplexer
    :param WireVector falsecase: the WireVector selected if select == 0
    :param WireVector truecase: the WireVector selected if select == 1

    The hardware this generates is exactly the same as "mux" but by
    putting the true case as the first argument it matches more of the
    C-style ternary operator semantics, which can be helpful for
    readability.

    Example of mux as "ternary operator" to take the max of 'a' and 5::

        select(a < 5, truecase=a, falsecase=5)
    """
    sel = as_wires(sel)
    false_wire = as_wires(falsecase)
    true_wire = as_wires(truecase)
    false_wire, true_wire = match_bitwidth(false_wire, true_wire)
    result = WireVector(bitwidth=len(false_wire))
    # 'x' is the mux op; args are (select, falsecase, truecase).
    mux_net = LogicNet(op='x', op_param=None,
                       args=(sel, false_wire, true_wire), dests=(result,))
    working_block().add_net(mux_net)  # includes sanity check on the mux
    return result
def order_snapshot_space(self, volume_id, capacity, tier, upgrade, **kwargs):
    """Orders snapshot space for the given file volume.

    :param integer volume_id: The ID of the volume
    :param integer capacity: The capacity to order, in GB
    :param float tier: The tier level of the file volume, in IOPS per GB
    :param boolean upgrade: Flag to indicate if this order is an upgrade
    :return: Returns a SoftLayer_Container_Product_Order_Receipt
    """
    file_mask = ('id,billingItem[location,hourlyFlag],'
                 'storageType[keyName],storageTierLevel,provisionedIops,'
                 'staasVersion,hasEncryptionAtRest')
    volume = self.get_file_volume_details(volume_id, mask=file_mask, **kwargs)
    order = storage_utils.prepare_snapshot_order_object(self, volume, capacity, tier, upgrade)
    return self.client.call('Product_Order', 'placeOrder', order)
def logpdf(self, mu):
    """Log PDF for Skew t prior.

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over.

    Returns
    -------
    -log(p(mu))
    """
    # Apply the latent-variable transform first, when one is configured.
    value = mu if self.transform is None else self.transform(mu)
    return self.logpdf_internal_prior(value, df=self.df0, loc=self.loc0,
                                      scale=self.scale0, gamma=self.gamma0)
def unique_value_groups(ar, sort=True):
    """Group an array by its unique values.

    Parameters
    ----------
    ar : array-like
        Input array. This will be flattened if it is not already 1-D.
    sort : boolean, optional
        Whether or not to sort unique values.

    Returns
    -------
    values : np.ndarray
        Sorted, unique values as returned by `np.unique`.
    indices : list of lists of int
        Each element provides the integer indices in `ar` with values
        given by the corresponding value in `unique_values`.
    """
    codes, values = pd.factorize(ar, sort=sort)
    groups = [[] for _ in range(len(values))]
    for position, code in enumerate(codes):
        # pandas marks NaN with -1 and excludes it from values; skip those.
        if code >= 0:
            groups[code].append(position)
    return values, groups
def get_info(self):
    """Parse the output of a "show info" HAProxy command into a simple
    dictionary with snake_case keys.
    """
    raw = self.send_command("show info")
    if not raw:
        return {}

    def to_snake_case(label):
        # CamelCase -> snake_case using the module-level regexes.
        return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', label)).lower()

    pairs = (line.split(": ") for line in raw.split("\n"))
    return {to_snake_case(label): value for label, value in pairs}
def authorgroup(self):
    """A list of namedtuples representing the article's authors organized
    by affiliation, in the form (affiliation_id, dptid, organization,
    city, postalcode, addresspart, country, auid, indexed_name,
    surname, given_name).

    If "given_name" is not present, fall back to initials.

    Note: Affiliation information might be missing or mal-assigned even
    when it looks correct in the web view.  In this case please request
    a correction.
    """
    fields = ('affiliation_id dptid organization city postalcode '
              'addresspart country auid indexed_name surname given_name')
    Author = namedtuple('Author', fields)
    result = []
    for item in listify(self._head.get('author-group', [])):
        # Affiliation information.
        aff = item.get('affiliation', {})
        try:
            # An affiliation may carry one or many IDs; join multiples.
            aff_id = ", ".join(a["@afid"] for a in listify(aff['affiliation-id']))
        except KeyError:
            aff_id = aff.get("@afid")
        org = _get_org(aff)
        # Author information (might relate to collaborations).
        for au in listify(item.get('author', item.get('collaboration', []))):
            try:
                given = au.get('ce:given-name', au['ce:initials'])
            except KeyError:
                # Collaborations carry their name in ce:text.
                given = au.get('ce:text')
            result.append(Author(
                affiliation_id=aff_id,
                dptid=aff.get("@dptid"),
                organization=org,
                city=aff.get('city'),
                postalcode=aff.get('postal-code'),
                addresspart=aff.get('address-part'),
                country=aff.get('country'),
                auid=au.get('@auid'),
                indexed_name=chained_get(au, ['preferred-name', 'ce:indexed-name']),
                surname=au.get('ce:surname'),
                given_name=given))
    return result or None
def display(self, xaxis, alpha, new=True):
    """E.display(xaxis, alpha=.8)

    :Arguments: xaxis, alpha

    Plots the CI region on the current figure, with respect to xaxis,
    at opacity alpha.

    :Note: The fill color of the envelope will be self.mass on the
        grayscale.
    """
    if new:
        figure()
    if self.ndim != 1:
        # 2-D case: show lower/upper surfaces (or the median) as
        # filled contour plots.
        if self.mass > 0.:
            subplot(1, 2, 1)
            contourf(xaxis[0], xaxis[1], self.lo, cmap=cm.bone)
            colorbar()
            subplot(1, 2, 2)
            contourf(xaxis[0], xaxis[1], self.hi, cmap=cm.bone)
            colorbar()
        else:
            contourf(xaxis[0], xaxis[1], self.value, cmap=cm.bone)
            colorbar()
        return
    # 1-D case: fill the envelope between lo and hi, or plot the median.
    if self.mass > 0.:
        xs = concatenate((xaxis, xaxis[::-1]))
        ys = concatenate((self.lo, self.hi[::-1]))
        fill(xs, ys, facecolor='%f' % self.mass, alpha=alpha,
             label=('centered CI ' + str(self.mass)))
    else:
        pyplot(xaxis, self.value, 'k-', alpha=alpha, label=('median'))
def in_miso_and_inner(self):
    """Test if a node is an inner MISO node: multiple inputs, a single
    output, and neither the unique successor nor the first predecessor
    is an input/output boundary node.

    :return: bool
    """
    if len(self.successor) != 1 or len(self.precedence) <= 1:
        return False
    succ = self.successor[0]
    pred = self.precedence[0]
    if succ is None or pred is None:
        return False
    # BUG FIX: the original re-tested successor[0].in_or_out in the last
    # conjunct; the symmetric check on precedence[0] was clearly intended.
    return not succ.in_or_out and not pred.in_or_out
def rejected(reason):
    """Create a Promise that is already rejected with ``reason``."""
    promise = Promise()
    promise._state = 'rejected'
    promise.reason = reason
    return promise
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.