signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def inject_func_as_property(self, func, method_name=None, class_=None):
    """Attach ``func`` to ``self``'s class as a read-only property.

    WARNING: properties are more safely injected using metaclasses.
    Reference:
    http://stackoverflow.com/questions/13850114/dynamically-adding-methods-with-or-without-metaclass-in-python

    :param func: getter function for the new property.
    :param method_name: attribute name; defaults to ``func``'s name.
    :param class_: unused; kept for interface compatibility.
    """
    name = get_funcname(func) if method_name is None else method_name
    # NOTE: this mutates the class, so every instance gains the property.
    setattr(self.__class__, name, property(func))
|
def _ParseMRUListExEntryValue(self, parser_mediator, registry_key, entry_index, entry_number, codepage='cp1252', **kwargs):
    """Parses the MRUListEx entry value.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key that
            contains the MRUListEx value.
        entry_index (int): MRUListEx entry index.
        entry_number (int): entry number.
        codepage (Optional[str]): extended ASCII string codepage.

    Returns:
        str: MRUList entry value; empty string when the value is missing
            or cannot be parsed.
    """
    value_string = ''
    # Entry values are named after their decimal entry number.
    value = registry_key.GetValueByName('{0:d}'.format(entry_number))
    if value is None:
        parser_mediator.ProduceExtractionWarning(
            'missing MRUListEx value: {0:d} in key: {1:s}.'.format(entry_number, registry_key.path))
    elif not value.DataIsBinaryData():
        logger.debug(
            ('[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
             '{2:s}.').format(self.NAME, entry_number, registry_key.path))
    elif value.data:
        # The value data starts with a UTF-16 little-endian path string.
        utf16le_string_map = self._GetDataTypeMap('utf16le_string')
        context = dtfabric_data_maps.DataTypeMapContext()
        try:
            path = self._ReadStructureFromByteStream(value.data, 0, utf16le_string_map, context=context)
        except (ValueError, errors.ParseError) as exception:
            parser_mediator.ProduceExtractionWarning(
                ('unable to parse MRUListEx entry value: {0:d} with error: '
                 '{1!s}').format(entry_number, exception))
            return value_string
        path = path.rstrip('\x00')
        # Any bytes following the path string are shell item data.
        shell_item_data = value.data[context.byte_size:]
        if not shell_item_data:
            parser_mediator.ProduceExtractionWarning(
                ('missing shell item in MRUListEx value: {0:d} in key: '
                 '{1:s}.').format(entry_number, registry_key.path))
            value_string = 'Path: {0:s}'.format(path)
        else:
            shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
            shell_items_parser.ParseByteStream(parser_mediator, shell_item_data, codepage=codepage)
            value_string = 'Path: {0:s}, Shell item: [{1:s}]'.format(path, shell_items_parser.CopyToPath())
    return value_string
|
def context(self, key, method):
    """Return the value stored under ``key`` on the current context,
    creating it with ``method`` on first access.

    :param key: the key attached to the context.
    :param method: zero-argument constructor called on first access.
    :return: the cached value, or ``None`` when no context is active.
    """
    current = stack.top
    if current is None:
        return None
    if not hasattr(current, key):
        setattr(current, key, method())
    return getattr(current, key)
|
def set_kwargs(self, code):
    """Set the widget state from a kwargs string.

    Parameters
    ----------
    code: String
        Code representation of a kwargs dict; the first and last characters
        (the enclosing delimiters) are stripped before parsing.
    """
    kwargs = {}
    # parse_dict_strings yields alternating key and value tokens.
    kwarglist = list(parse_dict_strings(code[1:-1]))
    for kwarg, val in zip(kwarglist[::2], kwarglist[1::2]):
        kwargs[unquote_string(kwarg)] = val
    for key in kwargs:
        if key == "color":
            color = code2color(kwargs[key])
            self.colorselect.SetOwnForegroundColour(color)
        elif key == "fontname":
            self.font_face = unquote_string(kwargs[key])
            # Lazily create the font object before mutating it.
            if self.chosen_font is None:
                self.chosen_font = get_default_font()
            self.chosen_font.SetFaceName(self.font_face)
        elif key == "fontsize":
            if kwargs[key]:
                self.font_size = int(kwargs[key])
            else:
                # Empty value: fall back to the default point size.
                self.font_size = get_default_font().GetPointSize()
            if self.chosen_font is None:
                self.chosen_font = get_default_font()
            self.chosen_font.SetPointSize(self.font_size)
        elif key == "fontstyle":
            # Translate matplotlib style names to wx style constants.
            self.font_style = self.style_mpl2wx[unquote_string(kwargs[key])]
            if self.chosen_font is None:
                self.chosen_font = get_default_font()
            self.chosen_font.SetStyle(self.font_style)
        elif key == "fontweight":
            # Translate matplotlib weight names to wx weight constants.
            self.font_weight = self.weight_mpl2wx[unquote_string(kwargs[key])]
            if self.chosen_font is None:
                self.chosen_font = get_default_font()
            self.chosen_font.SetWeight(self.font_weight)
|
def mergeSplitsOnInterfaces(root: LNode):
    """Collect all split/concatenation nodes, group them by target interface
    and merge each group into a single SPLIT/CONCAT node."""
    # Recurse into compound children first.
    for ch in root.children:
        if ch.children:
            mergeSplitsOnInterfaces(ch)
    ctx = MergeSplitsOnInterfacesCtx()
    for ch in root.children:
        srcPorts = None
        try:
            if ch.name == "CONCAT":
                # CONCAT: follow the single east port's single outgoing edge.
                p = single(ch.east, lambda x: True)
                e = single(p.outgoingEdges, lambda x: True)
                srcPorts = e.dsts
            elif ch.name == "SLICE":
                # SLICE: follow the single west port's single incoming edge.
                p = single(ch.west, lambda x: True)
                e = single(p.incomingEdges, lambda x: True)
                srcPorts = e.srcs
        except (DuplicitValueExc, NoValueExc):
            # Node does not have exactly one such port/edge; skip it.
            continue
        if srcPorts is not None:
            for srcPort in srcPorts:
                if isinstance(srcPort.parent, LPort):  # only for non primitive ports
                    rootPort = getRootIntfPort(srcPort)
                    ctx.register(rootPort, ch, e)
    # join them if it is possible
    for srcPort, splitsAndConcats in ctx.iterPortSplits():
        if len(splitsAndConcats) <= 1:
            continue
        name = "SPLIT" if srcPort.direction == PortType.OUTPUT else "CONCAT"
        newSplitNode = root.addNode(name)
        copyPort(srcPort, newSplitNode, True, "")
        # Mirror the port count of the first merged node on the new node.
        n = splitsAndConcats[0][0]
        for i in range(max(len(n.west), len(n.east))):
            copyPort(srcPort, newSplitNode, False, "[%d]" % i)
        reconnectPorts(root, srcPort, splitsAndConcats, newSplitNode)
|
def _highlight(html):
    """Syntax-highlights HTML-rendered Markdown.

    Plucks sections to highlight that conform to the GitHub fenced code info
    string as defined at https://github.github.com/gfm/#info-string.

    Args:
        html (str): The rendered HTML.

    Returns:
        str: The HTML with Pygments syntax highlighting applied to all code
        blocks.
    """
    # nowrap=True because the result is re-wrapped in our own <pre> below.
    formatter = pygments.formatters.HtmlFormatter(nowrap=True)
    code_expr = re.compile(
        r'<pre><code class="language-(?P<lang>.+?)">(?P<code>.+?)'
        r'</code></pre>', re.DOTALL)

    def replacer(match):
        try:
            lang = match.group('lang')
            lang = _LANG_ALIASES.get(lang, lang)
            # pygments raises ClassNotFound (a ValueError subclass) for
            # unknown languages.
            lexer = pygments.lexers.get_lexer_by_name(lang)
        except ValueError:
            lexer = pygments.lexers.TextLexer()
        code = match.group('code')
        # Decode html entities in the code. cmark tries to be helpful and
        # translate '"' to '&quot;', but it confuses pygments. Pygments will
        # escape any html entities when re-writing the code, and we run
        # everything through bleach after.
        code = html_parser.HTMLParser().unescape(code)
        highlighted = pygments.highlight(code, lexer, formatter)
        return '<pre>{}</pre>'.format(highlighted)

    result = code_expr.sub(replacer, html)
    return result
|
def get_row_height(self, row, tab):
    """Return the height of row ``row`` in table ``tab``, falling back to
    the configured default when no explicit height is stored."""
    key = (row, tab)
    if key in self.row_heights:
        return self.row_heights[key]
    return config["default_row_height"]
|
def create_from_fitsfile(cls, fitsfile):
    """Read a FITS file and use it to make a mapping.

    :param fitsfile: path to the FITS file; HDU 0 holds the index map and
        HDU 1 the multiplicity map.
    :return: new instance built from the maps found in the file.
    """
    from fermipy.skymap import Map
    index_map = Map.create_from_fits(fitsfile)
    mult_map = Map.create_from_fits(fitsfile, hdu=1)
    # Fix: the original left the FITS file handle open; close it with a
    # context manager once the HPX geometry has been read.
    with fits.open(fitsfile) as ff:
        hpx = HPX.create_from_hdu(ff[0])
    mapping_data = dict(ipixs=index_map.counts,
                        mult_val=mult_map.counts,
                        npix=mult_map.counts.shape)
    return cls(hpx, index_map.wcs, mapping_data)
|
def get_html(grafs):
    """Render ``grafs`` as HTML by wrapping each one in <p> tags.

    Linebreaks inside a graf are replaced with <br> tags."""
    paragraphs = []
    for graf in grafs:
        wrapped = format_html('<p>{}</p>', graf)
        paragraphs.append(wrapped.replace("\n", "<br>"))
    return format_html(six.text_type('\n\n'.join(paragraphs)))
|
def createTileUrl(self, x, y, z):
    '''Return a tile URL built from the template by substituting the
    {{x}}, {{y}} and {{z}} placeholders.'''
    url = self.tileTemplate
    for placeholder, value in (('{{x}}', x), ('{{y}}', y), ('{{z}}', z)):
        url = url.replace(placeholder, str(value))
    return url
|
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    :param filename: path of the file to probe.
    :return: True when the end-of-central-directory record is found,
        False otherwise (including when the file cannot be read).
    """
    try:
        # Fix: the original leaked the handle if _EndRecData raised; the
        # 'with' block guarantees it is closed on every path.
        with open(filename, "rb") as fpin:
            endrec = _EndRecData(fpin)
        if endrec:
            return True  # file has correct magic number
    except IOError:
        pass
    return False
|
def _py3_crc16(value):
    """Calculate the CRC-16 for the value in Python 3.

    :param bytes value: The value to checksum.
    :rtype: int
    """
    checksum = 0
    for octet in value:
        index = ((checksum >> 8) ^ octet) & 0xff
        checksum = ((checksum << 8) & 0xffff) ^ _CRC16_LOOKUP[index]
    return checksum
|
def education(self):
    """A list of structures describing the user's education history.

    Each structure has attributes ``school``, ``year``, ``concentration``
    and ``type``. ``school`` and ``year`` reference ``Page`` instances,
    while ``concentration`` is a collection of ``Page`` instances. ``type``
    is just a string that describes the education level.

    .. note:: ``concentration`` may be ``False`` if the user has not
        specified his/her concentration for the given school.
    """
    educations = []
    # self.cache['education'] holds the raw dicts returned by the API.
    for education in self.cache['education']:
        school = Page(**education.get('school'))
        year = Page(**education.get('year'))
        # NOTE: shadows the builtin 'type' for the rest of the loop body.
        type = education.get('type')
        if 'concentration' in education:
            # NOTE(review): under Python 3 'map' yields a lazy iterator,
            # not a list -- confirm which Python version callers expect.
            concentration = map(lambda c: Page(**c), education.get('concentration'))
        else:
            concentration = False
        education = Structure(school=school, year=year, concentration=concentration, type=type)
        educations.append(education)
    return educations
|
def decrypt_ige(cipher_text, key, iv):
    """Decrypts the given text in 16-byte blocks by using the given key
    and 32-byte initialization vector (AES in IGE mode).

    Prefers native implementations (cryptg, then libssl) and falls back to
    a pure-Python AES when neither is available.
    """
    if cryptg:
        return cryptg.decrypt_ige(cipher_text, key, iv)
    if libssl.decrypt_ige:
        return libssl.decrypt_ige(cipher_text, key, iv)
    # IGE splits the 32-byte IV into two 16-byte halves.
    iv1 = iv[:len(iv) // 2]
    iv2 = iv[len(iv) // 2:]
    aes = pyaes.AES(key)
    plain_text = []
    blocks_count = len(cipher_text) // 16
    cipher_text_block = [0] * 16
    for block_index in range(blocks_count):
        # XOR the cipher block with the previous plain block (iv2) ...
        for i in range(16):
            cipher_text_block[i] = cipher_text[block_index * 16 + i] ^ iv2[i]
        plain_text_block = aes.decrypt(cipher_text_block)
        # ... then XOR the decrypted block with the previous cipher block.
        for i in range(16):
            plain_text_block[i] ^= iv1[i]
        # Chain: current cipher and plain blocks feed the next round.
        iv1 = cipher_text[block_index * 16:block_index * 16 + 16]
        iv2 = plain_text_block
        plain_text.extend(plain_text_block)
    return bytes(plain_text)
|
def list_(prefix=None, bin_env=None, user=None, cwd=None, env_vars=None, **kwargs):
    '''Filter list of installed apps from ``freeze`` and check to see if
    ``prefix`` exists in the list of packages installed.

    .. note::
        If the version of pip available is older than 8.0.3, the packages
        ``wheel``, ``setuptools``, and ``distribute`` will not be reported by
        this function even if they are installed. Unlike :py:func:`pip.freeze
        <salt.modules.pip.freeze>`, this function always reports the version of
        pip which is installed.

    CLI Example:

    .. code-block:: bash

        salt '*' pip.list salt
    '''
    packages = {}
    # pip itself never shows up in freeze output; report it explicitly.
    if prefix is None or 'pip'.startswith(prefix):
        packages['pip'] = version(bin_env)
    for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars, **kwargs):
        if line.startswith('-f') or line.startswith('#'):
            # ignore -f line as it contains --find-links directory
            # ignore comment lines
            continue
        elif line.startswith('-e hg+not trust'):
            # ignore hg+not trust problem
            continue
        elif line.startswith('-e'):
            line = line.split('-e ')[1]
        if '#egg=' in line:
            version_, name = line.split('#egg=')
        else:
            # Fix: the original repeated the '===' and '==' branches twice;
            # the duplicates were unreachable dead code and are removed.
            if len(line.split('===')) >= 2:
                name = line.split('===')[0]
                version_ = line.split('===')[1]
            elif len(line.split('==')) >= 2:
                name = line.split('==')[0]
                version_ = line.split('==')[1]
            else:
                logger.error('Can\'t parse line \'%s\'', line)
                continue
        if prefix:
            if name.lower().startswith(prefix.lower()):
                packages[name] = version_
        else:
            packages[name] = version_
    return packages
|
def append(self, data):
    """Appends the given data to the buffer, and triggers all connected
    monitors, if any of them match the buffer content.

    :type data: str
    :param data: The data that is appended.
    """
    self.io.write(data)
    if not self.monitors:
        return
    # Check whether any of the monitoring regular expressions matches.
    # If it does, we need to disable that monitor until the matching
    # data is no longer in the buffer. We accomplish this by keeping
    # track of the position of the last matching byte.
    buf = str(self)
    for item in self.monitors:
        regex_list, callback, bytepos, limit = item
        # Never search further back than 'limit' bytes from the end.
        bytepos = max(bytepos, len(buf) - limit)
        for i, regex in enumerate(regex_list):
            match = regex.search(buf, bytepos)
            if match is not None:
                # Remember the end of the match so this monitor stays
                # quiet until new data arrives past it.
                item[2] = match.end()
                callback(i, match)
|
def reverse_transform(self, col):
    """Convert transformed data back into the original format.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame: single-column frame holding the recovered
        categories.
    """
    restored = self.get_category(col[self.col_name])
    return pd.DataFrame({self.col_name: restored})
|
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to
    :rtype: WizardStep instance or None
    """
    parent = self.parent
    if is_raster_layer(parent.layer):
        return parent.step_kw_band_selector
    return parent.step_kw_layermode
|
def path_yield(path):
    """Yield each non-empty component of ``path`` in order."""
    for part in path.strip(SEP).split(SEP):
        if part not in (None, ''):
            yield part
|
def get_hash(self, ireq, ireq_hashes=None):
    """Retrieve hashes for a specific ``InstallRequirement`` instance.

    :param ireq: An ``InstallRequirement`` to retrieve hashes for
    :type ireq: :class:`~pip_shims.InstallRequirement`
    :param ireq_hashes: hashes already resolved for ``ireq``, if any.
    :return: A set of hashes.
    :rtype: Set
    """
    # We _ALWAYS MUST PRIORITIZE_ the inclusion of hashes from local sources
    # PLEASE *DO NOT MODIFY THIS* TO CHECK WHETHER AN IREQ ALREADY HAS A HASH
    # RESOLVED. The resolver will pull hashes from PyPI and only from PyPI.
    # The entire purpose of this approach is to include missing hashes.
    # This fixes a race condition in resolution for missing dependency caches
    # see pypa/pipenv#3289
    if not self._should_include_hash(ireq):
        return set()
    # Fix: _should_include_hash(ireq) is known truthy past the guard above,
    # so the original's redundant second call has been dropped.
    if not ireq_hashes or ireq.link.scheme == "file":
        if not ireq_hashes:
            ireq_hashes = set()
        new_hashes = self.resolver.repository._hash_cache.get_hash(ireq.link)
        ireq_hashes = add_to_set(ireq_hashes, new_hashes)
    else:
        ireq_hashes = set(ireq_hashes)
    # The _ONLY CASE_ where we flat out set the value is if it isn't present
    # It's a set, so otherwise we *always* need to do a union update
    if ireq not in self.hashes:
        return ireq_hashes
    return self.hashes[ireq] | ireq_hashes
|
def generate_output_list(self, source, key, val, line='2', hr=True, show_name=False, colorize=True):
    """The function for generating CLI output RDAP list results.

    Args:
        source (:obj:`str`): The parent key 'network' or 'objects'
            (required).
        key (:obj:`str`): The event key 'events' or 'events_actor'
            (required).
        val (:obj:`dict`): The event dictionary (required).
        line (:obj:`str`): The line number (0-4). Determines indentation.
            Defaults to '0'.
        hr (:obj:`bool`): Enable human readable key translations. Defaults
            to True.
        show_name (:obj:`bool`): Show human readable name (default is to
            only show short). Defaults to False.
        colorize (:obj:`bool`): Colorize the console output with ANSI
            colors. Defaults to True.

    Returns:
        str: The generated output.
    """
    # Header line: translated key name when hr is set; shows the literal
    # 'None' as the value when the list is empty or missing.
    output = generate_output(
        line=line,
        short=HR_RDAP[source][key]['_short'] if hr else key,
        name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
        is_parent=False if (val is None or len(val) == 0) else True,
        value='None' if (val is None or len(val) == 0) else None,
        colorize=colorize)
    if val is not None:
        # One line per list item, indented one level deeper than the header.
        for item in val:
            output += generate_output(line=str(int(line) + 1), value=item, colorize=colorize)
    return output
|
def p_field_optional2_3(self, p):
    """field : name arguments directives"""
    # PLY grammar action: the docstring above IS the production rule, so it
    # must not be reworded. Build the Field AST node from the symbols.
    name, arguments, directives = p[1], p[2], p[3]
    p[0] = Field(name=name, arguments=arguments, directives=directives)
|
def _reset ( self ) :
"""Reset internal flags and entry lists ."""
|
self . entries = [ ]
self . default_entry = None
self . disallow_all = False
self . allow_all = False
self . last_checked = 0
# list of tuples ( sitemap url , line number )
self . sitemap_urls = [ ]
|
def as_condition(cls, obj):
    """Convert ``obj`` into a :class:`Condition`.

    ``obj`` is returned untouched when it already is a ``cls`` instance;
    otherwise it becomes the ``cmap`` of a freshly built one.
    """
    return obj if isinstance(obj, cls) else cls(cmap=obj)
|
def innerLoop(self):
    """The main loop for processing jobs by the leader."""
    self.timeSinceJobsLastRescued = time.time()
    # Keep looping while there is any outstanding work: jobs ready to
    # process, jobs issued to the batch system, or service-managed jobs.
    while self.toilState.updatedJobs or self.getNumberOfJobsIssued() or self.serviceManager.jobsIssuedToServiceManager:
        if self.toilState.updatedJobs:
            self._processReadyJobs()
        # deal with service-related jobs
        self._startServiceJobs()
        self._processJobsWithRunningServices()
        # check in with the batch system
        updatedJobTuple = self.batchSystem.getUpdatedBatchJob(maxWait=2)
        if updatedJobTuple is not None:
            self._gatherUpdatedJobs(updatedJobTuple)
        else:
            self._processLostJobs()
        # Check on the associated threads and exit if a failure is detected
        self.statsAndLogging.check()
        self.serviceManager.check()
        # the cluster scaler object will only be instantiated if autoscaling
        # is enabled
        if self.clusterScaler is not None:
            self.clusterScaler.check()
        # Nothing happened this round and it's been long enough since we
        # last checked. Check for deadlocks.
        if len(self.toilState.updatedJobs) == 0 and self.deadlockThrottler.throttle(wait=False):
            self.checkForDeadlocks()
    logger.debug("Finished the main loop: no jobs left to run.")
    # Consistency check the toil state
    assert self.toilState.updatedJobs == set()
    assert self.toilState.successorCounts == {}
    assert self.toilState.successorJobStoreIDToPredecessorJobs == {}
    assert self.toilState.serviceJobStoreIDToPredecessorJob == {}
    assert self.toilState.servicesIssued == {}
|
def normalize_per_cell_weinreb16_deprecated(X, max_fraction=1, mult_with_mean=False) -> np.ndarray:
    """Normalize each cell [Weinreb17]_.

    This is a deprecated version. See `normalize_per_cell` instead.

    Normalize each cell by UMI count, so that every cell has the same total
    count.

    Parameters
    ----------
    X : np.ndarray
        Expression matrix. Rows correspond to cells and columns to genes.
    max_fraction : float, optional
        Only use genes that make up more than max_fraction of the total
        reads in every cell.
    mult_with_mean : bool, optional
        Multiply the result with the mean of total counts.

    Returns
    -------
    Normalized version of the original expression matrix.
    """
    if not 0 <= max_fraction <= 1:
        raise ValueError('Choose max_fraction between 0 and 1.')
    sparse = issparse(X)
    counts_per_cell = X.sum(1).A1 if sparse else X.sum(1)
    # Keep only genes that never exceed max_fraction of any cell's total.
    gene_subset = np.all(X <= counts_per_cell[:, None] * max_fraction, axis=0)
    if sparse:
        gene_subset = gene_subset.A1
    subset_counts = X[:, gene_subset].sum(1)
    tc_include = subset_counts.A1 if sparse else subset_counts
    if sparse:
        X_norm = X.multiply(csr_matrix(1 / tc_include[:, None]))
    else:
        X_norm = X / tc_include[:, None]
    if mult_with_mean:
        X_norm *= np.mean(counts_per_cell)
    return X_norm
|
def _ascii_find_urls(bytes, mimetype, extra_tokens=True):
    """Find URLs inside of ASCII bytes.

    NOTE(review): despite the name, this simply returns the token list
    produced by _tokenize -- confirm callers expect raw tokens.
    """
    return _tokenize(bytes, mimetype, extra_tokens=extra_tokens)
|
def fromDatetime(klass, dtime):
    """Return a new Time instance from a datetime.datetime instance.

    If the datetime instance does not have an associated timezone, it is
    assumed to be UTC.
    """
    instance = klass.__new__(klass)
    if dtime.tzinfo is None:
        instance._time = dtime
    else:
        # Normalize aware datetimes to naive UTC.
        instance._time = dtime.astimezone(FixedOffset(0, 0)).replace(tzinfo=None)
    instance.resolution = datetime.timedelta.resolution
    return instance
|
def request_error_header(exception):
    """Generate the WWW-Authenticate error header for a request using a
    Bearer token, based on the given OAuth exception."""
    from .conf import options
    parts = ["Bearer realm=\"%s\"" % (options.realm,)]
    if hasattr(exception, "error"):
        parts.append("error=\"%s\"" % (exception.error,))
    if hasattr(exception, "reason"):
        parts.append("error_description=\"%s\"" % (exception.reason,))
    return ", ".join(parts)
|
def set_XY(self, X=None, Y=None):
    """Set the input/output data of the model.

    This is useful if we wish to change our existing data but maintain the
    same model.

    :param X: input observations
    :type X: np.ndarray
    :param Y: output observations
    :type Y: np.ndarray
    """
    # Suspend model updates while the data is swapped out.
    self.update_model(False)
    if Y is not None:
        if self.normalizer is not None:
            # Refit the normalizer to the new outputs before normalizing.
            self.normalizer.scale_by(Y)
            self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
            self.Y = Y
        else:
            self.Y = ObsAr(Y)
            self.Y_normalized = self.Y
    if X is not None:
        if self.X in self.parameters:  # LVM models
            # X is itself a model parameter and must be re-linked at the
            # same parameter index it previously occupied.
            if isinstance(self.X, VariationalPosterior):
                assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!"
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                self.X = X
                self.link_parameter(self.X, index=index)
            else:
                # Wrap plain arrays as a Param before re-linking.
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                from ..core import Param
                self.X = Param('latent mean', X)
                self.link_parameter(self.X, index=index)
        else:
            self.X = ObsAr(X)
    # Re-enable updates and trigger a recompute with the new data.
    self.update_model(True)
|
def find_recursive_dependency(self):
    """Return a list of nodes that form a recursive dependency.

    The result is the depth-first path at the moment a cycle was detected
    (ending with the repeated node), or an empty list when the dependency
    graph is acyclic.
    """
    path = []

    def visit(nodes):
        # Depth-first walk; leaves 'path' populated when a cycle is found.
        for node in nodes:
            if node in path:
                path.append(node)
                return True
            path.append(node)
            if visit(self.deps.get(node, [])):
                return True
            path.pop()
        return False

    visit(self.unordered)
    return path
|
def _preoptimize_model(self):
    """Preoptimizes the model by estimating a Gaussian state space model.

    Returns
    -------
    Gaussian model latent variable object
    """
    # Fit an auxiliary Gaussian dynamic regression on the same data.
    gaussian_model = DynReg(formula=self.formula, data=self.data_original)
    gaussian_model.fit()
    # Seed the non-family latent variables from the Gaussian fit
    # (offset by one to skip its first latent variable).
    for i in range(self.z_no - self.family_z_no):
        self.latent_variables.z_list[i].start = gaussian_model.latent_variables.get_z_values()[i + 1]
    if self.model_name2 == 't':
        # Estimate df/loc/scale of a Student-t directly by ML; positive
        # parameters are optimized on the log scale (hence np.exp).
        def temp_function(params):
            return -np.sum(ss.t.logpdf(x=self.data, df=np.exp(params[0]), loc=np.ones(self.data.shape[0]) * params[1], scale=np.exp(params[2])))
        p = optimize.minimize(temp_function, np.array([2.0, 0.0, -1.0]), method='L-BFGS-B')
        self.latent_variables.z_list[-2].start = p.x[2]
        self.latent_variables.z_list[-1].start = p.x[0]
    elif self.model_name2 == 'Skewt':
        # As above, with an additional skewness parameter (gamma).
        def temp_function(params):
            return -np.sum(fam.Skewt.logpdf_internal(x=self.data, df=np.exp(params[0]), loc=np.ones(self.data.shape[0]) * params[1], scale=np.exp(params[2]), gamma=np.exp(params[3])))
        p = optimize.minimize(temp_function, np.array([2.0, 0.0, -1.0, 0.0]), method='L-BFGS-B')
        self.latent_variables.z_list[-3].start = p.x[3]
        self.latent_variables.z_list[-2].start = p.x[2]
        self.latent_variables.z_list[-1].start = p.x[0]
    return gaussian_model.latent_variables
|
def populate_keys_tree(self):
    """Reads the HOTKEYS global variable and inserts all data in the
    TreeStore used by the preferences window treeview."""
    for group in HOTKEYS:
        # Top-level row carries the group label only.
        parent = self.store.append(None, [None, group['label'], None, None])
        for item in group['keys']:
            # show-hide / show-focus live in the global keybinding
            # settings; every other key is local.
            if item['key'] == "show-hide" or item['key'] == "show-focus":
                accel = self.settings.keybindingsGlobal.get_string(item['key'])
            else:
                accel = self.settings.keybindingsLocal.get_string(item['key'])
            gsettings_path = item['key']
            # Convert the accelerator string to a human-readable label.
            keycode, mask = Gtk.accelerator_parse(accel)
            keylabel = Gtk.accelerator_get_label(keycode, mask)
            self.store.append(parent, [gsettings_path, item['label'], keylabel, accel])
    self.get_widget('treeview-keys').expand_all()
|
def topic(self, channel, topic=None):
    """Change or request the topic of a channel.

    With a truthy ``topic`` the channel topic is changed; otherwise the
    current topic is requested.
    """
    target = channel + ' :' + topic if topic else channel
    self.send_line('TOPIC %s' % target)
|
def attrgetter_atom_split(tokens):
    """Split attrgetter_atom_tokens into
    (attr_or_method_name, method_args_or_none_if_attr)."""
    count = len(tokens)
    if count == 1:
        # .attr
        return tokens[0], None
    if count >= 2 and tokens[1] == "(":
        if count == 2:
            # .method()
            return tokens[0], ""
        if count == 3:
            # .method(args)
            return tokens[0], tokens[2]
        raise CoconutInternalException("invalid methodcaller literal tokens", tokens)
    raise CoconutInternalException("invalid attrgetter literal tokens", tokens)
|
def tokml(self):
    """Generate a KML Placemark element subtree.

    Returns:
        etree.Element: KML Placemark element
    """
    placemark = create_elem('Placemark')
    if self.name:
        placemark.set('id', self.name)
        placemark.name = create_elem('name', text=self.name)
    if self.description:
        placemark.description = create_elem('description', text=self.description)
    placemark.Point = create_elem('Point')
    # KML coordinate order is longitude,latitude[,altitude].
    data = [str(self.longitude), str(self.latitude)]
    # NOTE(review): an altitude of exactly 0 is skipped by this truthiness
    # test -- confirm that is intended.
    if self.altitude:
        if int(self.altitude) == self.altitude:
            # Render whole-number altitudes without a decimal point.
            data.append('%i' % self.altitude)
        else:
            data.append(str(self.altitude))
    placemark.Point.coordinates = create_elem('coordinates', text=','.join(data))
    return placemark
|
def get_content(self, obj):
    """All content for an office's page on an election day."""
    date = self.context['election_date']
    election_day = ElectionDay.objects.get(date=date)
    return PageContent.objects.office_content(election_day, obj)
|
def db_for_write(self, model, **hints):
    """Route writes for a saved instance to the database it was loaded
    from; otherwise return the salesforce alias when ``model`` is a
    SalesforceModel subclass (and None for everything else)."""
    if 'instance' in hints:
        db = hints['instance']._state.db
        if db:
            return db
    if getattr(model, '_salesforce_object', False):
        return self.sf_alias
|
def get_stream(self, stream_name: str) -> StreamWrapper:
    """Get a :py:class:`StreamWrapper` with the given name.

    :param stream_name: stream name
    :return: dataset function name providing the respective stream
    :raise AttributeError: if the dataset does not provide the function
        creating the stream
    """
    if stream_name not in self._streams:
        # Streams are created lazily from `<name>_stream` dataset methods
        # and cached for subsequent calls.
        stream_fn_name = '{}_stream'.format(stream_name)
        try:
            stream_fn = getattr(self._dataset, stream_fn_name)
            # -1 means "no fixed epoch size"; only the train stream may be
            # limited to a fixed number of batches per epoch.
            stream_epoch_limit = -1
            if self._fixed_epoch_size is not None and stream_name == self._train_stream_name:
                stream_epoch_limit = self._fixed_epoch_size
            self._streams[stream_name] = StreamWrapper(stream_fn, buffer_size=self._buffer, epoch_size=stream_epoch_limit, name=stream_name, profile=self._epoch_profile)
        except AttributeError as ex:
            raise AttributeError('The dataset does not have a function for creating a stream named `{}`. ' 'The function has to be named `{}`.'.format(stream_name, stream_fn_name)) from ex
    return self._streams[stream_name]
|
def reverse_point(self, latitude, longitude, **kwargs):
    """Identify an address from a geographic point (reverse geocoding)."""
    fields = ",".join(kwargs.pop("fields", []))
    params = {"q": "{0},{1}".format(latitude, longitude), "fields": fields}
    response = self._req(verb="reverse", params=params)
    if response.status_code == 200:
        return Location(response.json())
    return error_response(response)
|
def draw_pegasus_yield(G, **kwargs):
    """Draws the given graph G with highlighted faults, according to layout.

    Parameters
    ----------
    G : NetworkX graph
        The graph to be parsed for faults.
    unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
        The color to use for nodes and edges of G which are not faults.
        If unused_color is None, these nodes and edges will not be shown at all.
    fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0))
        A color to represent nodes absent from the graph G. Colors should be
        length-4 tuples of floats between 0 and 1 inclusive.
    fault_shape : string, optional (default='x')
        The shape of the fault nodes. Specification is as matplotlib.scatter
        marker, one of 'so^>v<dph8'.
    fault_style : string, optional (default='dashed')
        Edge fault line style (solid|dashed|dotted|dashdot)
    kwargs : optional keywords
        See networkx.draw_networkx() for a description of optional keywords,
        with the exception of the `pos` parameter which is not used by this
        function. If `linear_biases` or `quadratic_biases` are provided,
        any provided `node_color` or `edge_color` arguments are ignored.
    """
    try:
        # Fix: explicit check replaces `assert` (stripped under -O), and the
        # bare `except:` is narrowed so unrelated errors are not masked.
        if G.graph["family"] != "pegasus":
            raise ValueError("graph family is not pegasus")
        m = G.graph['columns']
        offset_lists = (G.graph['vertical_offsets'], G.graph['horizontal_offsets'])
        coordinates = G.graph["labels"] == "coordinate"
        # Can't interpret fabric_only from graph attributes
    except (KeyError, ValueError):
        raise ValueError("Target pegasus graph needs to have columns, rows, "
                         "tile, and label attributes to be able to identify "
                         "faulty qubits.")
    perfect_graph = pegasus_graph(m, offset_lists=offset_lists, coordinates=coordinates)
    draw_yield(G, pegasus_layout(perfect_graph), perfect_graph, **kwargs)
|
def critical(self, text):
    """Posts a critical message adding a timestamp and logging level to it
    for both file and console handlers.

    Logger uses a redraw rate because of console flickering. That means it
    will not draw new messages or progress at the very time they are being
    logged but their timestamp will be captured at the right time. Logger
    will redraw at a given time period AND when new messages or progress
    are logged. If you still want to force redraw immediately (may produce
    flickering) then call 'flush' method.

    :param text: The text to log into file and console.
    """
    # Serialized with dill so the command can cross the process boundary.
    self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.CRITICAL)))
|
def LR_predict(w, b, X):
    """Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) of shape (1, m) containing all
    0/1 predictions for the examples in X
    """
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)
    # Probability that each example's label is 1.
    A = sigmoid(np.dot(w.T, X) + b)
    # Vectorized thresholding replaces the per-example Python loop:
    # probability > 0.5 -> 1.0, otherwise 0.0.
    Y_prediction = (A > 0.5).astype(np.float64)
    assert Y_prediction.shape == (1, m)
    return Y_prediction
|
def is_null(value):
    """Check if the scalar value or tuple/list value is NULL.

    :param value: Value to check.
    :type value: a scalar or tuple or list
    :return: Returns ``True`` if and only if the value is NULL (scalar value
        is None or _any_ tuple/list elements are None).
    :rtype: bool
    """
    if isinstance(value, (tuple, list)):
        # NULL if any element is None; any() short-circuits on first match.
        return any(v is None for v in value)
    return value is None
|
def match(self, props=None, rng=None, offset=None):
    """Provide any of the args and match or dont.

    :param props: Should be a subset of my props.
    :param rng: Exactly match my range.
    :param offset: I start after this offset.
    :returns: True if all the provided predicates match or are None
    """
    start = end = None
    if rng:
        start, end = rng
    # Guard clauses: fail fast on the first predicate that does not hold.
    if end is not None and self.end != end:
        return False
    if start is not None and self.start != start:
        return False
    if props is not None and not props.issubset(self.props):
        return False
    return offset is None or self.start >= offset
|
def send_private_message(self, user_id, message):
    """Send a message to a specific client.

    :param user_id: identifier of the target client; looked up under its
        string form in the 'private' channel registry.
    :param message: payload forwarded verbatim to the client.

    Returns True if successful, False otherwise
    """
    try:
        # Private channels are keyed by the stringified user id.
        client = self.channels['private'][str(user_id)]
    except KeyError:
        # Unknown client: dump the registry for debugging and report failure.
        # NOTE: Python 2 print statements; this code predates Python 3.
        print '====debug===='
        print self.channels['private']
        print 'client with id %s not found' % user_id
        return False
    client.send_message(message)
    print 'message sent to client #%s' % user_id
    return True
|
def _update_metadata ( self , data , ds_info ) :
"""Update metadata of the given DataArray"""
|
# Metadata from the dataset definition
data . attrs . update ( ds_info )
# If the file _ type attribute is a list and the data is xarray
# the concat of the dataset will not work . As the file _ type is
# not needed this will be popped here .
if 'file_type' in data . attrs :
data . attrs . pop ( 'file_type' )
# Metadata discovered from the file
data . attrs . update ( { 'platform_name' : self . platform_name , 'sensor' : self . sensor , 'sector' : self . sector , 'yaw_flip' : self . meta [ 'yaw_flip' ] } )
if self . meta [ 'lon0' ] is not None : # Attributes only available for full disc images . YAML reader
# doesn ' t like it if satellite _ * is present but None
data . attrs . update ( { 'satellite_longitude' : self . meta [ 'lon0' ] , 'satellite_latitude' : self . meta [ 'lat0' ] , 'satellite_altitude' : ALTITUDE , 'nadir_row' : self . meta [ 'nadir_row' ] , 'nadir_col' : self . meta [ 'nadir_col' ] , 'area_def_uniform_sampling' : self . meta [ 'area_def_uni' ] } )
|
def GetNewSessionID(self, **_):
    """Returns a random integer session ID for this hunt.

    All hunts are created under the aff4:/hunts namespace.

    Returns:
      a formatted session id string.
    """
    hunt_namespace = "aff4:/hunts"
    return rdfvalue.SessionID(base=hunt_namespace, queue=self.runner_args.queue)
|
def destroy(self, request, pk=None, parent_lookup_seedteam=None, parent_lookup_seedteam__organization=None):
    '''Remove a permission from a team.

    :param request: incoming HTTP request.
    :param pk: primary key of the permission to remove.
    :param parent_lookup_seedteam: id of the team the permission belongs to.
    :param parent_lookup_seedteam__organization: id of the team's organization.
    :raises: whatever check_team_permissions raises when the requester is not
        allowed to modify this team.
    '''
    # Verify the requester may act on this team before delegating the actual
    # deletion to the parent viewset implementation.
    self.check_team_permissions(request, parent_lookup_seedteam, parent_lookup_seedteam__organization)
    return super(TeamPermissionViewSet, self).destroy(request, pk, parent_lookup_seedteam, parent_lookup_seedteam__organization)
|
def search_in_hdx(query, configuration=None, **kwargs):
    # type: (str, Optional[Configuration], Any) -> List['Resource']
    """Searches for resources in HDX. NOTE: Does not search dataset metadata!

    Args:
        query (str): Query
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        **kwargs: See below
        order_by (str): A field on the Resource model that orders the results
        offset (int): Apply an offset to the query
        limit (int): Apply a limit to the query

    Returns:
        List[Resource]: List of resources resulting from query
    """
    searcher = Resource(configuration=configuration)
    success, result = searcher._read_from_hdx('resource', query, 'query', Resource.actions()['search'])
    if not result:
        return []
    if result.get('count', None):
        # Wrap each raw result dictionary in a Resource object.
        return [Resource(resourcedict, configuration=configuration)
                for resourcedict in result['results']]
    logger.debug(result)
    return []
|
def corr(dataset, column, method="pearson"):
    """Compute the correlation matrix with specified method using dataset.

    :param dataset:
      A Dataset or a DataFrame.
    :param column:
      The name of the column of vectors for which the correlation coefficient needs
      to be computed. This must be a column of the dataset, and it must contain
      Vector objects.
    :param method:
      String specifying the method to use for computing correlation.
      Supported: `pearson` (default), `spearman`.
    :return:
      A DataFrame that contains the correlation matrix of the column of vectors. This
      DataFrame contains a single row and a single column of name
      '$METHODNAME($COLUMN)'.

    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.stat import Correlation
    >>> dataset = [[Vectors.dense([1, 0, 0, -2])],
    ...            [Vectors.dense([4, 5, 0, 3])],
    ...            [Vectors.dense([6, 7, 0, 8])],
    ...            [Vectors.dense([9, 0, 0, 1])]]
    >>> dataset = spark.createDataFrame(dataset, ['features'])
    >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
    >>> print(str(pearsonCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.0556...,         NaN,  0.4004...],
                 [ 0.0556...,  1.        ,         NaN,  0.9135...],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4004...,  0.9135...,         NaN,  1.        ]])
    >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
    >>> print(str(spearmanCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.1054...,         NaN,  0.4       ],
                 [ 0.1054...,  1.        ,         NaN,  0.9486...],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4       ,  0.9486...,         NaN,  1.        ]])
    """
    sc = SparkContext._active_spark_context
    java_corr_obj = _jvm().org.apache.spark.ml.stat.Correlation
    # Convert each Python argument to its JVM counterpart before the call.
    j_dataset = _py2java(sc, dataset)
    j_column = _py2java(sc, column)
    j_method = _py2java(sc, method)
    return _java2py(sc, java_corr_obj.corr(j_dataset, j_column, j_method))
|
def save_to_json(self):
    """Serialize this DatasetUpload object to the JSON request payload."""
    payload = {
        'DatasetId': self.dataset,
        'Name': self.name,
        'Description': self.description,
        'Source': self.source,
        'PubDate': self.publication_date,
        'AccessedOn': self.accessed_on,
        'Url': self.dataset_ref,
        'UploadFormatType': self.upload_format_type,
        'Columns': self.columns,
        # The file property object is serialized via its attribute dict.
        'FileProperty': self.file_property.__dict__,
        'FlatDSUpdateOptions': self.flat_ds_update_options,
        'Public': self.public,
    }
    return json.dumps(payload)
|
def _cooked_fields(self, dj_fields):
    """Returns a tuple of cooked fields.

    Each entry maps a requested django field name to the concrete attribute
    to read plus the resolved field object:
    ``(attribute_name, requested_name, field_or_None)``.

    :param dj_fields: a list of django name fields
    :return: list of ``(attribute, name, field)`` triples
    """
    from django.db import models
    valids = []
    for field in dj_fields:
        try:
            # NOTE(review): Model._meta.get_field_by_name() was deprecated in
            # Django 1.8 and removed in 1.10; this assumes an older Django.
            dj_field, _, _, _ = self.model._meta.get_field_by_name(field)
            if isinstance(dj_field, models.ForeignKey):
                # Foreign keys are read through their "<name>_id" attribute.
                valids.append((field + "_id", field, dj_field))
            else:
                valids.append((field, field, dj_field))
        except models.FieldDoesNotExist:
            # Unknown names are kept, but with no field object attached.
            valids.append((field, field, None))
    return valids
|
def sent_tokenize(text: str, engine: str = "whitespace+newline") -> List[str]:
    """Split text on whitespace; does not truly detect sentence boundaries.

    This function does not yet automatically recognize when a sentence
    actually ends.  Rather it helps split text where whitespace and a new
    line is found.

    :param str text: the text to be tokenized
    :param str engine: choose between 'whitespace' or 'whitespace+newline'
    :return: list of sentences
    """
    if not text or not isinstance(text, str):
        return []
    if engine == "whitespace":
        # BUG FIX: re.split's third positional argument is *maxsplit*, not
        # *flags*; passing re.U (== 32) there silently capped the number of
        # splits at 32.  Pass the flag by keyword instead.
        return re.split(r" +", text, flags=re.U)
    # default: whitespace + newline -- split on any whitespace run
    return text.split()
|
def validate_node_sign(signature_node, elem, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
    """Validates a signature node.

    :param signature_node: The signature node
    :type: Node
    :param elem: The element we should validate
    :type: Document
    :param cert: The public cert
    :type: string
    :param fingerprint: The fingerprint of the public cert
    :type: string
    :param fingerprintalg: The algorithm used to build the fingerprint
    :type: string
    :param validatecert: If true, will verify the signature and if the cert is valid.
    :type: bool
    :param debug: Activate the xmlsec debug
    :type: bool
    :return: True on success; otherwise an exception is raised.

    NOTE(review): `elem` and `debug` are not referenced in this body --
    presumably consumed by callers or kept for API compatibility; confirm.
    """
    # If no explicit cert was supplied, try to recover it from the
    # ds:X509Certificate embedded in the signature itself, accepting it only
    # when its fingerprint matches the expected one.
    if (cert is None or cert == '') and fingerprint:
        x509_certificate_nodes = OneLogin_Saml2_XML.query(signature_node, '//ds:Signature/ds:KeyInfo/ds:X509Data/ds:X509Certificate')
        if len(x509_certificate_nodes) > 0:
            x509_certificate_node = x509_certificate_nodes[0]
            x509_cert_value = OneLogin_Saml2_XML.element_text(x509_certificate_node)
            x509_cert_value_formatted = OneLogin_Saml2_Utils.format_cert(x509_cert_value)
            x509_fingerprint_value = OneLogin_Saml2_Utils.calculate_x509_fingerprint(x509_cert_value_formatted, fingerprintalg)
            if fingerprint == x509_fingerprint_value:
                cert = x509_cert_value_formatted
    # Without a certificate there is nothing to verify against.
    if cert is None or cert == '':
        raise OneLogin_Saml2_Error('Could not validate node signature: No certificate provided.', OneLogin_Saml2_Error.CERT_NOT_FOUND)
    # Check if Reference URI is empty; if so, point it at the parent's ID so
    # xmlsec can resolve which element the signature covers.
    reference_elem = OneLogin_Saml2_XML.query(signature_node, '//ds:Reference')
    if len(reference_elem) > 0:
        if reference_elem[0].get('URI') == '':
            reference_elem[0].set('URI', '#%s' % signature_node.getparent().get('ID'))
    if validatecert:
        # Load the cert as *trusted* so xmlsec also validates the certificate.
        manager = xmlsec.KeysManager()
        manager.load_cert_from_memory(cert, xmlsec.KeyFormat.CERT_PEM, xmlsec.KeyDataType.TRUSTED)
        dsig_ctx = xmlsec.SignatureContext(manager)
    else:
        # Only verify the signature value against the supplied key material.
        dsig_ctx = xmlsec.SignatureContext()
        dsig_ctx.key = xmlsec.Key.from_memory(cert, xmlsec.KeyFormat.CERT_PEM, None)
    dsig_ctx.set_enabled_key_data([xmlsec.KeyData.X509])
    try:
        dsig_ctx.verify(signature_node)
    except Exception as err:
        # Re-raise as a domain error carrying the underlying xmlsec message.
        raise OneLogin_Saml2_ValidationError('Signature validation failed. SAML Response rejected. %s', OneLogin_Saml2_ValidationError.INVALID_SIGNATURE, str(err))
    return True
|
def Churchill_1977(Re, eD):
    r'''Calculates Darcy friction factor using the method in Churchill
    (1977) [2]_ as shown in [1]_.

    .. math::
        f_f = 2\left[(\frac{8}{Re})^{12} + (A_2 + A_3)^{-1.5}\right]^{1/12}
    .. math::
        A_2 = \left\{2.457\ln\left[(\frac{7}{Re})^{0.9}
        + 0.27\frac{\epsilon}{D}\right]\right\}^{16}
    .. math::
        A_3 = \left(\frac{37530}{Re}\right)^{16}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    No range of validity specified for this equation.

    Examples
    --------
    >>> Churchill_1977(1E5, 1E-4)
    0.018462624566280075

    References
    ----------
    .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
       Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
       and Combustion 90, no. 1 (January 1, 2013): 1-27.
       doi:10.1007/s10494-012-9419-7
    .. [2] Churchill, S.W.: Friction factor equation spans all fluid flow
       regimes. Chem. Eng. J. 91, 91-92 (1977)
    '''
    # Laminar and turbulent/roughness contributions of the correlation.
    A2 = (2.457*log((7./Re)**0.9 + 0.27*eD))**16
    A3 = (37530/Re)**16
    # The correlation yields the Fanning friction factor; Darcy = 4*Fanning.
    fanning = 2*((8/Re)**12 + 1/(A2 + A3)**1.5)**(1/12.)
    return 4*fanning
|
def parse_list_parts(data, bucket_name, object_name, upload_id):
    """Parser for list parts response.

    :param data: Response data for list parts.
    :param bucket_name: Response for the bucket.
    :param object_name: Response for the object.
    :param upload_id: Upload id of object name for
       the active multipart session.
    :return: Replies back three distinctive components.
        - List of :class:`UploadPart <UploadPart>`.
        - True if list is truncated, False otherwise.
        - Next part marker for the next request if the
          list was truncated.
    """
    # Parse the XML payload into an S3Element tree rooted at ListPartsResult.
    root = S3Element.fromstring('ListPartsResult', data)
    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # NextPartNumberMarker is optional (absent on the final page).
    part_marker = root.get_child_text('NextPartNumberMarker', strict=False)
    # Build one UploadPart per <Part> element in the response.
    parts = [UploadPart(bucket_name, object_name, upload_id, part.get_int_elem('PartNumber'), part.get_etag_elem(), part.get_localized_time_elem('LastModified'), part.get_int_elem('Size')) for part in root.findall('Part')]
    return parts, is_truncated, part_marker
|
def _registerHandler ( self , handler ) :
"""Registers a handler .
: param handler : A handler object ."""
|
self . _logger . addHandler ( handler )
self . _handlers . append ( handler )
|
def getServiceMessages(self, remote):
    """Get service messages from CCU/Homegear.

    Returns the proxy's response, or None (after logging) if the call fails.
    """
    proxy_key = "%s-%s" % (self._interface_id, remote)
    try:
        return self.proxies[proxy_key].getServiceMessages()
    except Exception as err:
        LOG.debug("ServerThread.getServiceMessages: Exception: %s" % str(err))
|
def float_str(f, min_digits=2, max_digits=6):
    """Returns a string representing a float, where the number of
    significant digits is min_digits unless it takes more digits
    to hit a non-zero digit (and the number is 0 < x < 1).
    We stop looking for a non-zero digit after max_digits."""
    if f >= 1 or f <= 0:
        return str(round_float(f, min_digits))
    # Examine the fractional digits of the max-precision rendering.
    fractional = str(round_float(f, max_digits)).split(".")[1]
    # 1-based positions of every non-zero fractional digit.
    nonzero_positions = [pos for pos, ch in enumerate(fractional, start=1)
                         if ch != "0"]
    if not nonzero_positions:
        # Only zeros seen: fall back to the minimum precision.
        num_digits = min_digits
    else:
        # Of the non-zero digits, keep up to min_digits positions starting at
        # the first one (zeros in between count toward the quota).
        window = range(nonzero_positions[0], nonzero_positions[-1] + 1)[:min_digits]
        num_digits = window[-1]
    return str(round_float(f, num_digits))
|
def strides(self, time=None, spatial=None):
    """Set time and/or spatial (horizontal) strides.

    This is only used on grid requests. Used to skip points in the returned
    data. This modifies the query in-place, but returns `self` so that
    multiple queries can be chained together on one line.

    Parameters
    ----------
    time : int, optional
        Stride for times returned. Defaults to None, which is equivalent to 1.
    spatial : int, optional
        Stride for horizontal grid. Defaults to None, which is equivalent to 1.

    Returns
    -------
    self : NCSSQuery
        Returns self for chaining calls
    """
    # Only truthy strides are sent; None (or 0) leaves the parameter unset.
    for param_name, value in (('timeStride', time), ('horizStride', spatial)):
        if value:
            self.add_query_parameter(**{param_name: value})
    return self
|
def reload(self):
    """Reload notmuch and alot config files.

    Re-reads both configuration files from the paths they were originally
    loaded from, picking up any changes made on disk.
    """
    self.read_notmuch_config(self._notmuchconfig.filename)
    self.read_config(self._config.filename)
|
def get(no_create=False, server=None, port=None, force_uuid=None):
    """Get the thread local singleton.

    One Wdb instance is kept per (process id, thread) pair in
    ``Wdb._instances``.  A missing instance is created unless *no_create*
    is set; an existing one is reconnected if needed.

    :param no_create: when True, return None instead of creating an instance.
    :param server: server host for a newly created instance.
    :param port: server port for a newly created instance.
    :param force_uuid: uuid forced onto a newly created instance.
    :return: the Wdb instance for this process/thread, or None.
    """
    pid = os.getpid()
    thread = threading.current_thread()
    wdb = Wdb._instances.get((pid, thread))
    if not wdb and not no_create:
        # Build the instance without calling Wdb() directly so pid/thread can
        # be attached and the registry entry installed explicitly.
        wdb = object.__new__(Wdb)
        Wdb.__init__(wdb, server, port, force_uuid)
        wdb.pid = pid
        wdb.thread = thread
        Wdb._instances[(pid, thread)] = wdb
    elif wdb:
        # An existing instance keeps its original endpoint; conflicting
        # server/port arguments are rejected with a warning.
        if (server is not None and wdb.server != server or port is not None and wdb.port != port):
            log.warn('Different server/port set, ignoring')
        else:
            wdb.reconnect_if_needed()
    return wdb
|
def truncate(text, length=50, ellipsis='...'):
    """Returns a truncated version of the inputted text.

    :param text: <str>
    :param length: <int>
    :param ellipsis: <str>

    :return <str>
    """
    text = nativestring(text)
    kept = text[:length]
    overflow = text[length:]
    # Append the ellipsis only when something was actually cut off.
    if overflow:
        return kept + ellipsis
    return kept
|
def cached(key=None, extradata=None):
    '''Decorator used for caching.

    :param key: explicit cache key; when falsy, a key is derived from the
        md5 of the call's positional and (sorted) keyword arguments.
    :param extradata: optional extra payload stored alongside the result.
    '''
    if extradata is None:
        # Avoid the shared-mutable-default-argument pitfall.
        extradata = {}

    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            uid = key
            if not uid:
                from hashlib import md5
                arguments = list(args) + [(a, kwargs[a]) for a in sorted(kwargs.keys())]
                # BUG FIX: hashlib requires bytes in Python 3; encode the
                # stringified arguments before hashing.
                uid = md5(str(arguments).encode('utf-8')).hexdigest()
            if exists(uid):
                debug('Item \'%s\' is cached (%s).' % (uid, cache))
                return get(uid)
            debug('Item \'%s\' is not cached (%s).' % (uid, cache))
            result = f(*args, **kwargs)
            debug('Caching result \'%s\' as \'%s\' (%s)...' % (result, uid, cache))
            debug('Extra data: ' + (str(extradata) or 'None'))
            put(uid, result, extradata)
            return result
        return wrapper
    return decorator
|
def _encode_mapping(name, value, check_keys, opts):
    """Encode a mapping type.

    Produces a BSON embedded-document element: type byte 0x03, the element
    name (already a NUL-terminated C-string), then a length-prefixed document
    body ending in a NUL byte.  The +5 accounts for the 4-byte length prefix
    plus the trailing 0x00.
    """
    data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in iteritems(value)])
    return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
|
def cosmetics(flat1, flat2=None, mask=None, lowercut=6.0, uppercut=6.0, siglev=2.0):
    """Find cosmetic defects in a detector using two flat field images.

    Two arrays representing flat fields of different exposure times are
    required. Cosmetic defects are selected as points that deviate
    significantly of the expected normal distribution of pixels in
    the ratio between `flat2` and `flat1`.

    The median of the ratio array is computed and subtracted to it.
    The standard deviation of the distribution of pixels is computed
    obtaining the percentiles nearest the pixel values corresponding to
    `nsig` in the normal CDF. The standard deviation is then the distance
    between the pixel values divided by two times `nsig`.
    The ratio image is then normalized with this standard deviation.

    The values in the ratio above `uppercut` are flagged as hot pixels,
    and those below `-lowercut` are flagged as dead pixels in the output mask.

    :parameter flat1: an array representing a flat illuminated exposure.
    :parameter flat2: an array representing a flat illuminated exposure.
    :parameter mask: an integer array representing initial mask.
    :parameter lowercut: values bellow this sigma level are flagged as dead pixels.
    :parameter uppercut: values above this sigma level are flagged as hot pixels.
    :parameter siglev: level to estimate the standard deviation.
    :returns: the updated mask
    """
    if flat2 is None:
        # Single-flat mode: compare the given flat against an all-ones frame.
        flat1, flat2 = flat2, flat1
        flat1 = numpy.ones_like(flat2)
    if type(mask) is not numpy.ndarray:
        mask = numpy.zeros(flat1.shape, dtype='int')
    ratio, mask = comp_ratio(flat1, flat2, mask)
    # NOTE(review): `~mask` is a bitwise NOT; this selects "unmasked pixels"
    # only if comp_ratio returns a boolean mask -- confirm its return dtype.
    fratio1 = ratio[~mask]
    # Robust centre and spread of the ratio distribution.
    central = numpy.median(fratio1)
    std = robust_std(fratio1, central, siglev)
    # Hot pixels deviate above, dead pixels below, the robust distribution.
    mask_u = ratio > central + uppercut * std
    mask_d = ratio < central - lowercut * std
    mask_final = mask_u | mask_d | mask
    return mask_final
|
def _validate_fname ( fname , arg_name ) :
"""Validate that a string is a valid file name ."""
|
if fname is not None :
msg = "Argument `{0}` is not valid" . format ( arg_name )
if ( not isinstance ( fname , str ) ) or ( isinstance ( fname , str ) and ( "\0" in fname ) ) :
raise RuntimeError ( msg )
try :
if not os . path . exists ( fname ) :
os . access ( fname , os . W_OK )
except ( TypeError , ValueError ) : # pragma : no cover
raise RuntimeError ( msg )
|
def setNetworkIDTimeout(self, iNwkIDTimeOut):
    """set networkid timeout for Thread device

    Args:
        iNwkIDTimeOut: a given NETWORK_ID_TIMEOUT

    Returns:
        True: successful to set NETWORK_ID_TIMEOUT
        False: fail to set NETWORK_ID_TIMEOUT
    """
    # NOTE: Python 2 print statements / except syntax; predates Python 3.
    print '%s call setNetworkIDTimeout' % self.port
    print iNwkIDTimeOut
    # Divide by 1000 before sending -- presumably the caller supplies
    # milliseconds while the CLI expects seconds; confirm against the device.
    iNwkIDTimeOut /= 1000
    try:
        cmd = 'networkidtimeout %s' % str(iNwkIDTimeOut)
        print cmd
        # The CLI acknowledges a successful command with a leading 'Done'.
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        # On failure only log; implicitly returns None (falsy).
        ModuleHelper.WriteIntoDebugLogger("setNetworkIDTimeout() Error: " + str(e))
|
def all(cls):
    """Get all informations about this account.

    Combines the basic account info with the current credit usage and an
    estimate of the remaining (years, months, days, hours) of credit.
    """
    account = cls.info()
    usage = cls.creditusage()
    if not usage:
        # Without a usage rate no time estimate can be made.
        return account
    hours_left = account['credits'] / usage
    # Break the remaining hours down into calendar-ish units
    # (365-day years, 31-day months).
    years, remainder = divmod(hours_left, 365 * 24)
    months, remainder = divmod(remainder, 31 * 24)
    days, hours = divmod(remainder, 24)
    account['credit_usage'] = usage
    account['left'] = (years, months, days, hours)
    return account
|
def pxform(fromstr, tostr, et):
    """Return the matrix that transforms position vectors from one
    specified frame to another at a specified epoch.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pxform_c.html

    :param fromstr: Name of the frame to transform from.
    :type fromstr: str
    :param tostr: Name of the frame to transform to.
    :type tostr: str
    :param et: Epoch of the rotation matrix.
    :type et: float
    :return: A rotation matrix.
    :rtype: 3x3-Element Array of floats
    """
    # Convert Python values into the ctypes representations CSPICE expects.
    et = ctypes.c_double(et)
    tostr = stypes.stringToCharP(tostr)
    fromstr = stypes.stringToCharP(fromstr)
    # Output buffer: filled in place by the C routine.
    rotatematrix = stypes.emptyDoubleMatrix()
    libspice.pxform_c(fromstr, tostr, et, rotatematrix)
    return stypes.cMatrixToNumpy(rotatematrix)
|
def standard_deviation(data, period):
    """Standard Deviation.

    Formula:
    std = sqrt(avg(abs(x - avg(x))^2))

    Computed over a rolling window of *period* samples (sample std,
    ddof=1); leading positions with too little history are filled by
    fill_for_noncomputable_vals.
    """
    catch_errors.check_for_period_error(data, period)
    window_stds = []
    for window_end in range(period, len(data) + 1):
        window = data[window_end - period:window_end]
        window_stds.append(np.std(window, ddof=1))
    return fill_for_noncomputable_vals(data, window_stds)
|
def add_semantic_hub_layout(cx, hub):
    """Attach a layout aspect to a CX network given a hub node.

    :param cx: CX network structure, modified in place -- its
        'cartesianLayout' aspect is added or overwritten.
    :param hub: name of the node to use as the layout hub.
    """
    graph = cx_to_networkx(cx)
    hub_node = get_node_by_name(graph, hub)
    # Group the remaining nodes relative to the hub so the layout can be
    # arranged around it.
    node_classes = classify_nodes(graph, hub_node)
    layout_aspect = get_layout_aspect(hub_node, node_classes)
    cx['cartesianLayout'] = layout_aspect
|
def message(self, phone_number, message, message_type, **params):
    """Send a message to the target phone_number.

    Thin wrapper around a POST to the messaging resource; any extra keyword
    arguments are forwarded as additional request parameters.

    See https://developer.telesign.com/docs/messaging-api for detailed API documentation.
    """
    return self.post(MESSAGING_RESOURCE, phone_number=phone_number, message=message, message_type=message_type, **params)
|
def jsName(path, name, prefix="C:\\Users\\scheinerbock\\Desktop\\"
                              "ideogram\\scrapeSource\\test\\"):
    '''Returns a name string without \\, -, and . so that
    the string will play nicely with javascript.

    :param path: file path to encode.
    :param name: identifier appended after the encoded path.
    :param prefix: leading path fragment stripped before encoding; the
        default preserves the original hard-coded behaviour.
    '''
    # Drop the machine-specific prefix (no-op when it does not occur).
    shortPath = path.replace(prefix, "")
    # Replace characters that are not valid in javascript identifiers.
    noDash = shortPath.replace("-", "_dash_")
    jsPath = noDash.replace("\\", "_slash_").replace(".", "_dot_")
    return jsPath + '_slash_' + name
|
def sequence_to_alignment_coords(self, seq_name, start, end, trim=False):
    """convert an interval in one of the sequences into an interval in the
    alignment. Alignment intervals are inclusive of start, but not end. They
    are one-based. Hence the full alignment has coords [1, N+1), where N is the
    length of the alignment (number of columns). Sequence coords follow the
    same conventions: one-based, inclusive of start but not end.

    :param seq_name: which sequence are the start and end coords for?
    :param start: start of the interval in sequence co-ordinates
    :param end: end of the interval in sequence co-ordinates
    :param trim: if true, trim coordinates that fall partially outside
                 the sequence
    :raises AlignmentError: if coordinates fall entirely outside the
                            sequence, or partially outside and trim == false
    :return: list of (start, end) tuples of ungapped alignment-column runs
             covering the region of interest.
    """
    # check for valid order of start/end
    if end <= start:
        raise InvalidSequenceCoordinatesError("invalid region: " + str(start) + ", " + str(end))
    seq = self[seq_name]
    s_start = seq.start
    s_end = seq.end
    pos_strand = seq.is_positive_strand()
    # check that the start and end coords are at least partially in the seq
    if start > s_end or end < s_start:
        msg = "Cannot convert " + str(start) + ", " + str(end) + " to " + "alignment coordinates; falls fully outside of sequence " + str(s_start) + ", " + str(s_end)
        raise InvalidSequenceCoordinatesError(msg)
    # trim overlap if that option is specified, otherwise complain if outside
    if trim:
        start = s_start if start < s_start else start
        end = s_end if end > s_end else end
    elif start < s_start or end > s_end:
        # BUG FIX: error message typo corrected ("artially" -> "partially").
        msg = "Cannot convert " + str(start) + ", " + str(end) + " to " + "alignment coordinates; falls partially outside of seq. " + str(s_start) + ", " + str(s_end)
        raise InvalidSequenceCoordinatesError(msg)
    # Walk the alignment columns in sequence order (reversed for the negative
    # strand), counting non-gap characters to locate the region of interest
    # and emitting one (start, end) run per ungapped stretch.
    num_gaps = 0
    num_non_gaps = 0
    res = []
    current_start = None
    current_end = None
    l_start = 0 if pos_strand else self.size() - 1
    l_end = self.size() if pos_strand else 0
    l_step = 1 if pos_strand else -1
    for i in range(l_start, l_end, l_step):
        if seq[i] == GAP_CHAR:
            num_gaps += 1
        else:
            num_non_gaps += 1
        if num_non_gaps > end - s_start:
            # done, past the end of the ROI
            break
        if num_non_gaps > start - s_start:
            # within ROI still
            if seq[i] != GAP_CHAR:
                if current_start is None and current_end is None:
                    current_start = i
                    current_end = i + 1
                else:
                    if ((pos_strand and seq[i - 1] == GAP_CHAR) or (not pos_strand and seq[i + 1] == GAP_CHAR)):
                        # is the start of a new non-gapped region...
                        res.append((current_start + 1, current_end + 1))
                        current_start = i
                        current_end = i + 1
                if pos_strand and seq[i - 1] != GAP_CHAR:
                    # is continuation of non-gapped region
                    current_end += 1
                if not pos_strand and seq[i + 1] != GAP_CHAR:
                    # is continuation of non-gapped region
                    current_start -= 1
    res.append((current_start + 1, current_end + 1))
    return res
|
def main(argv=None):
    '''Execute the "intake" command line program.

    :param argv: argument list to parse; defaults to sys.argv.
    :return: the CLI bootstrap's return value (exit status).
    '''
    # Imported lazily so merely importing this module stays cheap.
    from intake.cli.bootstrap import main as _main
    return _main('Intake Catalog CLI', subcommands.all, argv or sys.argv)
|
def add_it ( workbench , file_list , labels ) :
"""Add the given file _ list to workbench as samples , also add them as nodes .
Args :
workbench : Instance of Workbench Client .
file _ list : list of files .
labels : labels for the nodes .
Returns :
A list of md5s ."""
|
md5s = [ ]
for filename in file_list :
if filename != '.DS_Store' :
with open ( filename , 'rb' ) as pe_file :
base_name = os . path . basename ( filename )
md5 = workbench . store_sample ( pe_file . read ( ) , base_name , 'exe' )
workbench . add_node ( md5 , md5 [ : 6 ] , labels )
md5s . append ( md5 )
return md5s
|
def mutate(self):
    """Mutates code.

    Picks a random position and applies one of three edits: replace the
    instruction there (p=0.5), delete it (p=0.25), or insert a freshly
    generated instruction before it (p=0.25).  No-op when code is empty.
    """
    if not self.code:
        return
    # The two random draws happen in the same order regardless of which edit
    # is chosen, keeping behaviour reproducible under a fixed seed.
    position = random.randint(0, len(self.code) - 1)
    roll = random.random()
    if roll < 0.5:
        # Change: replace with a freshly generated instruction.
        self.code[position] = self.new().randomize().code[0]
    elif roll < 0.75:
        # Deletion.
        del self.code[position]
    else:
        # Insertion.
        self.code.insert(position, self.new().randomize().code[0])
|
def get_relation_count_query(self, query, parent):
    """Add the constraints for a relationship count query.

    :type query: orator.orm.Builder
    :type parent: orator.orm.Builder

    :rtype: orator.orm.Builder
    """
    # Counting a relation back onto the same table requires aliasing,
    # which the self-join variant handles.
    if parent.get_query().from__ == query.get_query().from__:
        return self.get_relation_count_query_for_self_join(query, parent)
    # Join through the pivot table before applying the shared base logic.
    self._set_join(query)
    return super(BelongsToMany, self).get_relation_count_query(query, parent)
|
def _uncythonized_model(self, beta):
    """Creates the structure of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    -------
    theta : np.array
        Contains the predicted values for the time series
    Y : np.array
        Contains the length-adjusted time series (accounting for lags)
    scores : np.array
        Contains the scores for the time series
    """
    # Map latent variables from the unconstrained optimisation space into
    # their natural parameter space via each prior's transform.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    theta = np.zeros(self.model_Y.shape[0])
    model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm)
    # Loop over time series (pure-Python recursion; the "_uncythonized"
    # name suggests a compiled variant exists elsewhere -- confirm).
    theta, self.model_scores = gas_llev_recursion(parm, theta, self.model_scores, self.model_Y, self.model_Y.shape[0], self.family.score_function, self.link, model_scale, model_shape, model_skewness, self.max_lag)
    return theta, self.model_Y, self.model_scores
|
def ray_triangle_id(triangles, ray_origins, ray_directions, triangles_normal=None, tree=None, multiple_hits=True):
    """Find the intersections between a group of triangles and rays.

    Parameters
    ----------
    triangles : (n, 3, 3) float
      Triangles in space
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors
    triangles_normal : (n, 3) float
      Normal vector of triangles, optional
    tree : rtree.Index
      Rtree object holding triangle bounds
    multiple_hits : bool
      If False, only the first (closest) hit per ray is returned.

    Returns
    -------
    index_triangle : (h,) int
      Index of triangles hit
    index_ray : (h,) int
      Index of ray that hit triangle
    locations : (h, 3) float
      Position of intersection in space
    """
    triangles = np.asanyarray(triangles, dtype=np.float64)
    ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
    ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
    # if we didn't get passed an r-tree for the bounds of each
    # triangle create one here
    if tree is None:
        tree = triangles_mod.bounds_tree(triangles)
    # find the list of likely triangles and which ray they
    # correspond with, via rtree queries
    ray_candidates, ray_id = ray_triangle_candidates(ray_origins=ray_origins, ray_directions=ray_directions, tree=tree)
    # get subsets which are corresponding rays and triangles
    # (c, 3, 3) triangle candidates
    triangle_candidates = triangles[ray_candidates]
    # (c, 3) origins and vectors for the rays
    line_origins = ray_origins[ray_id]
    line_directions = ray_directions[ray_id]
    # get the plane origins and normals from the triangle candidates
    plane_origins = triangle_candidates[:, 0, :]
    if triangles_normal is None:
        plane_normals, triangle_ok = triangles_mod.normals(triangle_candidates)
        if not triangle_ok.all():
            raise ValueError('Invalid triangles!')
    else:
        plane_normals = triangles_normal[ray_candidates]
    # find the intersection location of the rays with the planes
    location, valid = intersections.planes_lines(plane_origins=plane_origins, plane_normals=plane_normals, line_origins=line_origins, line_directions=line_directions)
    if (len(triangle_candidates) == 0 or not valid.any()):
        return [], [], []
    # find the barycentric coordinates of each plane intersection on the
    # triangle candidates
    barycentric = triangles_mod.points_to_barycentric(triangle_candidates[valid], location)
    # the plane intersection is inside the triangle if all barycentric
    # coordinates are between 0.0 and 1.0 (within tolerance)
    hit = np.logical_and((barycentric > -tol.zero).all(axis=1), (barycentric < (1 + tol.zero)).all(axis=1))
    # the result index of the triangle is a candidate with a valid plane
    # intersection and a triangle which contains the plane intersection point
    index_tri = ray_candidates[valid][hit]
    # the ray index is a subset with a valid plane intersection and contained
    # by a triangle
    index_ray = ray_id[valid][hit]
    # locations are already valid plane intersections, just mask by hits
    location = location[hit]
    # only return points that are forward from the origin
    vector = location - ray_origins[index_ray]
    distance = util.diagonal_dot(vector, ray_directions[index_ray])
    forward = distance > -1e-6
    index_tri = index_tri[forward]
    index_ray = index_ray[forward]
    location = location[forward]
    distance = distance[forward]
    if multiple_hits:
        return index_tri, index_ray, location
    # since we are not returning multiple hits, we need to
    # figure out which hit is first
    if len(index_ray) == 0:
        return index_tri, index_ray, location
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool as the dtype instead.
    first = np.zeros(len(index_ray), dtype=bool)
    groups = grouping.group(index_ray)
    for group in groups:
        # keep only the closest hit for each ray
        index = group[distance[group].argmin()]
        first[index] = True
    return index_tri[first], index_ray[first], location[first]
|
def flatten(l):
    '''Flatten an arbitrarily nested list into a lazy iterable.

    ``dict`` and string-like elements (``str``/``bytes``) are treated as
    atomic values and are NOT expanded.

    Args:
        l (list): The (possibly nested) list to flatten.

    Yields:
        The leaf elements in depth-first order. Use ``list(flatten(l))``
        to materialize the result.
    '''
    for element in l:
        # str/bytes are iterable but must stay whole; dicts are kept intact too.
        atomic = isinstance(element, (str, bytes, dict)) or not isinstance(element, Iterable)
        if atomic:
            yield element
        else:
            yield from flatten(element)
|
def old_format(self, content: BeautifulSoup) -> List[str]:
    """Extracts email message information if it uses the old Mailman format.

    Args:
        content: BeautifulSoup of an archived message page
    Returns:
        List[str]: ``[sender, date, body, next_in_thread_url, reply_to_url]``,
        each stringified (missing values become the string ``'None'``).
    """
    b = content.find('body')
    sender, date, nxt, rep_to = None, None, None, None
    strongs = b.findAll('strong', recursive=False)
    for s in strongs:
        # Field label sits between the closing '>' of <strong> and the next '<'.
        field = str(s).split(">")[1].split("<")[0]
        if 'From' in field:
            sender = s.next_sibling.split("(")[0].strip()
        elif 'Date' in field:
            # NOTE(review): the second replace looks like a no-op — possibly
            # meant to collapse doubled spaces; confirm against source pages.
            date_str = s.next_sibling.strip().replace("-", "").replace(" ", " ").strip()
            try:
                date = parsedate_to_datetime(date_str).isoformat()[:19]
            except Exception:  # was a bare except; malformed dates fall back to None
                date = None
    # Fallbacks when the <strong> header fields were absent or empty.
    sender = b.find('b').text if sender is None else sender
    sender = b.find('a').text if len(sender) == 0 else sender
    date = b.find('i').text[:19] if date is None else date
    try:
        nav = content.find('ul').findAll('li')
    except AttributeError:  # page has no <ul> navigation block (find returned None)
        nav = None
    if nav is not None:
        for l in nav:
            s = l.text
            if 'Next in thread' in s:
                nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
                nxt = nxt[1:] if nxt[0] == '/' else nxt
            elif 'reply to' in s:
                rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']
                rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to
    body = content.find('pre')
    body = body.text.strip() if body is not None else None
    return [str(i) for i in [sender, date, body, nxt, rep_to]]
|
def hex_color_to_tuple(hex):
    """Convert a hex color string to a tuple of channel values.

    "#ffffff"   -> (255, 255, 255)
    "#ffff00ff" -> (255, 255, 0, 255)
    """
    # NOTE: the parameter shadows the builtin ``hex``; name kept for API
    # compatibility with existing keyword callers.
    digits = hex[1:]
    channels = []
    # Walk the string two hex digits at a time; a trailing odd digit is ignored.
    for start in range(0, (len(digits) // 2) * 2, 2):
        channels.append(int(digits[start:start + 2], 16))
    return tuple(channels)
|
def run_track(track, result_hosts=None, crate_root=None, output_fmt=None, logfile_info=None, logfile_result=None, failfast=False, sample_mode='reservoir'):
    """Execute a track file.

    Loads the TOML track definition and runs it through an ``Executor``;
    exits the process with status 1 if execution reports an error.
    """
    with Logger(output_fmt=output_fmt,
                logfile_info=logfile_info,
                logfile_result=logfile_result) as log:
        runner = Executor(
            track_dir=os.path.dirname(track),
            log=log,
            result_hosts=result_hosts,
            crate_root=crate_root,
            fail_fast=failfast,
            sample_mode=sample_mode,
        )
        # exit while the logger is still open so it observes the failure
        if runner.execute(toml.load(track)):
            sys.exit(1)
|
def consume_all(self, max_loops=None):
    """Consume the streamed responses until there are no more.

    .. warning::
        This method will be removed in future releases. Please use this
        class as a generator instead.

    :type max_loops: int
    :param max_loops: (Optional) Maximum number of rows to consume from the
        stream. You can use this to avoid long wait times. ``None`` (the
        default) consumes the stream until exhausted.
    """
    # Bug fix: ``max_loops`` was documented but silently ignored. It now
    # bounds the number of rows consumed; ``islice(self, None)`` preserves
    # the old consume-everything behavior.
    # NOTE(review): the docstring spoke of ``ReadRowsResponse`` objects while
    # iteration yields rows — limiting rows here; confirm intended semantics.
    from itertools import islice
    for row in islice(self, max_loops):
        self.rows[row.row_key] = row
|
def _check ( self ) :
"""Validates model parameters prior to fitting .
Raises
ValueError
If any of the parameters are invalid , e . g . if : attr : ` startprob _ `
don ' t sum to 1."""
|
self . startprob_ = np . asarray ( self . startprob_ )
if len ( self . startprob_ ) != self . n_components :
raise ValueError ( "startprob_ must have length n_components" )
if not np . allclose ( self . startprob_ . sum ( ) , 1.0 ) :
raise ValueError ( "startprob_ must sum to 1.0 (got {:.4f})" . format ( self . startprob_ . sum ( ) ) )
self . transmat_ = np . asarray ( self . transmat_ )
if self . transmat_ . shape != ( self . n_components , self . n_components ) :
raise ValueError ( "transmat_ must have shape (n_components, n_components)" )
if not np . allclose ( self . transmat_ . sum ( axis = 1 ) , 1.0 ) :
raise ValueError ( "rows of transmat_ must sum to 1.0 (got {})" . format ( self . transmat_ . sum ( axis = 1 ) ) )
|
def deployment_delete(name, resource_group, **kwargs):
    '''.. versionadded:: 2019.2.0

    Delete a deployment.

    :param name: The name of the deployment to delete.
    :param resource_group: The resource group name assigned to the
        deployment.

    :return: True on success, False if the cloud call failed.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_resource.deployment_delete testdeploy testgroup
    '''
    resconn = __utils__['azurearm.get_client']('resource', **kwargs)
    try:
        # delete() returns a poller; wait for the long-running operation.
        poller = resconn.deployments.delete(deployment_name=name, resource_group_name=resource_group)
        poller.wait()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
        return False
    return True
|
def zstack_array(self, s=0, c=0, t=0):
    """Return zstack as a :class:`numpy.ndarray`.

    :param s: series
    :param c: channel
    :param t: timepoint
    :returns: zstack as a :class:`numpy.ndarray` (planes stacked depth-wise)
    """
    planes = [proxy.image for proxy in self.zstack_proxy_iterator(s=s, c=c, t=t)]
    return np.dstack(planes)
|
def param_set_send(self, parm_name, parm_value, parm_type=None):
    '''Wrapper for the MAVLink ``param_set_send`` message.

    :param parm_name: name of the parameter to set
    :param parm_value: new value for the parameter
    :param parm_type: MAVLink type of the value; defaults to float on
        MAVLink-1.0 links (the type field does not exist on older links)
    '''
    if self.mavlink10():
        # MAVLink 1.0 added an explicit type field; default it to float.
        # (Fixed ``parm_type == None`` to the idiomatic identity check.)
        if parm_type is None:
            parm_type = mavlink.MAVLINK_TYPE_FLOAT
        self.mav.param_set_send(self.target_system, self.target_component,
                                parm_name, parm_value, parm_type)
    else:
        self.mav.param_set_send(self.target_system, self.target_component,
                                parm_name, parm_value)
|
def _loads(self, string):
    """If :prop:serialized is True, @string will be unserialized
    using :prop:serializer.

    Falls through (returning None) when the store is serialized and
    @string is None.
    """
    if not self.serialized:
        # Serialization disabled: only byte-decoding is needed.
        return self._decode(string)
    if string is not None:
        try:
            return self.serializer.loads(string)
        except TypeError:
            #: catches bytes errors with the builtin json library
            # (retry with the decoded text form of the payload)
            return self.serializer.loads(self._decode(string))
        except pickle.UnpicklingError as e:
            #: incr and decr methods create issues when pickle serialized
            # It's a terrible idea for a serialized instance
            # to be performing incr and decr methods, but I think
            # it makes sense to catch the error regardless
            decoded = self._decode(string)
            if decoded.isdigit():
                # presumably a counter stored as plain digits by incr/decr;
                # returned as a string — TODO confirm callers expect str here
                return decoded
            raise pickle.UnpicklingError(e)
|
def import_events(self, source='wonambi'):
    """action: import events from text file (Wonambi or RemLogic).

    :param source: one of ``'wonambi'`` or ``'remlogic'``
    :raises ValueError: if ``source`` is not a recognized format
    """
    if self.annot is None:  # remove if buttons are disabled
        self.parent.statusBar().showMessage('No score file loaded')
        return
    if 'wonambi' == source:
        format_str = 'CSV File (*.csv)'
        rec_start = None
    elif 'remlogic' == source:
        format_str = 'Text file (*.txt)'
        rec_start = self.parent.info.dataset.header['start_time']
    else:
        # Bug fix: an unknown source previously fell through and raised an
        # opaque NameError on ``format_str``; fail fast with a clear error.
        raise ValueError('Unknown event source: ' + str(source))
    fn, _ = QFileDialog.getOpenFileName(self, 'Import events', None, format_str)
    if fn == '':
        return
    fn = Path(fn).resolve()
    self.annot.import_events(fn, source=source, rec_start=rec_start, parent=self.parent)
    self.display_notes()
|
def configure_scraper(self, scraper_config):
    """Attach TLS credentials and extra headers to a scraper configuration.

    :param scraper_config: dict containing at least a ``'prometheus_url'``
        key; it is updated in place with ssl/header settings
    """
    endpoint = scraper_config['prometheus_url']
    credentials = {
        'ssl_ca_cert': self._ssl_verify,
        'ssl_cert': self._ssl_cert,
        'ssl_private_key': self._ssl_private_key,
        'extra_headers': self.headers(endpoint) or {},
    }
    scraper_config.update(credentials)
|
def _get_args(self, kwargs):
    '''Discard all keywords which aren't function-specific from the kwargs.

    :param kwargs: raw keyword arguments
    :return: tuple of (positional args, cleaned kwargs); the positional
        list is always empty, callers expect the pair shape regardless
    '''
    cleaned_kwargs = salt.utils.args.clean_kwargs(**kwargs)
    return [], cleaned_kwargs
|
def _build_endpoint(self, endpoint_name):
    """Generate an endpoint url from a setting name.

    Args:
        endpoint_name (str): setting name for the endpoint to build
    Returns:
        (str) url endpoint (host prefix + relative path from settings)
    """
    relative_path = settings.get('asmaster_endpoints', endpoint_name)
    return '{0}{1}'.format(self.host, relative_path)
|
def _asStr ( self ) :
'''_ asStr - Get the string representation of this style
@ return < str > - A string representation of this style ( semicolon separated , key : value format )'''
|
styleDict = self . _styleDict
if styleDict :
return '; ' . join ( [ name + ': ' + value for name , value in styleDict . items ( ) ] )
return ''
|
def fmt_ac_sia(ac_sia):
    """Format a AcSystemIrreducibilityAnalysis."""
    template = (
        '{ALPHA} = {alpha}\n'
        'direction: {ac_sia.direction}\n'
        'transition: {ac_sia.transition}\n'
        'before state: {ac_sia.before_state}\n'
        'after state: {ac_sia.after_state}\n'
        'cut:\n{ac_sia.cut}\n'
        '{account}\n'
        '{partitioned_account}'
    )
    body = template.format(
        ALPHA=ALPHA,
        alpha=round(ac_sia.alpha, 4),
        ac_sia=ac_sia,
        account=fmt_account(ac_sia.account, 'Account'),
        partitioned_account=fmt_account(ac_sia.partitioned_account, 'Partitioned Account'),
    )
    return box(header('AcSystemIrreducibilityAnalysis', body, under_char=HORIZONTAL_BAR))
|
def nl_cb_err(cb, kind, func, arg):
    """Set up an error callback. Updates `cb` in place.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L343

    Positional arguments:
    cb -- nl_cb class instance.
    kind -- kind of callback (integer).
    func -- callback function.
    arg -- argument to be passed to callback function.

    Returns:
    0 on success or a negative error code.
    """
    if not 0 <= kind <= NL_CB_KIND_MAX:
        return -NLE_RANGE
    # A custom kind installs the caller's function; any other kind selects
    # the corresponding default handler from the table.
    cb.cb_err_arg = arg
    if kind == NL_CB_CUSTOM:
        cb.cb_err = func
    else:
        cb.cb_err = cb_err_def[kind]
    return 0
|
def _create_signing_params(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None):
    """Creates the required URL parameters for a signed URL.

    A "canned" policy (Expires only) is used when only an expiry is given;
    otherwise a custom policy document is built, base64-encoded, and sent as
    the ``Policy`` parameter. In both cases the policy text is signed and
    the signature plus key-pair id are added to the returned dict.
    """
    params = {}
    # Check if we can use a canned policy
    if expire_time and not valid_after_time and not ip_address and not policy_url:
        # we manually construct this policy string to ensure formatting
        # matches signature
        policy = self._canned_policy(url, expire_time)
        params["Expires"] = str(expire_time)
    else:
        # If no policy_url is specified, default to the full url.
        if policy_url is None:
            policy_url = url
        # Can't use canned policy
        policy = self._custom_policy(policy_url, expires=expire_time, valid_after=valid_after_time, ip_address=ip_address)
        # Custom policies travel in the URL, so they are base64-encoded
        # (URL-safe variant) under the "Policy" parameter.
        encoded_policy = self._url_base64_encode(policy)
        params["Policy"] = encoded_policy
    # sign the policy
    signature = self._sign_string(policy, private_key_file, private_key_string)
    # now base64 encode the signature (URL safe as well)
    encoded_signature = self._url_base64_encode(signature)
    params["Signature"] = encoded_signature
    params["Key-Pair-Id"] = keypair_id
    return params
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.