signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def setModel(self, model):
    """Set the model that this editor provides access to.

    Wires the model's signals into this widget so hint requests are
    forwarded and the title refreshes when parameter rows change.

    :param model: The data stucture for this editor to provide access to
    :type model: :class:`QAutoParameterModel<sparkle.gui.stim.qauto_parameter_model.QAutoParameterModel>`
    """
    self.paramList.setModel(model)
    # forward the model's hint requests through this editor's own signal
    model.hintRequested.connect(self.hintRequested)
    # keep the title in sync with the number of parameter rows
    model.rowsInserted.connect(self.updateTitle)
    model.rowsRemoved.connect(self.updateTitle)
    # refresh immediately so the title reflects the newly attached model
    self.updateTitle()
|
def p_for_stmt(p):
    """for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
    | FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
    | FOR matrix EQ expr SEMI stmt_list END_STMT
    """
    # PLY rule: the docstring above is the grammar, so it must stay pure.
    if len(p) == 8:
        # first or third production: FOR <target> EQ expr SEMI stmt_list END_STMT
        if not isinstance(p[2], node.ident):
            # matrix-valued loop targets (third production) are unsupported
            raise_exception(SyntaxError, "Not implemented: for loop", new_lexer)
        p[2].props = "I"
        # I = for-loop iteration variable
        p[0] = node.for_stmt(ident=p[2], expr=p[4], stmt_list=p[6])
    elif len(p) == 10:
        # second production (parenthesized form):
        # FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
        # Previously this alternative fell through and left p[0] unset,
        # silently dropping the loop from the parse tree.
        p[3].props = "I"
        # I = for-loop iteration variable
        p[0] = node.for_stmt(ident=p[3], expr=p[5], stmt_list=p[8])
|
def _call(self, x, out):
    """Implement ``self(x, out)``."""
    # collect the invariant resize options once, then run the forward
    # resize into a writable view of ``out``
    resize_opts = dict(
        offset=self.offset,
        pad_mode=self.pad_mode,
        pad_const=self.pad_const,
        direction='forward',
    )
    with writable_array(out) as out_view:
        resize_array(x.asarray(), self.range.shape, out=out_view, **resize_opts)
|
def autocommit(data_access):
    """Temporarily force ``data_access`` into autocommit mode.

    Any pending transaction is committed up front, autocommit is switched
    on for the duration of the ``with`` block, and the previous setting is
    restored afterwards -- even if the block raises.

    :param data_access: a DataAccess instance
    """
    if not data_access.autocommit:
        # flush whatever is pending before switching modes
        data_access.commit()
    previous = data_access.autocommit
    data_access.autocommit = True
    try:
        yield data_access
    finally:
        # always restore the caller's original autocommit setting
        data_access.autocommit = previous
|
def parse_args(args=None):
    """Parse command line arguments for Grole server running as static file server.

    :param args: argument list to parse; ``None`` (the default) makes
        argparse read ``sys.argv[1:]`` at call time.  The previous default
        of ``args=sys.argv[1:]`` was evaluated once at import time, so
        later changes to ``sys.argv`` were silently ignored.
    :return: the populated :class:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--address', help='address to listen on, default localhost', default='localhost')
    parser.add_argument('-p', '--port', help='port to listen on, default 1234', default=1234, type=int)
    parser.add_argument('-d', '--directory', help='directory to serve, default .', default='.')
    parser.add_argument('-n', '--noindex', help='do not show directory indexes', default=False, action='store_true')
    # verbose and quiet are mutually exclusive log levels
    loglevel = parser.add_mutually_exclusive_group()
    loglevel.add_argument('-v', '--verbose', help='verbose logging', default=False, action='store_true')
    loglevel.add_argument('-q', '--quiet', help='quiet logging', default=False, action='store_true')
    # argparse itself falls back to sys.argv[1:] when args is None
    return parser.parse_args(args)
|
def GET_AUTH(self):  # pylint: disable=arguments-differ
    """GET request.

    Renders the profile preferences page for the user identified by the
    current session email, or raises 404 when no user record matches.
    """
    # look the account up by the email stored in the current session
    userdata = self.database.users.find_one({"email": self.user_manager.session_email()})
    if not userdata:
        # no matching account -- treat the page as missing
        raise web.notfound()
    return self.template_helper.get_renderer().preferences.profile("", False)
|
def make_links(self):
    """Replace the default behaviour of make_links. More specifically, this method
    implements the logic required to connect DFPT calculation to `DDK` files.
    Remember that DDK is an extension introduced in AbiPy to deal with the
    irdddk input variable and the fact that the 3 files with du/dk produced by Abinit
    have a file extension constructed from the number of atom (e.g. 1WF[3natom+1]).
    AbiPy uses the user-friendly syntax deps={node: "DDK"} to specify that
    the children will read the DDK from `node` but this also means that
    we have to implement extract logic to handle this case at runtime.
    """
    for dep in self.deps:
        for d in dep.exts:
            if d == "DDK":
                # the DDK producer must have written its DDK file to outdir
                ddk_task = dep.node
                out_ddk = ddk_task.outdir.has_abiext("DDK")
                if not out_ddk:
                    raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
                # Get (fortran) idir and construct the name of the 1WF expected by Abinit
                rfdir = list(ddk_task.input["rfdir"])
                if rfdir.count(1) != 1:
                    raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
                idir = rfdir.index(1) + 1
                # 1WF index encodes the perturbation: direction + 3 * natom
                ddk_case = idir + 3 * len(ddk_task.input.structure)
                infile = self.indir.path_in("in_1WF%d" % ddk_case)
                os.symlink(out_ddk, infile)
            elif d in ("WFK", "WFQ"):
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext(d)
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the %s file" % (gs_task, d))
                if d == "WFK":
                    bname = "in_WFK"
                elif d == "WFQ":
                    bname = "in_WFQ"
                else:
                    # unreachable given the elif guard above; kept defensively
                    raise ValueError("Don't know how to handle `%s`" % d)
                # only link once -- a previous call may already have linked it
                if not os.path.exists(self.indir.path_in(bname)):
                    os.symlink(out_wfk, self.indir.path_in(bname))
            elif d == "DEN":
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("DEN")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the DEN file" % gs_task)
                if not os.path.exists(self.indir.path_in("in_DEN")):
                    os.symlink(out_wfk, self.indir.path_in("in_DEN"))
            elif d == "1WF":
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("1WF")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the 1WF file" % gs_task)
                # keep the producer's numbered extension on the input-side link
                dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
                if not os.path.exists(dest):
                    os.symlink(out_wfk, dest)
            elif d == "1DEN":
                # NOTE(review): looks up the plain "DEN" extension for a 1DEN
                # dependency -- presumably has_abiext matches first-order DEN
                # files too; verify against the producer task.
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("DEN")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the 1DEN file" % gs_task)
                dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
                if not os.path.exists(dest):
                    os.symlink(out_wfk, dest)
            else:
                raise ValueError("Don't know how to handle extension: %s" % str(dep.exts))
|
def add_file(self, fieldname, filename, filePath, mimetype=None):
    """Queue a file for upload.

    Inputs:
       fieldname - name of the POST value
       filename - name of the file to pass to the server
       filePath - path to the local file on disk
       mimetype - MIME (Multipurpose Internet Mail Extensions) type, a way
           of identifying files on the Internet according to their nature
           and format.  When None (the default), it is guessed from the
           filename, falling back to application/octet-stream.
    """
    if mimetype is None:
        guessed, _ = mimetypes.guess_type(filename)
        mimetype = guessed or 'application/octet-stream'
    self.files.append((fieldname, filename, mimetype, filePath))
|
def get(self, request, *args, **kwargs):
    '''Invoices can be viewed only if the validation string is provided, unless
    the user is logged in and has view_all_invoices permissions.
    '''
    # the invoice's own validation string acts as a per-invoice access token
    user_has_validation_string = self.get_object().validationString
    user_has_permissions = request.user.has_perm('core.view_all_invoices')
    # accept either a matching ?v= token or the blanket staff permission
    if request.GET.get('v', None) == user_has_validation_string or user_has_permissions:
        return super(ViewInvoiceView, self).get(request, *args, **kwargs)
    return self.handle_no_permission()
|
async def Track(self, payloads):
    '''payloads : typing.Sequence[~Payload]
    Returns -> typing.Sequence[~PayloadResult]
    '''
    # assemble the rpc request for the PayloadsHookContext facade
    msg = {
        'type': 'PayloadsHookContext',
        'request': 'Track',
        'version': 1,
        'params': {'payloads': payloads},
    }
    return await self.rpc(msg)
|
def snmp_server_enable_trap_trap_flag(self, **kwargs):
    """Auto Generated Code"""
    # build the config/snmp-server/enable/trap/trap-flag element tree
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    trap = ET.SubElement(ET.SubElement(snmp_server, "enable"), "trap")
    ET.SubElement(trap, "trap-flag")
    # callers may supply their own callback; default to the instance one
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _GetIgnoredDirs(self):
    """Get ignored directories values from database table. If no values found user
    will be prompted to enter values for this table.

    Returns
    ----------
    string
      List of ignored directories from database table.
    """
    goodlogging.Log.Info("CLEAR", "Loading ignored directories from database:")
    goodlogging.Log.IncreaseIndent()
    ignoredDirs = self._db.GetIgnoredDirs()
    if ignoredDirs is None:
        # nothing stored yet -- prompt the user for the initial values
        goodlogging.Log.Info("CLEAR", "No ignored directories exist in database")
        ignoredDirs = self._UserUpdateIgnoredDirs()
    else:
        goodlogging.Log.Info("CLEAR", "Got ignored directories from database: {0}".format(ignoredDirs))
    # the archive directory is always ignored, whatever the stored values say
    if self._archiveDir not in ignoredDirs:
        ignoredDirs.append(self._archiveDir)
    goodlogging.Log.Info("CLEAR", "Using ignored directories: {0}".format(ignoredDirs))
    goodlogging.Log.DecreaseIndent()
    return ignoredDirs
|
def import_string(import_name, silent=False):
    """Import an object based on a string path.

    Useful for using import paths as endpoints or similar. The path can be
    given in dotted notation (``xml.sax.saxutils.escape``) or with a colon
    as the object delimiter (``xml.sax.saxutils:escape``).

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and `None`
        is returned instead.
    :return: the imported object, or `None` on failure when `silent`.
    """
    try:
        if ':' in import_name:
            module_name, _, attr = import_name.partition(':')
        elif '.' in import_name:
            module_name, _, attr = import_name.rpartition('.')
        else:
            # a bare module name: import and return the module itself
            return __import__(import_name)
        # fromlist forces __import__ to return the leaf module, not the root
        module = __import__(module_name, None, None, [attr])
        return getattr(module, attr)
    except (ImportError, AttributeError):
        if silent:
            return None
        raise
|
def clean(self):
    """*parse and clean the html document with Mercury Parser*

    **Return:**
        - ``filePath`` -- path to the cleaned HTML document, or ``None``
          when the parser is unavailable or the article cannot be read

    **Usage:**
        See class usage
    """
    # NOTE: this method uses Python 2 print statements and str/unicode
    # encode-decode round-trips; it is Python 2 code.
    self.log.debug('starting the ``clean`` method')
    url = self.url
    # PARSE THE CONTENT OF THE WEBPAGE AT THE URL
    parser_response = self._request_parsed_article_from_mercury(url)
    if "503" in str(parser_response):
        # parser service unavailable -- give up on this URL
        return None
    article = parser_response.json()
    if not article:
        return None
    # GRAB THE CSS USED TO STYLE THE WEBPAGE/PDF CONTENT
    if self.style:
        moduleDirectory = os.path.dirname(__file__)
        cssFile = moduleDirectory + "/css/main.css"
        pathToReadFile = cssFile
        readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
        thisCss = readFile.read()
        readFile.close()
    else:
        thisCss = ""
    # CATCH ERRORS
    if "error" in article and article["error"] == True:
        print url
        print " " + article["messages"]
        return None
    try:
        text = article["content"]
    except:
        print "Can't decode the text of %(url)s - moving on" % locals()
        return None
    # COMMON FIXES TO HTML TO RENDER CORRECTLY
    # strip mediawiki edit-section links
    regex = re.compile(u'<span class="mw-editsection"><span class="mw-editsection-bracket">.*"mw-editsection-bracket">]')
    text = regex.sub(u"", text)
    # strip "better source needed" style superscript annotations
    regex2 = re.compile(u'\<sup class="noprint.*better source needed\<\/span\>\<\/a\>\<\/i\>\]\<\/sup\>', re.I)
    text = regex2.sub(u"", text)
    # rewrite absolute wikipedia TOC anchors as in-document anchors
    regex2 = re.compile(u'\<a href="https\:\/\/en\.wikipedia\.org\/wiki\/.*(\#.*)"\>\<span class=\"tocnumber\"\>', re.I)
    text = regex2.sub(u'<a href="\g<1>"><span class="tocnumber">', text)
    # drop responsive-image srcset attributes
    regex = re.compile(u'srcset=".*?">')
    text = regex.sub(u"", text)
    # GRAB HTML TITLE IF NOT SET IN ARGUMENTS
    if self.title == False:
        # reduce the article title to filename-safe ascii
        title = article["title"].encode("utf-8", "ignore")
        title = title.decode("utf-8")
        title = title.encode("ascii", "ignore")
        rstrings = """:/"&\\'`"""
        for i in rstrings:
            title = title.replace(i, "")
        # USE DATETIME IF TITLE STILL NOT SET
        if len(title) == 0:
            from datetime import datetime, date, time
            now = datetime.now()
            title = now.strftime("%Y%m%dt%H%M%S")
        self.title = title
    title = self.title.replace(".html", "")
    pageTitle = title.replace("_", " ")
    # REGENERATE THE HTML DOCUMENT WITH CUSTOM STYLE
    filePath = self.outputDirectory + "/" + title + ".html"
    writeFile = codecs.open(filePath, encoding='utf-8', mode='w')
    if self.metadata:
        metadata = "<title>%(title)s</title>" % locals()
    else:
        metadata = ""
    if self.h1:
        h1 = "<h1>%(pageTitle)s</h1>" % locals()
    else:
        h1 = ""
    content = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
%(metadata)s
<style>
%(thisCss)s
</style>
</head>
<body>
%(h1)s
<a href="%(url)s">original source</a>
</br></br>
%(text)s
</body>
</html>""" % locals()
    writeFile.write(content)
    writeFile.close()
    self.log.debug('completed the ``clean`` method')
    # tag the output file with its source URL (macOS file tagging helper)
    tag(log=self.log, filepath=filePath, tags=False, rating=False, wherefrom=self.url)
    return filePath
|
def DeregisterFormatter(cls, formatter_class):
    """Deregisters a formatter class.

    Formatter classes are identified by their lower-cased data type.

    Args:
        formatter_class (type): class of the formatter.

    Raises:
        KeyError: if no formatter class is set for the corresponding data type.
    """
    data_type = formatter_class.DATA_TYPE.lower()
    if data_type in cls._formatter_classes:
        del cls._formatter_classes[data_type]
    else:
        raise KeyError('Formatter class not set for data type: {0:s}.'.format(formatter_class.DATA_TYPE))
|
def upload_complete(self, path, url, quiet):
    """Upload the file at ``path`` to ``url`` via HTTP PUT.

    Parameters
    ----------
    path: the path for the upload that is read in
    url: the url to send the POST to
    quiet: suppress verbose output (default is False)

    Returns True when the server answers 200 or 201, False on any error.
    """
    file_size = os.path.getsize(path)
    try:
        # wrap the file object so reads advance the progress bar
        with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024, disable=quiet) as progress_bar:
            with io.open(path, 'rb', buffering=0) as fp:
                reader = TqdmBufferedReader(fp, progress_bar)
                # retry transient failures with exponential backoff
                session = requests.Session()
                retries = Retry(total=10, backoff_factor=0.5)
                adapter = HTTPAdapter(max_retries=retries)
                session.mount('http://', adapter)
                session.mount('https://', adapter)
                response = session.put(url, data=reader)
    except Exception as error:
        # deliberately best-effort: report the failure, signal via return value
        print(error)
        return False
    return response.status_code == 200 or response.status_code == 201
|
def read_data(self, **kwargs):
    '''Parse the datafile specified in Sample.datafile and return the
    resulting data object.

    Does NOT assign the result to self.data.  It is advised not to use
    this method directly, but instead to access the data through the
    FCMeasurement.data attribute.
    '''
    # parse_fcs returns (metadata, data); only the data part is wanted here
    _, data = parse_fcs(self.datafile, **kwargs)
    return data
|
def num_throats(self, labels='all', mode='union'):
    r"""Return the number of throats of the specified labels.

    Parameters
    ----------
    labels : list of strings, optional
        The throat labels that should be included in the count.
        If not supplied, all throats are counted.
    mode : string, optional
        Specifies how the count should be performed:

        **'or', 'union', 'any'** : (default) throats with *one or more* of
        the given labels are counted.
        **'and', 'intersection', 'all'** : throats with *all* of the given
        labels are counted.
        **'xor', 'exclusive_or'** : throats with *only one* of the given
        labels are counted.
        **'nor', 'none', 'not'** : throats with *none* of the given labels
        are counted.
        **'nand'** : throats with *some but not all* of the given labels
        are counted.
        **'xnor'** : throats with *more than one* of the given labels are
        counted.

    Returns
    -------
    Nt : int
        Number of throats with the specified labels.

    See Also
    --------
    num_pores
    count

    Notes
    -----
    Technically, *'nand'* and *'xnor'* should also count throats with
    *none* of the labels; to keep the count more useful these are not
    included.
    """
    # count the throat indices matching the label/mode selection
    matching = self._get_indices(element='throat', labels=labels, mode=mode)
    return sp.shape(matching)[0]
|
def broadcast(self, data_dict):
    '''Send to the visualizer (if there is one) or enqueue for later'''
    # nothing to do when no visualizer socket is attached
    if not self.vis_socket:
        return
    self.queued_messages.append(data_dict)
    self.send_all_updates()
|
def preview(self, n=10, k='items', kheader='displayLink', klink='link', kdescription='snippet'):
    """Print a preview of the search results.

    Args:
        n (int): Maximum number of search results to preview
        k (str): Key in :class:`api.results`.metadata to preview
        kheader (str): Key in :class:`api.results`.metadata[``k``] to use
            as the header
        klink (str): Key in :class:`api.results`.metadata[``k``] to use as
            the link if image search
        kdescription (str): Key in :class:`api.results`.metadata[``k``] to
            use as the description
    """
    search_type = self.cseargs.get('searchType')
    results = self.metadata[k]
    for i, item in enumerate(results[:n]):
        # offset the displayed index by the 'start' paging argument, if any
        if 'start' in self.cseargs:
            i += int(self.cseargs['start'])
        # header line, underlined with '=' to its full width
        header = '\n[' + str(i) + '] ' + item[kheader]
        print(header)
        print('=' * len(header))
        # image searches additionally show the image file name
        if search_type == 'image':
            print('\n' + path.basename(item[klink]))
        print('\n' + item[kdescription])
|
def revnet_164_cifar():
    """Tiny hparams suitable for CIFAR/etc."""
    hparams = revnet_cifar_base()
    # 164-layer configuration: bottleneck blocks with three stages of
    # 8 layers each at 16/32/64 channels
    hparams.bottleneck = True
    hparams.num_channels = [16, 32, 64]
    hparams.num_layers_per_block = [8, 8, 8]
    return hparams
|
def document(self):
    """Render the error document"""
    # the response Pylons stashed before redirecting to this error controller
    resp = request.environ.get('pylons.original_response')
    # prefer the original response body; fall back to an escaped ?message= param
    content = literal(resp.body) or cgi.escape(request.GET.get('message'))
    page = error_document_template % dict(
        prefix=request.environ.get('SCRIPT_NAME', ''),
        code=cgi.escape(request.GET.get('code', str(resp.status_int))),
        message=content)
    return page
|
def step(self, amt=1):
    """Make a frame of the animation."""
    # advance existing particles, then emitters if any are mobile
    self.move_particles()
    if self.has_moving_emitters:
        self.move_emitters()
    # spawn this frame's new particles and draw everything
    self.start_new_particles()
    self.render_particles()
    # the animation is complete once emitters and particles are both exhausted
    if self.emitters == [] == self.particles:
        self.completed = True
|
def best_case(self, matrix, m_list, indices_left):
    """Computes a best case given a matrix and manipulation list.

    Args:
        matrix: the current matrix (with some permutations already
            performed)
        m_list: [(multiplication fraction, number_of_indices, indices,
            species)] describing the manipulation
        indices_left: Set of indices which haven't had a permutation
            performed on them.
    """
    # flatten the manipulation list into the affected indices and one
    # fraction entry per index slot
    m_indices = []
    fraction_list = []
    for m in m_list:
        m_indices.extend(m[2])
        fraction_list.extend([m[0]] * m[1])
    # only indices that are both manipulated and still unpermuted matter
    indices = list(indices_left.intersection(m_indices))
    interaction_matrix = matrix[indices, :][:, indices]
    # pad fractions with 1s (no-op multipliers) up to the submatrix size
    fractions = np.zeros(len(interaction_matrix)) + 1
    fractions[:len(fraction_list)] = fraction_list
    fractions = np.sort(fractions)
    # Sum associated with each index (disregarding interactions between
    # indices)
    sums = 2 * np.sum(matrix[indices], axis=1)
    sums = np.sort(sums)
    # Interaction corrections. Can be reduced to (1-x)(1-y) for x, y in
    # fractions: each element in a column gets multiplied by (1-x), and then
    # the sum of the columns gets multiplied by (1-y); since fractions are
    # less than 1, there is no effect of one choice on the other
    step1 = np.sort(interaction_matrix) * (1 - fractions)
    step2 = np.sort(np.sum(step1, axis=1))
    step3 = step2 * (1 - fractions)
    interaction_correction = np.sum(step3)
    # Under the time-limited algorithm, blend toward the cheaper average
    # correction as the elapsed fraction of the 30-minute (1800 s) budget
    # grows.
    if self._algo == self.ALGO_TIME_LIMIT:
        elapsed_time = datetime.utcnow() - self._start_time
        speedup_parameter = elapsed_time.total_seconds() / 1800
        avg_int = np.sum(interaction_matrix, axis=None)
        avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
        average_correction = avg_int * avg_frac
        interaction_correction = average_correction * speedup_parameter + interaction_correction * (1 - speedup_parameter)
    # pair the largest sums with the smallest fractions (reversed order)
    best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) + interaction_correction
    return best_case
|
def has_documented_type_or_fields(self, include_inherited_fields=False):
    """Return whether this type, or any of its fields, are documented.

    Use this when deciding whether to create a block of documentation for
    this type.
    """
    # an undocumented type may still have documented fields
    if not self.doc:
        return self.has_documented_fields(include_inherited_fields)
    return True
|
def validate_probability(p: float, p_str: str) -> float:
    """Validates that a probability lies between 0 and 1 inclusively.

    Args:
        p: The value to validate.
        p_str: What to call the probability in error messages.

    Returns:
        The probability p if the probability is valid.

    Raises:
        ValueError: if the probability is invalid.
    """
    # the two bounds are checked independently so the message names the
    # violated side
    if p > 1:
        raise ValueError('{} was greater than 1.'.format(p_str))
    elif p < 0:
        raise ValueError('{} was less than 0.'.format(p_str))
    return p
|
def shorten_line(tokens, source, indentation, indent_word, max_line_length, aggressive=False, experimental=False, previous_line=''):
    """Separate line at OPERATOR.

    Multiple candidates will be yielded.
    """
    # baseline candidates: splits proposed by the generic shortener
    for candidate in _shorten_line(tokens=tokens, source=source, indentation=indentation, indent_word=indent_word, aggressive=aggressive, previous_line=previous_line):
        yield candidate
    if aggressive:
        # additionally try splitting at each configured operator group,
        # yielding only results that actually differ from the input
        for key_token_strings in SHORTEN_OPERATOR_GROUPS:
            shortened = _shorten_line_at_tokens(tokens=tokens, source=source, indentation=indentation, indent_word=indent_word, key_token_strings=key_token_strings, aggressive=aggressive)
            if shortened is not None and shortened != source:
                yield shortened
    if experimental:
        # experimental reflow-based shortener; may yield many candidates
        for shortened in _shorten_line_at_tokens_new(tokens=tokens, source=source, indentation=indentation, max_line_length=max_line_length):
            yield shortened
|
def from_dict(cls, d, alternative_paths=None, datasets=None, pwd=None,
              ignore_keys=None, only=None, chname=None, **kwargs):
    """Create a list from the dictionary returned by :meth:`array_info`.

    This classmethod creates an :class:`~psyplot.data.ArrayList` instance
    from a dictionary containing filename, dimension infos and array names.

    Parameters
    ----------
    d: dict
        The dictionary holding the data
    alternative_paths: dict or list or str
        A mapping from original filenames as used in `d` to filenames that
        shall be used instead. Paths must be accessible from the current
        working directory. If an iterable (not a dict) is provided, the
        file names will be replaced as they appear in `d` (note that this
        is very unsafe if `d` is not an OrderedDict). Defaults to an empty
        mapping.
    datasets: dict or list or None
        A mapping from original filenames in `d` to the instances of
        :class:`xarray.Dataset` to use. If it is an iterable, the same
        holds as for the `alternative_paths` parameter
    pwd: str
        Path to the working directory from where the data can be imported.
        If None, use the current working directory.
    ignore_keys: list of str
        Keys specified in this list are ignored and not seen as array
        information (note that ``attrs`` are used anyway). Defaults to
        ``['attrs', 'plotter', 'ds']``.
    only: string, list or callable
        Can be one of the following three things:

        - a string that represents a pattern to match the array names
          that shall be included
        - a list of array names to include
        - a callable ``filter_func(arr_name: str, info: dict) -> bool``
          that returns True if the array shall be included, else False.
          This function will also be given to subsequent instances of
          :class:`InteractiveList` objects that are contained in the
          returned value
    chname: dict
        A mapping from variable names in the project to variable names
        that should be used instead. Defaults to an empty mapping.

    Other Parameters
    ----------------
    ``**kwargs``
        Any other parameter from the `psyplot.data.open_dataset` function
        %(open_dataset.parameters)s

    Returns
    -------
    psyplot.data.ArrayList
        The list with the interactive objects

    See Also
    --------
    from_dataset, array_info
    """
    # Avoid the mutable-default-argument pitfall: the previous defaults
    # ({} and a list literal) were shared between calls, so one call could
    # mutate the defaults seen by the next. Normalize None here instead.
    if alternative_paths is None:
        alternative_paths = {}
    if ignore_keys is None:
        ignore_keys = ['attrs', 'plotter', 'ds']
    if chname is None:
        chname = {}
    pwd = pwd or getcwd()
    # build the array-name filter from the `only` argument
    if only is None:
        def only_filter(arr_name, info):
            return True
    elif callable(only):
        only_filter = only
    elif isstring(only):
        def only_filter(arr_name, info):
            return patt.search(arr_name) is not None
        patt = re.compile(only)
        only = None
    else:
        def only_filter(arr_name, info):
            return arr_name in save_only
        save_only = only
        only = None

    def get_fname_use(fname):
        # resolve relative paths against `pwd`; remote URLs and absolute
        # paths are kept as they are
        squeeze = isstring(fname)
        fname = safe_list(fname)
        ret = tuple(
            f if utils.is_remote_url(f) or osp.isabs(f) else osp.join(pwd, f)
            for f in fname)
        return ret[0] if squeeze else ret

    def get_name(name):
        # apply the `chname` renaming, recursing into nested name lists
        if not isstring(name):
            return list(map(get_name, name))
        else:
            return chname.get(name, name)

    if not isinstance(alternative_paths, dict):
        # consume the iterable lazily in the order filenames appear in `d`
        it = iter(alternative_paths)
        alternative_paths = defaultdict(partial(next, it, None))
    # first open all datasets if not already done
    if datasets is None:
        replace_concat_dim = 'concat_dim' not in kwargs
        names_and_stores = cls._get_dsnames(d, concat_dim=True)
        datasets = {}
        for fname, (store_mod, store_cls), concat_dim in names_and_stores:
            fname_use = fname
            got = True
            # only inject/remove concat_dim when the caller did not set it
            if replace_concat_dim and concat_dim is not None:
                kwargs['concat_dim'] = concat_dim
            elif replace_concat_dim and concat_dim is None:
                kwargs.pop('concat_dim', None)
            try:
                fname_use = alternative_paths[fname]
            except KeyError:
                got = False
            if not got or not fname_use:
                if fname is not None:
                    fname_use = get_fname_use(fname)
            if fname_use is not None:
                datasets[fname] = _open_ds_from_store(
                    fname_use, store_mod, store_cls, **kwargs)
        if alternative_paths is not None:
            # also open alternative files that were never referenced by `d`
            for fname in set(alternative_paths).difference(datasets):
                datasets[fname] = _open_ds_from_store(fname, **kwargs)
    elif not isinstance(datasets, dict):
        it_datasets = iter(datasets)
        datasets = defaultdict(partial(next, it_datasets, None))
    # preallocate; entries are dropped via pop() for skipped arrays
    arrays = [0] * len(d)
    i = 0
    for arr_name, info in six.iteritems(d):
        if arr_name in ignore_keys or not only_filter(arr_name, info):
            arrays.pop(i)
            continue
        if not {'fname', 'ds', 'arr'}.intersection(info):
            # the described object is an InteractiveList
            arr = InteractiveList.from_dict(
                info, alternative_paths=alternative_paths,
                datasets=datasets, chname=chname)
            if not arr:
                warn("Skipping empty list %s!" % arr_name)
                arrays.pop(i)
                continue
        else:
            if 'arr' in info:
                arr = info.pop('arr')
            elif 'ds' in info:
                arr = cls.from_dataset(
                    info['ds'], dims=info['dims'],
                    name=get_name(info['name']))[0]
            else:
                fname = info['fname']
                if fname is None:
                    warn("Could not open array %s because no filename was "
                         "specified!" % arr_name)
                    arrays.pop(i)
                    continue
                try:  # in case, datasets is a defaultdict
                    datasets[fname]
                except KeyError:
                    pass
                if fname not in datasets:
                    warn("Could not open array %s because %s was not in "
                         "the list of datasets!" % (arr_name, fname))
                    arrays.pop(i)
                    continue
                arr = cls.from_dataset(
                    datasets[fname], dims=info['dims'],
                    name=get_name(info['name']))[0]
        # stored attrs never overwrite attrs already present on the array
        for key, val in six.iteritems(info.get('attrs', {})):
            arr.attrs.setdefault(key, val)
        arr.psy.arr_name = arr_name
        arrays[i] = arr
        i += 1
    return cls(arrays, attrs=d.get('attrs', {}))
|
def _get_max_subplot_ids(fig):
    """Given an input figure, return a dict containing the max subplot
    number for each subplot type in the figure.

    Parameters
    ----------
    fig: dict
        A plotly figure dict

    Returns
    -------
    dict
        A dict from subplot type strings to integers indicating the largest
        subplot number in the figure of that subplot type
    """
    # start every known subplot type (plus the cartesian axes) at 0
    counts = dict.fromkeys(_subplot_types, 0)
    counts['xaxis'] = 0
    counts['yaxis'] = 0
    for trace in fig.get('data', []):
        trace_type = trace.get('type', 'scatter')
        for subplot_type in _trace_to_subplot.get(trace_type, []):
            prop_name = _get_subplot_prop_name(subplot_type)
            # traces without an explicit subplot reference use the bare prefix
            val = trace.get(prop_name, _get_subplot_val_prefix(subplot_type))
            # extract trailing number (if any)
            number = _get_subplot_number(val)
            if number > counts[subplot_type]:
                counts[subplot_type] = number
    return counts
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extract dictionary of coefficients specific to required
    # intensity measure type
    C = self.COEFFS[imt]
    mean = self._compute_mean(C, rup, dists, sites, imt)
    # stddevs are sized to the number of sites (via the vs30 array length)
    stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
    return mean, stddevs
|
def decode(cls, command_str):
    """Decode a string encoded command back into a Command object.

    Args:
        command_str (str): The encoded command string output from a
            previous call to encode.

    Returns:
        Command: The decoded Command object.
    """
    name, _, raw_args = command_str.partition(" ")
    decoded_args = []
    if raw_args:
        # the argument list must be wrapped in a single pair of braces
        if not (raw_args.startswith('{') and raw_args.endswith('}')):
            raise DataError("Invalid command, argument is not contained in { and }", arg=raw_args, cmd=name)
        for piece in raw_args[1:-1].split(","):
            # hex:-prefixed pieces are hex-encoded utf-8 strings
            if piece.startswith("hex:"):
                piece = unhexlify(piece[4:]).decode('utf-8')
            decoded_args.append(piece)
    return Command(name, decoded_args)
|
def paragraph(node):
    """Process a paragraph, which includes all content under it"""
    text = ''
    if node.string_content is not None:
        text = node.string_content
    # NOTE(review): ' '.join(text) on a string inserts a space between every
    # character of the content -- verify against callers whether ''.join or
    # the raw string was intended.
    o = nodes.paragraph('', ' '.join(text))
    # carry the markdown source line number through for error reporting
    o.line = node.sourcepos[0][0]
    # append the processed children of this paragraph
    for n in MarkDown(node):
        o.append(n)
    return o
|
def get_plugins(modules, classes):
    """Find all given (sub-)classes in all modules.

    @param modules: the modules to search
    @ptype modules: iterator of modules
    @return: found classes
    @rtype: iterator of class objects
    """
    # delegate the per-module search and flatten the results lazily
    for mod in modules:
        for found in get_module_plugins(mod, classes):
            yield found
|
def visit_FormattedValue(self, node):
    """FormattedValue(expr value, int? conversion, expr? format_spec)"""
    # conversion is the ord() of the conversion character, or -1 for none
    conversion_markers = {
        -1: '',     # no formatting
        97: '!a',   # ascii formatting
        114: '!r',  # repr formatting
        115: '!s',  # string formatting
    }
    self.result += '{'
    self.visit(node.value)
    self.result += conversion_markers[node.conversion]
    if node.format_spec:
        self.result += ':'
        self.visit_joined_str(node.format_spec)
    self.result += '}'
|
def _hijack_target(self):
    """Replaces the target method on the target object with the proxy method.

    The replacement strategy depends on what is being doubled: classes and
    modules take a plain ``setattr``; properties must be proxied on the
    class; everything else is shadowed through the instance ``__dict__``.
    """
    if self._target.is_class_or_module():
        # Classes/modules: setattr is enough to shadow the attribute.
        setattr(self._target.obj, self._method_name, self)
    elif self._attr.kind == 'property':
        # Properties live on the class, so install a proxy property there
        # and stash this double on the instance under a mangled name.
        proxy_property = ProxyProperty(double_name(self._method_name), self._original_method, )
        setattr(self._target.obj.__class__, self._method_name, proxy_property)
        self._target.obj.__dict__[double_name(self._method_name)] = self
    else:
        # Plain instance method: shadow via the instance __dict__.
        self._target.obj.__dict__[self._method_name] = self
    if self._method_name in ['__call__', '__enter__', '__exit__']:
        # Dunder lookups bypass the instance, so hijack on the type as well.
        self._target.hijack_attr(self._method_name)
|
def OnToggle(self, event):
    """Toggle button event handler.

    Entering selection mode: snapshots the entry line's selection, selected
    text and the active table, then disables the entry line. Leaving
    selection mode: re-enables the entry line and switches back to the
    remembered table.
    """
    if self.selection_toggle_button.GetValue():
        # Entering selection mode: remember state so it can be restored.
        self.entry_line.last_selection = self.entry_line.GetSelection()
        self.entry_line.last_selection_string = self.entry_line.GetStringSelection()
        self.entry_line.last_table = self.main_window.grid.current_table
        self.entry_line.Disable()
        post_command_event(self, self.EnterSelectionModeMsg)
    else:
        # Leaving selection mode: restore entry line and previous table.
        self.entry_line.Enable()
        post_command_event(self, self.GridActionTableSwitchMsg, newtable=self.entry_line.last_table)
        post_command_event(self, self.ExitSelectionModeMsg)
|
def autogen_argparse_block(extra_args=None):
    """SHOULD TURN ANY REGISTERED ARGS INTO A A NEW PARSING CONFIG
    FILE FOR BETTER --help COMMANDS

    import utool as ut
    __REGISTERED_ARGS__ = ut.util_arg.__REGISTERED_ARGS__

    Args:
        extra_args (list): (default = None) currently unused; kept for
            interface compatibility.

    CommandLine:
        python -m utool.util_arg --test-autogen_argparse_block

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> extra_args = []
        >>> result = ut.autogen_argparse_block(extra_args)
        >>> print(result)
    """
    # BUGFIX: the old signature used a mutable default ([]) which is
    # shared across calls; use None as the sentinel instead.
    if extra_args is None:
        extra_args = []
    # TODO FINISHME: currently only groups registered args and reports
    # duplicates; it does not yet generate the argparse config.
    grouped_args = []
    # Group registered args that share any alias string (n**2 union merge).
    for argtup in __REGISTERED_ARGS__:
        argstr_list, type_, default, help_ = argtup
        argstr_set = set(argstr_list)
        for keyset, vals in grouped_args:
            if keyset.intersection(argstr_set):
                # An existing group shares an alias: merge into it.
                keyset.update(argstr_set)
                vals.append(argtup)
                break
        else:
            # No overlap with any existing group: start a new one.
            grouped_args.append((argstr_set, [argtup]))
    # DEBUG: report any argument registered more than once.
    multi_groups = [vals for keyset, vals in grouped_args if len(vals) > 1]
    if multi_groups:
        import utool as ut
        print('Following arg was specified multiple times')
        print(ut.repr4(multi_groups, newlines=2))
|
def convert(self, plugin=None):
    """Return a :class:`PluginBlockType` for the given plugin name.

    If plugin is ``None``, return the first registered plugin.

    :raises BlockNotSupported: when the plugin has no conversion for this
        block type.
    """
    if not plugin:
        # No plugin requested: fall back to the first registered conversion.
        return self.conversions[0]
    resolved = kurt.plugin.Kurt.get_plugin(plugin)
    if resolved.name not in self._plugins:
        err = BlockNotSupported("%s doesn't have %r" % (resolved.display_name, self))
        # Attach the offending block type for callers that inspect the error.
        err.block_type = self
        raise err
    return self._plugins[resolved.name]
|
def _fetch_url_data(self, url, username, password, verify, custom_headers):
    '''Hit a given http url and return the stats lines.

    :param url: base URL of the haproxy instance; STATS_URL is appended.
    :param username: HTTP basic-auth user name.
    :param password: HTTP basic-auth password.
    :param verify: TLS certificate verification flag passed to requests.
    :param custom_headers: extra headers; merged in place with the agent's
        default headers.
    :return: the response body split into lines (decoded to text on
        Python 3, raw bytes lines on Python 2).
    :raises requests.HTTPError: if the server answers with an error status.
    '''
    # Try to fetch data from the stats URL
    auth = (username, password)
    url = "%s%s" % (url, STATS_URL)
    custom_headers.update(headers(self.agentConfig))
    self.log.debug("Fetching haproxy stats from url: %s" % url)
    response = requests.get(url, auth=auth, headers=custom_headers, verify=verify, timeout=self.default_integration_http_timeout)
    response.raise_for_status()
    # it only needs additional decoding in py3, so skip it if it's py2
    if PY2:
        return response.content.splitlines()
    else:
        content = response.content
        # If the content is a string, it can't be decoded again
        # But if it's bytes, it can be decoded.
        # So, check if it has the decode method
        decode_fn = getattr(content, "decode", None)
        if callable(decode_fn):
            content = content.decode('utf-8')
        return content.splitlines()
|
def _write_build_file(self):
    """Write Maven build file (pom.xml).

    Renders the ``pom.xml.tpl`` template with this generator's project
    metadata into the base output directory.
    """
    # Collect all template variables in one place for readability.
    template_context = dict(
        version=self.api_version,
        product_accronym=self._product_accronym,
        class_prefix=self._class_prefix,
        root_api=self.api_root,
        api_prefix=self.api_prefix,
        product_name=self._product_name,
        name=self._name,
        header=self.header_content,
        version_string=self._api_version_string,
        package_prefix=self._package_prefix,
        library_version=self.library_version,
    )
    self.write(destination=self._base_output_directory, filename="pom.xml", template_name="pom.xml.tpl", **template_context)
|
def rabin_karp_factor(s, t, k):
    """Find a common factor by Rabin-Karp.

    :param string s: haystack
    :param string t: needle
    :param int k: factor length
    :returns: (i, j) such that s[i:i + k] == t[j:j + k] or None.
        In case of tie, lexicographical minimum (i, j) is returned
    :complexity: O(len(s) + len(t)) in expected time,
        and O(len(s) + len(t) * k) in worst case
    """
    # Multiplier of the outgoing (leading) character when rolling the hash.
    last_pos = pow(DOMAIN, k - 1) % PRIME
    pos = {}
    assert k > 0
    if len(s) < k or len(t) < k:
        return None
    hash_t = 0
    # Hash the first window of t, then index every window of t by its hash.
    for j in range(k):  # store hashing values
        hash_t = (DOMAIN * hash_t + ord(t[j])) % PRIME
    for j in range(len(t) - k + 1):
        if hash_t in pos:
            pos[hash_t].append(j)
        else:
            pos[hash_t] = [j]
        if j < len(t) - k:
            hash_t = roll_hash(hash_t, ord(t[j]), ord(t[j + k]), last_pos)
    hash_s = 0
    # Slide a window over s, comparing hashes against the index built above.
    for i in range(k):  # preprocessing
        hash_s = (DOMAIN * hash_s + ord(s[i])) % PRIME
    for i in range(len(s) - k + 1):
        if hash_s in pos:  # is this signature in s?
            for j in pos[hash_s]:
                # Confirm character-by-character to rule out hash collisions.
                if matches(s, t, i, j, k):
                    return (i, j)
        if i < len(s) - k:
            hash_s = roll_hash(hash_s, ord(s[i]), ord(s[i + k]), last_pos)
    return None
|
def shutdown(self, container, instances=None, map_name=None, **kwargs):
    """Shut down container instances from a container configuration. Typically this means stopping and removing
    containers. Note that not all policy classes necessarily implement this method.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to remove. If not specified, will remove all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param kwargs: Additional kwargs. Only options controlling policy behavior are considered.
    :return: Return values of removed containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # Thin wrapper: delegate to the generic action runner with the
    # 'shutdown' action name.
    return self.run_actions('shutdown', container, instances=instances, map_name=map_name, **kwargs)
|
def GetWSAActionFault(operation, name):
    """Find wsa:Action attribute, and return value or WSA.FAULT
    for the default.
    """
    # Fall back to the standard WS-Addressing fault action when the
    # operation's fault does not declare one explicitly.
    action = operation.faults[name].action
    return action if action is not None else WSA.FAULT
|
def install_cygwin(name, install_args=None, override_args=False):
    '''Instructs Chocolatey to install a package via Cygwin.

    name
        The name of the package to be installed. Only accepts a single argument.

    install_args
        A list of install arguments you want to pass to the installation process
        i.e product key or feature list

    override_args
        Set to true if you want to override the original install arguments (for
        the native installer) in the package and use your own. When this is set
        to False install_args will be appended to the end of the default
        arguments

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_cygwin <package name>
        salt '*' chocolatey.install_cygwin <package name> install_args=<args> override_args=True
    '''
    # Thin wrapper: delegate to the generic install with the cygwin source.
    return install(name, source='cygwin', install_args=install_args, override_args=override_args)
|
def t_INDENTIFIER(t):
    r'(\$?[_a-zA-Z][_a-zA-Z0-9]*)|(__[A-Z_]+__)'
    # NOTE: in ply, the raw docstring above IS the token regex — do not
    # edit it casually. (The token name is spelled "INDENTIFIER" in the
    # grammar; renaming it would break every parser rule referencing it.)
    if t.value in reserved:
        # Reserved words get their own token type (e.g. value.upper()).
        t.type = t.value.upper()
        if t.value in reservedMap:
            # Some reserved words are normalized to a canonical spelling.
            t.value = reservedMap[t.value]
    elif t.value in strStatment:
        t.type = 'STATEMENT'
    return t
|
def count_relations(graph) -> Counter:
    """Return a histogram over all relationships in a graph.

    :param pybel.BELGraph graph: A BEL graph
    :return: A Counter from {relation type: frequency}
    """
    # Stream each edge's relation type straight into the counter.
    relation_types = (edge_data[RELATION] for _, _, edge_data in graph.edges(data=True))
    return Counter(relation_types)
|
def _evaluate ( self , R , phi = 0. , t = 0. ) :
"""NAME :
_ evaluate
PURPOSE :
evaluate the potential at R , phi , t
INPUT :
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT :
Phi ( R , phi , t )
HISTORY :
2011-10-19 - Started - Bovy ( IAS )"""
|
# Calculate relevant time
if not self . _tform is None :
if t < self . _tform :
smooth = 0.
elif t < self . _tsteady :
deltat = t - self . _tform
xi = 2. * deltat / ( self . _tsteady - self . _tform ) - 1.
smooth = ( 3. / 16. * xi ** 5. - 5. / 8 * xi ** 3. + 15. / 16. * xi + .5 )
else : # fully on
smooth = 1.
else :
smooth = 1.
return smooth * self . _twophio / 2. * R ** self . _p * m . cos ( 2. * ( phi - self . _phib ) )
|
def render_scene(self):
    "Render the scene one time (single-buffered, no buffer swap)."
    # should be a no-op after the first frame is rendered
    self.init_gl()
    glfw.make_context_current(self.window)
    self.renderer.render_scene()
    # Done rendering
    # glfw.swap_buffers(self.window)  # avoid double buffering to avoid stalling
    # Force the pipeline to finish since we render single-buffered.
    glFlush()
    # single buffering
    glfw.poll_events()
|
def split_name(name):
    """Extracts pieces of name from full name string.

    Full name can have one of these formats:
        <NAME_TEXT> |
        /<NAME_TEXT>/ |
        <NAME_TEXT> /<NAME_TEXT>/ |
        /<NAME_TEXT>/ <NAME_TEXT> |
        <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT>

    <NAME_TEXT> can include almost anything excluding commas, numbers,
    special characters (though some test files use numbers for the names).
    Text between slashes is considered a surname, outside slashes - given
    name.

    This method splits full name into pieces at slashes, e.g.:

        "First /Last/" -> ("First", "Last", "")
        "/Last/First" -> ("", "Last", "First")
        "First /Last/ Jr." -> ("First", "Last", "Jr.")
        "First Jr." -> ("First Jr.", "", "")

    :param str name: Full name string.
    :return: 3-tuple `(given1, surname, given2)`; `surname` or the given
        names will be empty strings if they are not present in full string.
    """
    # Split at the first two slashes: text before the first slash is the
    # first given name, between the slashes the surname, after the second
    # slash the remaining given name. Missing parts come out as "".
    before_slash, _, remainder = name.partition("/")
    between_slashes, _, after_slash = remainder.partition("/")
    return (before_slash.strip(), between_slashes.strip(), after_slash.strip())
|
def reset_script(self):
    """Clear any partially received script.

    Resets the remote bridge back to an idle, error-free state, drops any
    parsed script and empties the device's raw script buffer.

    :return: ``[0]`` — presumably a success status list expected by the
        RPC layer; TODO confirm against the bridge RPC convention.
    """
    self.remote_bridge.status = BRIDGE_STATUS.IDLE
    self.remote_bridge.error = 0
    self.remote_bridge.parsed_script = None
    # Discard any raw script bytes accumulated so far.
    self._device.script = bytearray()
    return [0]
|
def immediate_postdominators(self, end, target_graph=None):
    """Get all immediate postdominators of sub graph from given node upwards.

    :param str end: id of the node to navigate backwards from.
    :param networkx.classes.digraph.DiGraph target_graph: graph to analyse, default is self.graph.

    :return: each node of graph as index values, with element as respective node's immediate postdominator.
    :rtype: dict
    """
    # Postdominators are dominators of the reversed graph.
    return self._immediate_dominators(end, target_graph=target_graph, reverse_graph=True)
|
def compute_total(self, precision=None):
    '''Gets the total of the invoice with a defined decimal precision.

    @param precision: int Number of decimal places
    @return: Decimal
    '''
    # Total = gross + taxes - discounts, all computed at the same precision.
    gross = self.compute_gross(precision)
    taxes = self.compute_taxes(precision)
    discounts = self.compute_discounts(precision)
    return quantize(gross + taxes - discounts, places=precision)
|
def factory(container, name=None):
    """A decorator to register a factory on the container.

    For more information see :meth:`Container.add_factory`.
    """
    def decorator(func):
        # Register the decorated callable and hand it back unchanged.
        container.add_factory(func, name)
        return func
    return decorator
|
def _is_valid_inherit_element(self, element):
    """Check that the children of element can be manipulated to apply the CSS
    properties.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: True if the children of element can be manipulated to apply
             the CSS properties or False if the children of element cannot
             be manipulated to apply the CSS properties.
    :rtype: bool
    """
    # pylint: disable=no-self-use
    # Tag must be in the inheritable whitelist...
    tag_name = element.get_tag_name()
    if tag_name not in AccessibleCSSImplementation.VALID_INHERIT_TAGS:
        return False
    # ...and the element must not be explicitly marked as ignored.
    return not element.has_attribute(CommonFunctions.DATA_IGNORE)
|
def refresh(self):
    """Updates the current line decoration.

    Clears any previous marker and, when the mode is enabled and a line is
    set, paints a full-width background decoration on that line.
    """
    if self.enabled and self.line:
        # Drop the previous decoration before drawing the new one.
        self._clear_deco()
        brush = QtGui.QBrush(self._color)
        self._decoration = TextDecoration(self.editor.textCursor(), start_line=self.line)
        self._decoration.set_background(brush)
        self._decoration.set_full_width()
        # High draw order so the marker is painted on top of other decorations.
        self._decoration.draw_order = 255
        self.editor.decorations.append(self._decoration)
|
def find_revision_number(self, revision=None):
    """Find the local revision number of the given revision.

    :param revision: Revision to look up; falls back to
        ``self.default_revision`` when not given.
    :return: The local (numeric) Mercurial revision as an :class:`int`.
    :raises EnvironmentError: When ``hg id --num`` output is not a number.
    """
    # Make sure the local repository exists.
    self.create()
    # Try to find the revision number of the specified revision.
    revision = revision or self.default_revision
    # rstrip('+') removes the marker that hg appends for a dirty working dir.
    output = self.context.capture('hg', 'id', '--rev=%s' % revision, '--num').rstrip('+')
    # Validate the `hg id --num' output.
    if not output.isdigit():
        msg = "Failed to find local revision number! ('hg id --num' gave unexpected output)"
        raise EnvironmentError(msg)
    return int(output)
|
def num_pores(self, labels='all', mode='or'):
    r"""Returns the number of pores of the specified labels.

    Parameters
    ----------
    labels : list of strings, optional
        The pore labels that should be included in the count.
        If not supplied, all pores are counted.
    mode : string, optional
        Specifies how the count should be performed. The options are:

        **'or', 'union', 'any'**: (default) Pores with *one or more* of
        the given labels are counted.

        **'and', 'intersection', 'all'**: Pores with *all* of the given
        labels are counted.

        **'xor', 'exclusive_or'**: Pores with *only one* of the given
        labels are counted.

        **'nor', 'none', 'not'**: Pores with *none* of the given labels
        are counted.

        **'nand'**: Pores with *some but not all* of the given labels are
        counted.

        **'xnor'**: Pores with *more than one* of the given labels are
        counted.

    Returns
    -------
    Np : int
        Number of pores with the specified labels

    See Also
    --------
    num_throats
    count

    Notes
    -----
    Technically, *'nand'* and *'xnor'* should also count pores with *none*
    of the labels, however, to make the count more useful these are not
    included.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn.num_pores()
    125
    >>> pn.num_pores(labels=['top'])
    25
    >>> pn.num_pores(labels=['top', 'front'], mode='or')
    45
    >>> pn.num_pores(labels=['top', 'front'], mode='xnor')
    """
    # Resolve the matching pore indices, then count them.
    matching_pores = self._get_indices(labels=labels, mode=mode, element='pore')
    return sp.shape(matching_pores)[0]
|
def to_bitstream(self):
    '''Create bitstream from properties.

    Serializes this record to its wire format: TTL, locator count, EID
    prefix mask length, action, authoritative flag, reserved bits, map
    version, the AFI-encoded EID prefix, and finally each locator record.

    :return: the assembled ``BitArray``.
    '''
    # Verify that properties make sense
    self.sanitize()
    # Start with the TTL
    bitstream = BitArray('uint:32=%d' % self.ttl)
    # Add the locator count
    bitstream += BitArray('uint:8=%d' % len(self.locator_records))
    # Add the EID prefix mask length
    bitstream += BitArray('uint:8=%d' % self.eid_prefix.prefixlen)
    # Add the NMR action
    bitstream += BitArray('uint:3=%d' % self.action)
    # Add the authoritative flag
    bitstream += BitArray('bool=%d' % self.authoritative)
    # Add reserved bits
    bitstream += self._reserved1
    # Add the map version
    bitstream += BitArray('uint:12=%d' % self.map_version)
    # Add the EID prefix
    bitstream += get_bitstream_for_afi_address(self.eid_prefix)
    # Add the locator records
    for locator_record in self.locator_records:
        bitstream += locator_record.to_bitstream()
    return bitstream
|
def _islinklike(dir_path):
    '''
    Parameters
    ----------
    dir_path : str
        Directory path.

    Returns
    -------
    bool
        ``True`` if :data:`dir_path` is a link *or* junction.
    '''
    path = ph.path(dir_path)
    # Junctions only exist on Windows; on other platforms this helper
    # reports False (matching the original behavior, which never reached
    # the islink check off-Windows).
    if platform.system() != 'Windows':
        return False
    return path.isjunction() or path.islink()
|
def copy(self, target_parent, name=None, include_children=True, include_instances=True):
    """Copy the `Part` to target parent, both of them having the same category.

    .. versionadded:: 2.3

    :param target_parent: `Part` object under which the desired `Part` is copied
    :type target_parent: :class:`Part`
    :param name: how the copied top-level `Part` should be called
    :type name: basestring
    :param include_children: True to copy also the descendants of `Part`.
    :type include_children: bool
    :param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent.
    :type include_instances: bool
    :returns: copied :class:`Part` model.
    :raises IllegalArgumentError: if part and target_parent have different `Category`
    :raises IllegalArgumentError: if part and target_parent are identical

    Example
    -------
    >>> model_to_copy = client.model(name='Model to be copied')
    >>> bike = client.model('Bike')
    >>> model_to_copy.copy(target_parent=bike, name='Copied model',
    >>>                    include_children=True,
    >>>                    include_instances=True)
    """
    if self.category == Category.MODEL and target_parent.category == Category.MODEL:
        # Cannot add a model under an instance or vice versa
        copied_model = relocate_model(part=self, target_parent=target_parent, name=name, include_children=include_children)
        if include_instances:
            # Replicate every instance of this model under every instance
            # of the target parent.
            instances_to_be_copied = list(self.instances())
            parent_instances = list(target_parent.instances())
            for parent_instance in parent_instances:
                for instance in instances_to_be_copied:
                    instance.populate_descendants()
                    move_part_instance(part_instance=instance, target_parent=parent_instance, part_model=self, name=instance.name, include_children=include_children)
        return copied_model
    elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
        copied_instance = relocate_instance(part=self, target_parent=target_parent, name=name, include_children=include_children)
        return copied_instance
    else:
        # BUGFIX: the message previously contained bare '{}' placeholders
        # that were never filled in; format in the actual part names.
        raise IllegalArgumentError('part "{}" and target parent "{}" must have the same category'.format(self.name, target_parent.name))
|
def paired_environment_phenotype_grid(environment, phenotypes, **kwargs):
    """Plots the given environment (EnvironmentFile object) and phenotypes
    (2d array of numbers or binary strings) onto the same image and saves
    the image based on the name of the environment file. The environment file
    will be represented by coloring square cells, while the phenotypes are
    circles overlaid on top.

    By default, color is determined using the palettes in the EnvironmentFile
    object passed as the first parameter. The easiest way to change color
    palettes is to assign new palettes to environment.task_palette and
    environment.resource_palette before calling this function. If either the
    environment or phenotypes grids contain integers greater than 1, you should
    pass a `denom` keyword argument indicating how to normalize them. Using
    differnet denoms for the environment and phenotypes is not currently
    supported (if you need to, you should probably just divide everything by
    the appropraite denoms before passing them to this funciton).

    Inputs: environment - an EnvironmentFile object indicatng the distribution
                          of resources and the appropriate palettes to use.
            phenotypes - a 2d array of numbers or binary strings representing
                         the placement of phenotypes across the environment

            kwargs:
                denom - an integer indicating how to normalize numbers in the
                        environment and phenotype grids if neccesary.
    """
    # NOTE(review): the second value returned by get_kwargs is unused here;
    # plotting reads the palettes directly off the environment instead.
    denom, palette = get_kwargs(environment, kwargs)
    print("plot world")
    # Environment squares, colored by the resource palette.
    plot_world(environment, palette=environment.resource_palette, denom=denom)
    print("plot phens")
    # Phenotype overlay, colored by the task palette.
    plot_phens(phenotypes, palette=environment.task_palette, denom=denom)
    print("save")
    # Output file name is derived from the environment file's name.
    plt.savefig("phenotype_niches_" + environment.name + ".png", dpi=1000)
    print("done")
|
def items(self):
    """Return a copied list of the property names and values
    of this CIM instance.

    Each item in the returned list is a tuple of property name (in the
    original lexical case) and property value.

    The order of properties is preserved.
    """
    result = []
    for prop_name, prop in self.properties.items():
        result.append((prop_name, prop.value))
    return result
|
def getPixmap(self, matrix=None, colorspace=None, alpha=0, clip=None):
    """getPixmap(self, matrix=None, colorspace=None, alpha=0, clip=None) -> Pixmap

    Render this display list to a Pixmap. Thin SWIG wrapper — all work is
    delegated to the native ``_fitz.DisplayList_getPixmap``.
    """
    return _fitz.DisplayList_getPixmap(self, matrix, colorspace, alpha, clip)
|
def data_received(self, data):
    """Used to signal `asyncio.Protocol` of incoming data.

    Dispatches directly to the registered callback when one is set;
    otherwise the chunk is queued for later consumption.
    """
    callback = self._on_data
    if callback:
        callback(data)
    else:
        self._queued_data.append(data)
|
def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, likelihood=None, kern=None):
    """Get the predictive quantiles around the prediction at X.

    :param X: The points at which to make a prediction
    :type X: np.ndarray (Xnew x self.input_dim)
    :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
    :type quantiles: tuple
    :returns: list of quantiles for each X and predictive quantiles for interval combination
    :rtype: [np.ndarray (Xnew x self.input_dim), np.ndarray (Xnew x self.input_dim)]
    """
    # Quantiles from the underlying (unwarped) GP.
    qs = super(WarpedGP, self).predict_quantiles(X, quantiles, Y_metadata=Y_metadata, likelihood=likelihood, kern=kern)
    if self.predict_in_warped_space:
        # Map each quantile back through the inverse warping function so
        # the result lives in the original observation space.
        return [self.warping_function.f_inv(q) for q in qs]
    return qs
|
def create_modelo(self):
    """Get an instance of modelo services facade.

    :return: a :class:`Modelo` bound to this client's endpoint and
        credentials.
    """
    return Modelo(self.networkapi_url, self.user, self.password, self.user_ldap)
|
def update_floatingip(floatingip_id, port=None, profile=None):
    '''Updates a floatingIP.

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.update_floatingip network-name port-name

    :param floatingip_id: ID of floatingIP
    :param port: ID or name of port, to associate floatingip to `None` or do
        not specify to disassociate the floatingip (Optional)
    :param profile: Profile to build on (Optional)
    :return: Value of updated floating IP information
    '''
    # Build an authenticated neutron client for the given profile, then
    # delegate the update (port=None disassociates the floating IP).
    conn = _auth(profile)
    return conn.update_floatingip(floatingip_id, port)
|
def _parse_value ( self , raw ) :
"""Parses value
: param raw : raw value
: return : Parsed value"""
|
try :
if not raw . startswith ( "0" ) :
val = float ( raw )
if ( val % 1 ) == 0 : # integer
val = int ( raw )
return str ( val )
return self . num_format . format ( val )
else :
raise ValueError ( "Cannot parse int!" )
except :
return str ( raw )
|
def check_status(zap_helper, timeout):
    """Check if ZAP is running and able to receive API calls.

    You can provide a timeout option which is the amount of time in seconds
    the command should wait for ZAP to start if it is not currently running.
    This is useful to run before calling other commands if ZAP was started
    outside of zap-cli. For example:

        zap-cli status -t 60 && zap-cli open-url "http://127.0.0.1/"

    Exits with code 2 if ZAP is not running and no timeout was provided.
    """
    with helpers.zap_error_handler():
        if zap_helper.is_running():
            console.info('ZAP is running')
        elif timeout is not None:
            # Not running yet, but a timeout was given: wait for startup.
            zap_helper.wait_for_zap(timeout)
            console.info('ZAP is running')
        else:
            console.error('ZAP is not running')
            sys.exit(2)
|
def add_site(self, site_name, location_name=None, er_data=None, pmag_data=None):
    """Create a Site object and add it to self.sites.

    If a location name is provided, add the site to location.sites as well;
    a location with that name is created first if it does not exist yet.

    :param site_name: name of the new site.
    :param location_name: optional name of the parent location.
    :param er_data: optional er-format data for the site.
    :param pmag_data: optional pmag-format data for the site.
    :return: the newly created Site.
    """
    if location_name:
        location = self.find_by_name(location_name, self.locations)
        if not location:
            # Parent location does not exist yet: create it on the fly.
            location = self.add_location(location_name)
    else:
        location = None
    ## check all declinations/azimuths/longitudes in range 0=>360.
    #for key, value in er_data.items():
    #    er_data[key] = pmag.adjust_to_360(value, key)
    new_site = Site(site_name, location, self.data_model, er_data, pmag_data)
    self.sites.append(new_site)
    if location:
        # Keep the parent location's site list in sync.
        location.sites.append(new_site)
    return new_site
|
def _get_available_letters(field_name, queryset):
    """Makes a query to the database to return the first character of each
    value of the field and table passed in.

    Returns a set that represents the letters that exist in the database.
    """
    # BUGFIX: compare the full (major, minor) version tuple. The previous
    # check `django.VERSION[1] <= 4` looked only at the minor number, so
    # e.g. Django 2.0-2.4 would wrongly take the legacy (<= 1.4) branch.
    if django.VERSION[:2] <= (1, 4):
        result = queryset.values(field_name).annotate(fl=FirstLetter(field_name)).values('fl').distinct()
        return set(res['fl'] for res in result if res['fl'] is not None)
    else:
        from django.db import connection
        qn = connection.ops.quote_name
        db_table = queryset.model._meta.db_table
        sql = "SELECT DISTINCT UPPER(SUBSTR(%s, 1, 1)) as letter FROM %s" % (qn(field_name), qn(db_table))
        cursor = connection.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall() or ()
        return set(row[0] for row in rows if row[0] is not None)
|
def rebuild_schema(doc, r, df):
    """Rebuild the schema for a resource based on a dataframe.

    Replaces the resource's ``Table`` schema term children with one
    ``Column`` child per dataframe index level and column, inferring
    datatypes from the dataframe and carrying over extra properties from
    any previous schema columns.

    :param doc: document to update in place.
    :param r: resource object (or resource name) whose schema is rebuilt.
    :param df: pandas DataFrame supplying columns, index and dtypes.
    """
    import numpy as np
    # Re-get the resource in the doc, since it may be different.
    try:
        r = doc.resource(r.name)
    except AttributeError:  # Maybe r is actually a resource name
        r = doc.resource(r)
    def alt_col_name(name, i):
        # Sanitized fallback name: lowercase, non-word chars collapsed to '_'.
        import re
        if not name:
            return 'col{}'.format(i)
        return re.sub('_+', '_', re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_')
    # Map numpy dtypes to schema datatype names; anything else -> 'unknown'.
    df_types = {np.dtype('O'): 'text', np.dtype('int64'): 'integer', np.dtype('float64'): 'number'}
    try:
        df_index_frame = df.index.to_frame()
    except AttributeError:
        # Presumably a plain index without to_frame() — TODO confirm which
        # pandas index types hit this path.
        df_index_frame = None
    def get_col_dtype(c):
        # Resolve a column's datatype: the frame first, then the index.
        c = str(c)
        try:
            return df_types[df[c].dtype]
        except KeyError:  # Maybe it is in the index?
            pass
        try:
            return df_types[df_index_frame[c].dtype]
        except TypeError:  # Maybe not a multi-index
            pass
        if c == 'id' or c == df.index.name:
            return df_types[df.index.dtype]
        return 'unknown'
    columns = []
    schema_term = r.schema_term[0]
    if schema_term:
        # Remember the old columns' properties so they can be re-applied,
        # then clear out the existing Column children.
        old_cols = {c['name'].value: c.properties for c in schema_term.children}
        for c in schema_term.children:
            schema_term.remove_child(c)
        schema_term.children = []
    else:
        old_cols = {}
        schema_term = doc['Schema'].new_term('Table', r.schema_name)
    # Index levels come first; unnamed levels default to "id".
    index_names = [n if n else "id" for n in df.index.names]
    for i, col in enumerate(index_names + list(df.columns)):
        acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else ''
        d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn}
        # Look up the old column either by its name or its alternate name.
        if col in old_cols.keys():
            lookup_name = col
        elif acn in old_cols.keys():
            lookup_name = acn
        else:
            lookup_name = None
        if lookup_name and lookup_name in old_cols:
            # NOTE(review): this outer loop looks redundant — k, v are
            # immediately shadowed by the inner loop and the same merge is
            # repeated once per schema_term property. Verify intent.
            for k, v in schema_term.properties.items():
                old_col = old_cols.get(lookup_name)
                for k, v in old_col.items():
                    if k != 'name' and v:
                        d[k] = v
        columns.append(d)
    # Emit the Column children, splitting off the positional arguments.
    for c in columns:
        name = c['name']
        del c['name']
        datatype = c['datatype']
        del c['datatype']
        altname = c['altname']
        del c['altname']
        schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c)
|
def convert_list(self, list_input):
    """Iterate over the JSON list and process it
    to generate either an HTML table or a HTML list, depending on what's inside.
    If suppose some key has array of objects and all the keys are same,
    instead of creating a new row for each such entry,
    club such values, thus it makes more sense and more readable table.

    @example:
        jsonObject = {
            "sampleData": [
                {"a": 1, "b": 2, "c": 3},
                {"a": 5, "b": 6, "c": 7}
        OUTPUT:
        _____
        |    | a | c | b |
        | sampleData | --- | --- | --- |
        |    | 1 | 3 | 2 |
        |    | 5 | 7 | 6 |

    @contributed by: @muellermichel
    """
    if not list_input:
        return ""
    if self.clubbing:
        column_headers = self.column_headers_from_list_of_dicts(list_input)
        if column_headers is not None:
            # Homogeneous list of dicts: render as a table, one row per dict.
            parts = [self.table_init_markup, '<thead>']
            parts.append('<tr><th>' + '</th><th>'.join(column_headers) + '</th></tr>')
            parts.append('</thead>')
            parts.append('<tbody>')
            for entry in list_input:
                cells = [self.convert_json_node(entry[column_header]) for column_header in column_headers]
                parts.append('<tr><td>' + '</td><td>'.join(cells) + '</td></tr>')
            parts.append('</tbody>')
            parts.append('</table>')
            return ''.join(parts)
    # so you don't want or need clubbing eh? This makes @muellermichel very sad... ;(
    # alright, let's fall back to a basic list here...
    list_items = '</li><li>'.join(self.convert_json_node(child) for child in list_input)
    return '<ul><li>' + list_items + '</li></ul>'
|
def _internal_kv_get(key):
    """Fetch the value of a binary key.

    :param key: key to look up in the internal key/value store.
    :return: the stored value (the "value" hash field in Redis), or the
        local store's entry when running in local mode.
    """
    worker = ray.worker.get_global_worker()
    if worker.mode == ray.worker.LOCAL_MODE:
        # Local mode has no Redis backend; use the in-process store.
        return _local.get(key)
    return worker.redis_client.hget(key, "value")
|
def get_index(self, as_dict=False, grouping_pattern=None):
    """Returns the index of the DataFrames in the system.

    Parameters
    ----------
    as_dict : boolean, optional
        If True, returns a 1:1 key-value matching for further processing
        prior to groupby functions. Otherwise (default) the index
        is returned as pandas index.
    grouping_pattern : dict, optional
        Dictionary with keys being regex patterns matching index and
        values the name for the grouping. If the index is a pandas
        multiindex, the keys must be tuples of length levels in the
        multiindex, with a valid regex expression at each position.
        Otherwise, the keys need to be strings.
        Only relevant if as_dict is True.

    Returns
    -------
    The index of the first available DataFrame attribute, or None (with a
    warning logged) when no DataFrame attribute is set.
    """
    possible_dataframes = ['A', 'L', 'Z', 'Y', 'F', 'FY', 'M', 'S',
                           'D_cba', 'D_pba', 'D_imp', 'D_exp',
                           'D_cba_reg', 'D_pba_reg', 'D_imp_reg', 'D_exp_reg',
                           'D_cba_cap', 'D_pba_cap', 'D_imp_cap', 'D_exp_cap', ]
    for df in possible_dataframes:
        if (df in self.__dict__) and (getattr(self, df) is not None):
            orig_idx = getattr(self, df).index
            break
    else:
        # BUGFIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning("No attributes available to get index")
        return None
    if not as_dict:
        return orig_idx
    # Identity mapping, then overwrite entries matched by the patterns.
    dd = {k: k for k in orig_idx}
    if grouping_pattern:
        for pattern, new_group in grouping_pattern.items():
            # BUGFIX: isinstance instead of `type(...) is str` so that str
            # subclasses are handled as plain string patterns too.
            if isinstance(pattern, str):
                dd.update({k: new_group for k in dd if re.match(pattern, k)})
            else:
                # Multiindex: every level must match its own pattern.
                dd.update({k: new_group for k in dd
                           if all(re.match(pat, k[nr]) for nr, pat in enumerate(pattern))})
    return dd
|
def sct2e(sc, sclkdp):
    """Convert encoded spacecraft clock ("ticks") to ephemeris
    seconds past J2000 (ET).

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sct2e_c.html

    :param sc: NAIF spacecraft ID code.
    :type sc: int
    :param sclkdp: SCLK, encoded as ticks since spacecraft clock start.
    :type sclkdp: float
    :return: Ephemeris time, seconds past J2000.
    :rtype: float
    """
    # Marshal the Python arguments into the C types CSPICE expects.
    sc_id = ctypes.c_int(sc)
    ticks = ctypes.c_double(sclkdp)
    # Output parameter, filled in by the C routine.
    ephemeris_time = ctypes.c_double()
    libspice.sct2e_c(sc_id, ticks, ctypes.byref(ephemeris_time))
    return ephemeris_time.value
|
def sample(records, k, random_seed=None):
    """Choose a length-``k`` subset of ``records``, retaining the input
    order. If k > len(records), all are returned. If an integer
    ``random_seed`` is provided, sets ``random.seed()``
    """
    if random_seed is not None:
        random.seed(random_seed)
    reservoir = []
    for index, item in enumerate(records):
        if len(reservoir) >= k:
            # Classic reservoir sampling: element ``index`` replaces a
            # previous pick with probability k / (index + 1).
            slot = random.randint(0, index)
            if slot < k:
                reservoir[slot] = item
        else:
            # Fill phase: keep the first k records unconditionally.
            reservoir.append(item)
    return reservoir
|
def strace_read(self, num_instructions):
    """Reads and returns a number of instructions captured by STRACE.

    The number of instructions must be a non-negative value of at most
    ``0x10000`` (``65536``).

    Args:
      self (JLink): the ``JLink`` instance.
      num_instructions (int): number of instructions to fetch.

    Returns:
      A list of instruction addresses ordered from most recently executed
      to oldest. Fewer than ``num_instructions`` entries are returned when
      the trace buffer does not hold that many.

    Raises:
      JLinkException: on error.
      ValueError: if ``num_instructions`` is outside ``[0, 0x10000]``.
    """
    if not (0 <= num_instructions <= 0x10000):
        raise ValueError('Invalid instruction count.')
    # Fixed-size C array the DLL fills with instruction addresses.
    buf = (ctypes.c_uint32 * num_instructions)()
    res = self._dll.JLINK_STRACE_Read(ctypes.byref(buf), num_instructions)
    if res < 0:
        raise errors.JLinkException('Failed to read from STRACE buffer.')
    # ``res`` is the count actually read; trim the buffer accordingly.
    return list(buf)[:res]
|
def memory_objects_for_hash(self, n):
    """Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash"""
    # Set comprehension over every address associated with the hash.
    return {self[addr] for addr in self.addrs_for_hash(n)}
|
def query(self, query_str, *query_args, **query_options):
    """run a raw query on the db

    query_str -- string -- the query to run
    *query_args -- if the query_str is a formatting string, pass the values in this
    **query_options -- any query options can be passed in by using key=val syntax
    """
    # Acquire a connection scoped to this call, expose it to _query via
    # the options dict, and let the context manager release it.
    with self.connection(**query_options) as conn:
        query_options['connection'] = conn
        return self._query(query_str, query_args, **query_options)
|
def v(msg, *args, **kwargs):
    '''Log *msg* at the package's custom verbose level.

    Thin wrapper around ``logging.log``; positional and keyword arguments
    are forwarded unchanged (lazy %-style formatting applies).
    '''
    # VERBOSE is a custom logging level registered elsewhere in this module.
    return logging.log(VERBOSE, msg, *args, **kwargs)
|
def _sendAction ( self , action , attrs = None , chan_vars = None ) :
"""Send action to Asterisk Manager Interface .
@ param action : Action name
@ param attrs : Tuple of key - value pairs for action attributes .
@ param chan _ vars : Tuple of key - value pairs for channel variables ."""
|
self . _conn . write ( "Action: %s\r\n" % action )
if attrs :
for ( key , val ) in attrs :
self . _conn . write ( "%s: %s\r\n" % ( key , val ) )
if chan_vars :
for ( key , val ) in chan_vars :
self . _conn . write ( "Variable: %s=%s\r\n" % ( key , val ) )
self . _conn . write ( "\r\n" )
|
def ensure_instance(value, types):
    """Raise ``TypeError`` unless *value* is an instance of *types*.

    *types* may be a single type or a tuple of types, exactly as accepted
    by :func:`isinstance`. Returns ``None`` on success.

    :attr types: Type of list of types
    """
    # Guard-clause form: the happy path falls through and returns None.
    if isinstance(value, types):
        return
    raise TypeError("expected instance of {}, got {}".format(types, value))
|
def loads(s, single=False):
    """Deserialize :class:`Eds` string representations.

    Args:
        s (str): Eds string
        single (bool): if `True`, only return the first Xmrs object
    Returns:
        a generator of :class:`Eds` objects (unless the *single* option
        is `True`)
    """
    parsed = deserialize(s)
    # ``single`` collapses the generator to just its first element.
    return next(parsed) if single else parsed
|
def update_ssl_termination(self, securePort=None, enabled=None, secureTrafficOnly=None):
    """Update this load balancer's existing SSL termination settings
    without touching the certificates/keys already installed.
    """
    # Pure delegation: the manager owns the actual API call.
    return self.manager.update_ssl_termination(
        self,
        securePort=securePort,
        enabled=enabled,
        secureTrafficOnly=secureTrafficOnly,
    )
|
def newton_iterate(evaluate_fn, s, t):
    r"""Perform a Newton iteration.

    In this function, we assume that :math:`s` and :math:`t` are nonzero,
    this makes convergence easier to detect since "relative error" at
    ``0.0`` is not a useful measure.

    There are several tolerance / threshold quantities used below:

    * :math:`10` (:attr:`MAX_NEWTON_ITERATIONS`) iterations will be done
      before "giving up". This is based on the assumption that we are
      already starting near a root, so quadratic convergence should
      terminate quickly.
    * :math:`\tau = \frac{1}{4}` is used as the boundary between linear
      and superlinear convergence. So if the current error
      :math:`\|p_{n + 1} - p_n\|` is not smaller than :math:`\tau` times
      the previous error :math:`\|p_n - p_{n - 1}\|`, then convergence
      is considered to be linear at that point.
    * :math:`\frac{2}{3}` of all iterations must be converging linearly
      for convergence to be stopped (and moved to the next regime). This
      will only be checked after 4 or more updates have occurred.
    * :math:`\tau = 2^{-42}` (:attr:`NEWTON_ERROR_RATIO`) is used to
      determine that an update is sufficiently small to stop iterating.
      So if the error :math:`\|p_{n + 1} - p_n\|` is smaller than
      :math:`\tau` times the size of the term being updated
      :math:`\|p_n\|`, then we exit with the "correct" answer.

    It is assumed that ``evaluate_fn`` will use a Jacobian return value of
    :data:`None` to indicate that :math:`F(s, t)` is exactly ``0.0``. We
    **assume** that if the function evaluates to exactly ``0.0``, then we
    are at a solution. It is possible however, that badly parameterized
    curves can evaluate to exactly ``0.0`` for inputs that are relatively
    far away from a solution (see issue #21).

    Args:
        evaluate_fn (Callable[Tuple[float, float], tuple]): A callable
            which takes :math:`s` and :math:`t` and produces an evaluated
            function value and the Jacobian matrix.
        s (float): The (first) parameter where the iteration will start.
        t (float): The (second) parameter where the iteration will start.

    Returns:
        Tuple[bool, float, float]: The triple of

        * Flag indicating if the iteration converged.
        * The current :math:`s` value when the iteration stopped.
        * The current :math:`t` value when the iteration stopped.
    """
    # Several quantities will be tracked throughout the iteration:
    # * norm_update_prev: ||p{n}   - p{n-1}|| = ||dp{n-1}||
    # * norm_update:      ||p{n+1} - p{n}  || = ||dp{n}  ||
    # * linear_updates:   This is a count on the number of times that
    #                     ``dp{n}`` "looks like" ``dp{n-1}`` (i.e.
    #                     is within a constant factor of it).
    norm_update_prev = None
    norm_update = None
    linear_updates = 0  # Track the number of "linear" updates.
    current_s = s
    current_t = t
    for index in six.moves.xrange(MAX_NEWTON_ITERATIONS):
        jacobian, func_val = evaluate_fn(current_s, current_t)
        if jacobian is None:
            # A Jacobian of None signals F(s, t) == 0.0 exactly; treat
            # the current point as a converged solution.
            return True, current_s, current_t
        singular, delta_s, delta_t = _helpers.solve2x2(jacobian, func_val[:, 0])
        if singular:
            # The Newton step is undefined for a singular Jacobian; stop
            # and report non-convergence at the current point.
            break
        norm_update_prev = norm_update
        norm_update = np.linalg.norm([delta_s, delta_t], ord=2)
        # If ||p{n} - p{n-1}|| > 0.25 ||p{n-1} - p{n-2}||, then that means
        # our convergence is acting linear at the current step.
        if index > 0 and norm_update > 0.25 * norm_update_prev:
            linear_updates += 1
        # If ``>= 2/3`` of the updates have been linear, we are near a
        # non-simple root. (Make sure at least 5 updates have occurred.)
        if index >= 4 and 3 * linear_updates >= 2 * index:
            break
        # Determine the norm of the "old" solution before updating.
        norm_soln = np.linalg.norm([current_s, current_t], ord=2)
        current_s -= delta_s
        current_t -= delta_t
        # Relative-size stopping rule: the step was negligible compared
        # to the magnitude of the solution itself.
        if norm_update < NEWTON_ERROR_RATIO * norm_soln:
            return True, current_s, current_t
    return False, current_s, current_t
|
def print_direction_mean(mean_dictionary):
    """Pretty-print a Fisher mean and associated statistics for
    directional data.

    Parameters
    ----------
    mean_dictionary : output dictionary of pmag.fisher_mean
        Must contain the keys ``dec``, ``inc``, ``n``, ``alpha95``
        and ``k``.
    """
    # Round every float statistic to one decimal place up front.
    rounded = {
        key: str(round(mean_dictionary[key], 1))
        for key in ('dec', 'inc', 'alpha95', 'k')
    }
    print('Dec: ' + rounded['dec'] + ' Inc: ' + rounded['inc'])
    print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
    print('Angular radius of 95% confidence (a_95): ' + rounded['alpha95'])
    print('Precision parameter (k) estimate: ' + rounded['k'])
|
def to_json(cls, obj):
    """Serialize a wrapped ``datetime.timedelta`` instance to a string
    with the following format:

        [DAYS]d [SECONDS]s [MICROSECONDS]us
    """
    # timedelta normalizes into exactly these three components.
    days = obj.days
    seconds = obj.seconds
    micros = obj.microseconds
    return "{0}d {1}s {2}us".format(days, seconds, micros)
|
def close(self, code: int = None, reason: str = None) -> None:
    """Closes the WebSocket connection.

    Sends a close frame with the given ``code``/``reason`` (when the
    stream is still open), then either tears the stream down immediately
    if the client already closed, or schedules an abort a few seconds out
    to let the client finish a clean shutdown.
    """
    if not self.server_terminated:
        if not self.stream.closed():
            # A reason without a code implies the standard success code.
            if code is None and reason is not None:
                code = 1000
                # "normal closure" status code
            if code is None:
                close_data = b""
            else:
                # Close-frame payload begins with a big-endian 16-bit code.
                close_data = struct.pack(">H", code)
            if reason is not None:
                close_data += utf8(reason)
            try:
                # 0x8 is the WebSocket close-frame opcode.
                self._write_frame(True, 0x8, close_data)
            except StreamClosedError:
                self._abort()
        self.server_terminated = True
    if self.client_terminated:
        # Both sides have sent close frames: cancel any pending abort
        # timeout and shut the stream down now.
        if self._waiting is not None:
            self.stream.io_loop.remove_timeout(self._waiting)
            self._waiting = None
        self.stream.close()
    elif self._waiting is None:
        # Give the client a few seconds to complete a clean shutdown,
        # otherwise just close the connection.
        self._waiting = self.stream.io_loop.add_timeout(
            self.stream.io_loop.time() + 5, self._abort)
|
def _read_charge_and_multiplicity(self):
    """Parse charge and multiplicity from the QChem output text.

    First tries the echoed ``$molecule`` section ("<charge> <mult>");
    when that is absent, falls back to the "Sum of atomic charges" /
    "Sum of spin charges" summary lines. Results are stored in
    ``self.data["charge"]`` and ``self.data["multiplicity"]``; the
    multiplicity defaults to 1 when nothing is found.
    """
    # Preferred source for the charge: the $molecule section.
    temp_charge = read_pattern(
        self.text, {"key": r"\$molecule\s+([\-\d]+)\s+\d"},
        terminate_on_match=True).get('key')
    # FIX: identity comparisons with None (PEP 8) instead of ==/!=.
    if temp_charge is not None:
        self.data["charge"] = int(temp_charge[0][0])
    else:
        # Fallback: summed atomic charges, printed as a float string.
        temp_charge = read_pattern(
            self.text, {"key": r"Sum of atomic charges \=\s+([\d\-\.\+]+)"},
            terminate_on_match=True).get('key')
        if temp_charge is None:
            self.data["charge"] = None
        else:
            self.data["charge"] = int(float(temp_charge[0][0]))
    # Preferred source for the multiplicity: the $molecule section.
    temp_multiplicity = read_pattern(
        self.text, {"key": r"\$molecule\s+[\-\d]+\s+(\d)"},
        terminate_on_match=True).get('key')
    if temp_multiplicity is not None:
        self.data["multiplicity"] = int(temp_multiplicity[0][0])
    else:
        # Fallback: multiplicity = (summed spin charges) + 1.
        temp_multiplicity = read_pattern(
            self.text, {"key": r"Sum of spin\s+charges \=\s+([\d\-\.\+]+)"},
            terminate_on_match=True).get('key')
        if temp_multiplicity is None:
            self.data["multiplicity"] = 1
        else:
            self.data["multiplicity"] = int(float(temp_multiplicity[0][0])) + 1
|
def readValuesBigWigToWig(self, reference, start, end):
    """Read a bigwig file and return a protocol object with values
    within the query range.

    This method uses the bigWigToWig command line tool from UCSC
    GoldenPath. The tool is used to return values within a query region.
    The output is in wiggle format, which is processed by the WiggleReader
    class.

    There could be memory issues if the returned results are large.
    The input reference can be a security problem (script injection).
    Ideally, it should be checked against a list of known chromosomes.
    Start and end should not be problems since they are integers.
    """
    if not self.checkReference(reference):
        raise exceptions.ReferenceNameNotFoundException(reference)
    if start < 0:
        raise exceptions.ReferenceRangeErrorException(reference, start, end)
    # TODO: CHECK IF QUERY IS BEYOND END
    cmd = ["bigWigToWig", self._sourceFile, "stdout",
           "-chrom=" + reference, "-start=" + str(start),
           "-end=" + str(end)]
    wiggleReader = WiggleReader(reference, start, end)
    try:
        # run command and grab output simultaneously
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        while True:
            line = process.stdout.readline()
            # NOTE(review): on Python 3, ``readline()`` yields bytes, so
            # the '' sentinel only matches on Python 2 -- TODO confirm the
            # target interpreter and compare against b'' if needed.
            if line == '' and process.poll() is not None:
                break
            wiggleReader.readWiggleLine(line.strip())
    except ValueError:
        raise
    except Exception:
        # FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only translate genuine errors.
        raise Exception("bigWigToWig failed to run")
    return wiggleReader.getData()
|
def generateSteps(self, minStep):
    """Generate allowed steps with step >= minStep in increasing order."""
    self.checkFinite(minStep)
    if self.binary:
        # Binary mode: steps are pure powers of two.
        base, mantissas = 2.0, [1.0]
        exponent = math.floor(math.log(minStep, 2) - EPSILON)
    else:
        # Decimal mode: the conventional 1-2-5 series per decade.
        base, mantissas = 10.0, [1.0, 2.0, 5.0]
        exponent = math.floor(math.log10(minStep) - EPSILON)
    while True:
        scale = base ** exponent
        for mantissa in mantissas:
            candidate = mantissa * scale
            # EPSILON slack avoids dropping steps equal to minStep that
            # fall just below it due to floating-point rounding.
            if candidate >= minStep * (1.0 - EPSILON):
                yield candidate
        exponent += 1
|
def init_parsecmdline(argv=None):
    """Parse arguments from the command line and initialize the program.

    Sets the module-wide dry-run flag, initializes logging, prints the
    splash and loads the configuration file.

    :param argv: list of arguments (defaults to an empty list)
    :raises FileNotFoundError: if the configuration file does not exist
    """
    # FIX: avoid a mutable default argument; ``None`` stands in for an
    # explicitly empty argument list (preserving the old behavior).
    if argv is None:
        argv = []
    # main argument parser
    parser = argparse.ArgumentParser(prog=PKG_NAME)
    # --version
    parser.add_argument('--version', action='version', version=version)
    # -c, --config <file_name>
    parser.add_argument("-c", "--config", action="store", dest="config_file",
                        default=config.CONF_DEFAULT_FILE,
                        help="specify configuration file to use")
    # -d, --dry-run
    parser.add_argument("-d", "--dry-run", action="store_true",
                        dest="dry_run", default=False,
                        help="don't actually do anything")
    # -q, --quiet
    parser.add_argument("-q", "--quiet", action="store_true",
                        dest="log_quiet", default=False,
                        help="quiet output")
    # --ll <level>: logging level
    parser.add_argument("--ll", "--log-level", action="store", type=int,
                        dest="log_lvl", default=log.LOG_LVL_DEFAULT,
                        help="set logging level")
    # -l, --log-file
    parser.add_argument("-l", "--log-file", action="store", dest="log_file",
                        default=log.LOG_FILE_DEFAULT,
                        help="set log file")
    # Absorb the options
    options = parser.parse_args(argv)
    # Set whether we are going to perform a dry run
    global _opt
    _opt["dry_run"] = options.dry_run
    # Initiate the log level
    log.init(threshold_lvl=options.log_lvl,
             quiet_stdout=options.log_quiet,
             log_file=options.log_file)
    # Print the splash
    _splash()
    # Merge configuration with a JSON file
    config_file = os.path.abspath(options.config_file)
    log.msg("Attempting to use configuration file "
            "'{config_file}'".format(config_file=config_file))
    try:
        config.set_from_file(config_file)
    except FileNotFoundError:
        raise FileNotFoundError(
            "Configuration file '{config_file}' not found!".format(
                config_file=config_file))
|
def condensed_coords ( i , j , n ) :
"""Transform square distance matrix coordinates to the corresponding
index into a condensed , 1D form of the matrix .
Parameters
i : int
Row index .
j : int
Column index .
n : int
Size of the square matrix ( length of first or second dimension ) .
Returns
ix : int"""
|
# guard conditions
if i == j or i >= n or j >= n or i < 0 or j < 0 :
raise ValueError ( 'invalid coordinates: %s, %s' % ( i , j ) )
# normalise order
i , j = sorted ( [ i , j ] )
# calculate number of items in rows before this one ( sum of arithmetic
# progression )
x = i * ( ( 2 * n ) - i - 1 ) / 2
# add on previous items in current row
ix = x + j - i - 1
return int ( ix )
|
def get_vnetwork_vswitches_output_vnetwork_vswitches_pnic(self, **kwargs):
    """Auto Generated Code.

    Builds the ``get_vnetwork_vswitches/output/vnetwork-vswitches/pnic``
    element tree with the caller-supplied ``pnic`` text and passes it to
    the callback (``kwargs['callback']`` or ``self._callback``).
    """
    # FIX: the original built an unused ET.Element("config") and
    # immediately rebound the name; the dead element is removed.
    config = ET.Element("get_vnetwork_vswitches")
    output = ET.SubElement(config, "output")
    vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches")
    pnic = ET.SubElement(vnetwork_vswitches, "pnic")
    pnic.text = kwargs.pop('pnic')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def decode_length(self, data, state):
    """Extract and decode a frame length from the data buffer. The
    consumed data should be removed from the buffer. If the
    length data is incomplete, must raise a ``NoFrames`` exception.

    :param data: A ``bytearray`` instance containing the data so
                 far read.
    :param state: An instance of ``FramerState``. If the buffer
                  contains a partial encoded length, this object
                  can be used to store state information to allow
                  the remainder of the length to be read.

    :returns: The frame length, as an integer.
    """
    prefix = self.fmt.size
    # Not enough bytes for a complete length prefix yet.
    if len(data) < prefix:
        raise exc.NoFrames()
    # Decode the prefix, then consume it from the buffer.
    length, = self.fmt.unpack(six.binary_type(data[:prefix]))
    del data[:prefix]
    return length
|
def create_database(self, name, path=None, force=False):
    """Create a new Impala database.

    Parameters
    ----------
    name : string
        Database name
    path : string, default None
        HDFS path where to store the database data; otherwise uses Impala
        default
    """
    if path:
        # Explicit mkdir ensures the user owns the dir rather than
        # impala, which is easier for manual cleanup, if necessary.
        self.hdfs.mkdir(path)
    return self._execute(ddl.CreateDatabase(name, path=path, can_exist=force))
|
def create_geoms(self, plot):
    """Make information needed to draw a legend for each of the layers.

    For each layer, that information is a dictionary with the geom
    to draw the guide together with the data and the parameters that
    will be used in the call to geom.

    Populates ``self.glayers`` and returns ``self``, or ``None`` when
    no layer contributes to this legend.
    """
    def get_legend_geom(layer):
        # Prefer a geom that can draw its own legend key; otherwise look
        # up the fallback geom class named by ``legend_geom``.
        if hasattr(layer.geom, 'draw_legend'):
            geom = layer.geom.__class__
        else:
            name = 'geom_{}'.format(layer.geom.legend_geom)
            geom = Registry[name]
        return geom
    # A layer either contributes to the guide, or it does not. The
    # guide entries may be plotted in the layers
    self.glayers = []
    for l in plot.layers:
        exclude = set()
        if isinstance(l.show_legend, dict):
            # A dict show_legend disables individual aesthetics.
            l.show_legend = rename_aesthetics(l.show_legend)
            exclude = {ae for ae, val in l.show_legend.items() if not val}
        elif l.show_legend not in (None, True):
            # Legend explicitly turned off for this layer.
            continue
        matched = self.legend_aesthetics(l, plot)
        # This layer does not contribute to the legend
        if not set(matched) - exclude:
            continue
        data = self.key[matched].copy()
        data = l.use_defaults(data)
        # override.aes in guide_legend manually changes the geom
        for ae in set(self.override_aes) & set(data.columns):
            data[ae] = self.override_aes[ae]
        geom = get_legend_geom(l)
        # Drop rows the geom could not draw a key for.
        data = remove_missing(
            data, l.geom.params['na_rm'],
            list(l.geom.REQUIRED_AES | l.geom.NON_MISSING_AES),
            '{} legend'.format(l.geom.__class__.__name__))
        self.glayers.append(types.SimpleNamespace(geom=geom, data=data, layer=l))
    if not self.glayers:
        return None
    return self
|
def map_subcommands(self, func):
    """Run `func` against all the subcommands attached to our root
    command.

    Returns a lazy ``map`` over the whole command tree (children are
    visited before their parent; the root comes last).
    """
    def walk(command):
        # Depth-first traversal: descend into every subcommand, then
        # emit the command itself.
        for child in command.subcommands.values():
            yield from walk(child)
        yield command
    return map(func, walk(self.root_command))
|
def insert_statement(table, columns, values):
    """Generate an INSERT statement string for dumping to a text file or
    for MySQL execution.

    NOTE(review): values are embedded by string concatenation with ad-hoc
    quoting -- a value containing both ' and " would produce broken SQL,
    and this is not safe against SQL injection for untrusted input;
    prefer parameterized queries where possible.
    """
    # Normalise: a flat sequence of scalars becomes one single-column
    # row per scalar.
    if not all(isinstance(r, (list, set, tuple)) for r in values):
        values = [[r] for r in values]
    rows = []
    for row in values:
        new_row = []
        for col in row:
            if col is None:
                new_col = 'NULL'
            elif isinstance(col, (int, float, Decimal)):
                # Numeric literals are emitted unquoted.
                new_col = str(MySQLConverterBase().to_mysql(col))
            else:
                string = str(MySQLConverterBase().to_mysql(col))
                # Pick whichever quote character the value does not contain.
                if "'" in string:
                    new_col = '"' + string + '"'
                else:
                    new_col = "'" + string + "'"
            new_row.append(new_col)
        rows.append(', '.join(new_row))
    # One parenthesised tuple per row, comma-separated across lines.
    vals = '(' + '),\n\t('.join(rows) + ')'
    statement = "INSERT INTO\n\t{0} ({1}) \nVALUES\n\t{2}".format(
        wrap(table), cols_str(columns), vals)
    return statement
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.