signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def attach(self, remote_entry):
    """Attach a remote entry to a local entry.

    Copies the identifying fields (name, sha, url, author) from
    ``remote_entry`` onto this entry and returns ``self`` for chaining.
    """
    for field in ('name', 'sha', 'url', 'author'):
        setattr(self, field, getattr(remote_entry, field))
    return self
def icon_resource(name, package=None):
    """Return the absolute ``file://`` URI of an image resource.

    If ``package`` is not explicitly specified, the calling package's
    ``resources.images`` subpackage is used.

    :param name: path of the image, relative to the package path.
    :param package: package name in dotted format.
    :return: the file URI path to the image resource
        (i.e. ``file:///foo/bar/image.png``).
    """
    if not package:
        package = '%s.resources.images' % calling_package()
    resolved = resource_filename(package, name)
    # Absolute paths pass through unchanged; relative ones are absolutized.
    if resolved.startswith('/'):
        return 'file://%s' % resolved
    return 'file://%s' % abspath(resolved)
def _members_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
    """Signal hook executed whenever group membership changes.

    On ``post_remove`` the removed (group, person) pairs are unlinked; on
    ``pre_clear`` every currently-related pair is unlinked.  The clear case
    must run *pre*, not post, because after the clear we can no longer see
    which rows were related.
    """
    if action == "post_remove":
        if reverse:
            # instance is the person; pk_set holds the removed group pks
            for group in model.objects.filter(pk__in=pk_set):
                _remove_group(group, instance)
        else:
            # instance is the group; pk_set holds the removed person pks
            for person in model.objects.filter(pk__in=pk_set):
                _remove_group(instance, person)
    elif action == "pre_clear":
        if reverse:
            for group in instance.groups.all():
                _remove_group(group, instance)
        else:
            for person in instance.members.all():
                _remove_group(instance, person)
def _get_file_md5 ( filename ) :
"""Compute the md5 checksum of a file""" | md5_data = md5 ( )
with open ( filename , 'rb' ) as f :
for chunk in iter ( lambda : f . read ( 128 * md5_data . block_size ) , b'' ) :
md5_data . update ( chunk )
return md5_data . hexdigest ( ) |
def rescale(self, fun):
    """Apply a custom function to the raster data and store the result
    back on this raster object in memory.

    Parameters
        fun : function
            the custom function to compute on the data

    Examples
        >>> with Raster('filename') as ras:
        >>>     ras.rescale(lambda x: 10 * x)
    """
    if self.bands != 1:
        raise ValueError('only single band images are currently supported')
    # load the array, transform it, and write it back to band 0
    transformed = fun(self.matrix())
    self.assign(transformed, band=0)
def datasets(data='all', type=None, uuid=None, query=None, id=None, limit=100, offset=None, **kwargs):
    '''Search for datasets and dataset metadata.

    :param data: [str] The type of data to get. Default: ``all``
    :param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
    :param uuid: [str] UUID of the data node provider. This must be specified
        if data is anything other than ``all``.
    :param query: [str] Query term(s). Only used when ``data='all'``
    :param id: [int] A metadata document id.

    References: http://www.gbif.org/developer/registry#datasets

    Usage::

        from pygbif import registry
        registry.datasets(limit=5)
        registry.datasets(type="OCCURRENCE")
        registry.datasets(uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
        registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
        registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
        registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657", id=598)
        registry.datasets(data=['deleted', 'duplicate'])
        registry.datasets(data=['deleted', 'duplicate'], limit=1)
    '''
    args = {'q': query, 'type': type, 'limit': limit, 'offset': offset}
    data_choices = [
        'all', 'organization', 'contact', 'endpoint', 'identifier', 'tag',
        'machinetag', 'comment', 'constituents', 'document', 'metadata',
        'deleted', 'duplicate', 'subDataset', 'withNoEndpoint',
    ]
    check_data(data, data_choices)
    # A single data selector returns one result set; a list fans out.
    if len2(data) != 1:
        return [datasets_fetch(x, uuid, args, **kwargs) for x in data]
    return datasets_fetch(data, uuid, args, **kwargs)
def on_uninstall(self):
    """Uninstall the editor extension: flag the close, disable the
    extension, and drop the reference to the editor."""
    self.enabled = False
    self._on_close = True
    self._editor = None
def stackexchange_request(self, path, callback, access_token=None, post_args=None, **kwargs):
    """Issue a request against the StackExchange API.

    ``path`` is appended to the API base URL; ``access_token`` and any
    extra keyword arguments become query-string values (kwargs win on
    key collision); ``post_args``, when given, are form-encoded into a
    POST body.  ``callback`` receives the parsed response.
    """
    query = {}
    if access_token:
        query["access_token"] = access_token
    query.update(kwargs)
    url = self._API_URL + path
    if query:
        url = "%s?%s" % (url, auth.urllib_parse.urlencode(query))
    wrapped = self.async_callback(self._on_stackexchange_request, callback)
    client = self._get_auth_http_client()
    if post_args is None:
        client.fetch(url, callback=wrapped)
    else:
        client.fetch(url, method="POST",
                     body=auth.urllib_parse.urlencode(post_args),
                     callback=wrapped)
def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    """Create a textual file with the provided contents in the workdir.

    Overwrites an existing file.  A trailing newline is appended when
    ``contents`` is non-empty and does not already end with one.

    :param filename: path of the file to (re)create.
    :param contents: text to write.
    :param encoding: text encoding to use (default: UTF-8).
    """
    ensure_directory_exists(os.path.dirname(filename))
    if os.path.exists(filename):
        os.remove(filename)
    # Context manager guarantees the stream is flushed and closed even if
    # writing raises (the original leaked the handle on error).
    with codecs.open(filename, "w", encoding) as outstream:
        outstream.write(contents)
        if contents and not contents.endswith("\n"):
            outstream.write("\n")
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename
def set_backgroundcolor(self, color):
    '''Sets the background color of the current axes (and legend).

    Use 'None' (with quotes) for transparent.  To get a transparent
    background on saved figures, use:
        pp.savefig("fig1.svg", transparent=True)
    '''
    ax = self.ax
    ax.patch.set_facecolor(color)
    lh = ax.get_legend()
    # Identity comparison: "lh != None" invoked __eq__ on the legend
    # object; None checks must use "is not".
    if lh is not None:
        lh.legendPatch.set_facecolor(color)
    plt.draw()
def convex_comb_agg_log(model, a, b):
    """convex_comb_agg_log -- add piecewise relation with a logarithmic number of binary variables
    using the convex combination formulation -- non-disaggregated.

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation
    Returns the model with the piecewise linear relation on added variables X, Y, and z.
    """
    K = len(a) - 1  # number of linear segments between the K+1 breakpoints
    G = int(math.ceil((math.log(K) / math.log(2))))
    # number of required bits
    # w[k]: convex-combination weight of breakpoint k; g[j]: j-th binary (Gray-code bit)
    w, g = {}, {}
    for k in range(K + 1):
        w[k] = model.addVar(lb=0, ub=1, vtype="C")
    for j in range(G):
        g[j] = model.addVar(vtype="B")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")
    # X and Y are convex combinations of the breakpoints; the weights sum to one.
    model.addCons(X == quicksum(a[k] * w[k] for k in range(K + 1)))
    model.addCons(Y == quicksum(b[k] * w[k] for k in range(K + 1)))
    model.addCons(quicksum(w[k] for k in range(K + 1)) == 1)
    # binary variables setup
    for j in range(G):
        zeros, ones = [0], []
        # print(j, "\tinit zeros:", zeros, "ones:", ones
        for k in range(1, K + 1):  # print(j, k, "\t>zeros:", zeros, "ones:", ones
            # NOTE: ">>" binds tighter than "&", so each test reads bit j of the
            # Gray code of k and of k-1 (both set -> ones, both clear -> zeros).
            if (1 & gray(k) >> j) == 1 and (1 & gray(k - 1) >> j) == 1:
                ones.append(k)
            if (1 & gray(k) >> j) == 0 and (1 & gray(k - 1) >> j) == 0:
                zeros.append(k)
            # print(j, k, "\tzeros>:", zeros, "ones:", ones
        # print(j, "\tzeros:", zeros, "ones:", ones
        # Weights of breakpoints whose bit-j is set are bounded by g[j];
        # those whose bit-j is clear by 1 - g[j].
        model.addCons(quicksum(w[k] for k in ones) <= g[j])
        model.addCons(quicksum(w[k] for k in zeros) <= 1 - g[j])
    return X, Y, w
def partition(cls, iterable, pred):
    """Use a predicate to split items into (false entries, true entries)."""
    left, right = itertools.tee(iterable)
    rejected = itertools.filterfalse(pred, left)
    accepted = filter(pred, right)
    return cls(rejected, accepted)
async def process_update(self, update: types.Update):
    """Process a single update object.

    Publishes the update (and, per branch, the relevant User/Chat) into the
    context via ``set_current`` and dispatches to the first matching handler
    group — at most one branch runs per update, so the order of the ``if``
    chain is significant.  Exceptions are offered to the error handlers
    first and re-raised only when none of them handled the error.

    :param update: incoming update
    :return: result of the notified handler group, if any
    """
    types.Update.set_current(update)
    try:
        if update.message:
            types.User.set_current(update.message.from_user)
            types.Chat.set_current(update.message.chat)
            return await self.message_handlers.notify(update.message)
        if update.edited_message:
            types.User.set_current(update.edited_message.from_user)
            types.Chat.set_current(update.edited_message.chat)
            return await self.edited_message_handlers.notify(update.edited_message)
        if update.channel_post:
            # channel posts carry no from_user; only the chat is published
            types.Chat.set_current(update.channel_post.chat)
            return await self.channel_post_handlers.notify(update.channel_post)
        if update.edited_channel_post:
            types.Chat.set_current(update.edited_channel_post.chat)
            return await self.edited_channel_post_handlers.notify(update.edited_channel_post)
        if update.inline_query:
            types.User.set_current(update.inline_query.from_user)
            return await self.inline_query_handlers.notify(update.inline_query)
        if update.chosen_inline_result:
            types.User.set_current(update.chosen_inline_result.from_user)
            return await self.chosen_inline_result_handlers.notify(update.chosen_inline_result)
        if update.callback_query:
            # the originating message is optional for callback queries
            if update.callback_query.message:
                types.Chat.set_current(update.callback_query.message.chat)
            types.User.set_current(update.callback_query.from_user)
            return await self.callback_query_handlers.notify(update.callback_query)
        if update.shipping_query:
            types.User.set_current(update.shipping_query.from_user)
            return await self.shipping_query_handlers.notify(update.shipping_query)
        if update.pre_checkout_query:
            types.User.set_current(update.pre_checkout_query.from_user)
            return await self.pre_checkout_query_handlers.notify(update.pre_checkout_query)
        if update.poll:
            return await self.poll_handlers.notify(update.poll)
    except Exception as e:
        # Give registered error handlers a chance; re-raise if unhandled.
        err = await self.errors_handlers.notify(update, e)
        if err:
            return err
        raise
def fix_lib64(lib_dir, symlink=True):
    """Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
    instead of lib/pythonX.Y.  If this is such a platform we'll just create a
    symlink so lib64 points to lib.

    :param lib_dir: path of the environment's python lib directory
        (expected to end in ``pythonX.Y``).
    :param symlink: when True create a symlink; otherwise copy.
    """
    # PyPy's library path scheme is not affected by this.
    # Return early or we will die on the following assert.
    if is_pypy:
        logger.debug('PyPy detected, skipping lib64 symlinking')
        return
    # Check we have a lib64 library path
    # (NOTE: `basestring` implies this is Python 2 era code)
    if not [p for p in distutils.sysconfig.get_config_vars().values() if isinstance(p, basestring) and 'lib64' in p]:
        return
    logger.debug('This system uses lib64; symlinking lib64 to lib')
    # Sanity-check the expected layout: <top_level>/lib/pythonX.Y
    assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], ("Unexpected python lib dir: %r" % lib_dir)
    lib_parent = os.path.dirname(lib_dir)
    top_level = os.path.dirname(lib_parent)
    lib_dir = os.path.join(top_level, 'lib')
    lib64_link = os.path.join(top_level, 'lib64')
    assert os.path.basename(lib_parent) == 'lib', ("Unexpected parent dir: %r" % lib_parent)
    # lexists: don't clobber an existing lib64 entry, even a dangling symlink.
    if os.path.lexists(lib64_link):
        return
    if symlink:
        os.symlink('lib', lib64_link)
    else:
        copyfile('lib', lib64_link)
def lookup_field_help(self, field, default=None):
    """Look up the help text for the passed in field.

    If our form declares explicit help text for the field, that text is
    passed to the parent implementation as the default.  Otherwise the
    caller-supplied ``default`` is forwarded unchanged.  (The previous
    implementation unconditionally reset ``default`` to None, discarding
    the caller's value.)
    """
    for form_field in self.form:
        if form_field.name == field:
            default = form_field.help_text
            break
    return super(SmartFormMixin, self).lookup_field_help(field, default=default)
def clear_license(self):
    """Removes the license, restoring its default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_license_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['license'] = dict(self._license_default)
def _read_centroid_from_ndk_string(self, ndk_string, hypocentre):
    """Parse the centroid data (line 3 of the ndk format) into a
    GCMTCentroid instance.

    :param str ndk_string:
        String of data (line 3 of ndk format)
    :param hypocentre:
        Instance of the GCMTHypocentre class
    """
    centroid = GCMTCentroid(hypocentre.date, hypocentre.time)
    fields = ndk_string[:58].split()
    centroid.centroid_type = fields[0].rstrip(':')
    values = [float(item) for item in fields[1:]]
    # A non-negligible time offset shifts the centroid time off the
    # hypocentre time.
    if fabs(values[0]) > 1E-6:
        centroid._get_centroid_time(values[0])
    centroid.time_error = values[1]
    centroid.latitude = values[2]
    centroid.latitude_error = values[3]
    centroid.longitude = values[4]
    centroid.longitude_error = values[5]
    centroid.depth = values[6]
    centroid.depth_error = values[7]
    # Fixed-column fields beyond the numeric block.
    centroid.depth_type = ndk_string[59:63]
    centroid.centroid_id = ndk_string[64:]
    return centroid
def handler(event, context):  # pylint: disable=W0613
    """Historical S3 event differ.

    Listens to the Historical current table and determines if there are
    differences that need to be persisted in the historical record.
    """
    # De-serialize the incoming records, then diff each one.
    for record in deserialize_records(event['Records']):
        process_dynamodb_differ_record(record, CurrentS3Model, DurableS3Model)
def fromfd(fd, family, type, proto=0):
    """fromfd(fd, family, type[, proto]) -> socket object

    Duplicate the given file descriptor and build a socket object around
    the copy.  The remaining arguments are the same as for socket().
    """
    duplicated = dup(fd)
    return socket(family, type, proto, duplicated)
def commit(self, message, author=None):
    """Commit changes to tracked files in the working tree.

    :param message: The commit message (a string).
    :param author: Override :attr:`author` (refer to
        :func:`coerce_author()` for details on argument handling).
    """
    # A usable local repository with a working tree is a precondition.
    self.ensure_exists()
    self.ensure_working_tree()
    logger.info("Committing changes in %s: %s", format_path(self.local), message)
    chosen_author = coerce_author(author) if author else self.author
    self.context.execute(*self.get_commit_command(message, chosen_author))
def _check_valid_data ( self , data ) :
"""Checks that the incoming data is a 3 x # elements ndarray of normal
vectors .
Parameters
data : : obj : ` numpy . ndarray `
The data to verify .
Raises
ValueError
If the data is not of the correct shape or type , or if the vectors
therein are not normalized .""" | if data . dtype . type != np . float32 and data . dtype . type != np . float64 :
raise ValueError ( 'Must initialize normals clouds with a numpy float ndarray' )
if data . shape [ 0 ] != 3 :
raise ValueError ( 'Illegal data array passed to normal cloud. Must have 3 coordinates' )
if len ( data . shape ) > 2 :
raise ValueError ( 'Illegal data array passed to normal cloud. Must have 1 or 2 dimensions' )
if np . any ( ( np . abs ( np . linalg . norm ( data , axis = 0 ) - 1 ) > 1e-4 ) & ( np . linalg . norm ( data , axis = 0 ) != 0 ) ) :
raise ValueError ( 'Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0' ) |
def _object_contents(obj):
    """Return the signature contents of any Python object.

    We have to handle the case where object contains a code object
    since it can be pickled directly.

    Tries, in order: bound method, callable object, code object, plain
    function, then a generic instance-content fallback; when everything
    fails, falls back to the object's repr.
    """
    try:  # Test if obj is a method.
        return _function_contents(obj.__func__)
    except AttributeError:
        try:  # Test if obj is a callable object.
            return _function_contents(obj.__call__.__func__)
        except AttributeError:
            try:  # Test if obj is a code object.
                return _code_contents(obj)
            except AttributeError:
                try:  # Test if obj is a function object.
                    return _function_contents(obj)
                except AttributeError as ae:  # Should be a pickle-able Python object.
                    try:
                        return _object_instance_content(obj)
                    # pickling an Action instance or object doesn't yield a stable
                    # content as instance property may be dumped in different orders
                    # return pickle.dumps(obj, ACTION_SIGNATURE_PICKLE_PROTOCOL)
                    except (pickle.PicklingError, TypeError, AttributeError) as ex:
                        # This is weird, but it seems that nested classes
                        # are unpickable. The Python docs say it should
                        # always be a PicklingError, but some Python
                        # versions seem to return TypeError. Just do
                        # the best we can.
                        return bytearray(repr(obj), 'utf-8')
def identity_gate(qubits: Union[int, Qubits]) -> Gate:
    """Return the K-qubit identity gate acting on the given qubits
    (or on K fresh qubits when an integer count is supplied)."""
    _, qubit_list = qubits_count_tuple(qubits)
    return I(*qubit_list)
def updateSiteName(self, block_name, origin_site_name):
    """Update the origin_site_name for a given block name.

    :param block_name: name of the block to update.
    :param origin_site_name: new origin site name (mandatory; empty
        values are rejected via dbsExceptionHandler).
    """
    if not origin_site_name:
        dbsExceptionHandler('dbsException-invalid-input', "DBSBlock/updateSiteName. origin_site_name is mandatory.")
    conn = self.dbi.connection()
    trans = conn.begin()
    try:
        self.updatesitename.execute(conn, block_name, origin_site_name)
    except:
        # Any failure (deliberately including BaseException) rolls the
        # transaction back before re-raising.
        if trans:
            trans.rollback()
        raise
    else:
        # Commit only on the success path.
        if trans:
            trans.commit()
    finally:
        # The connection is always returned, success or failure.
        if conn:
            conn.close()
def _addToPoolingActivation ( self , activeCells , overlaps ) :
"""Adds overlaps from specified active cells to cells ' pooling
activation .
@ param activeCells : Indices of those cells winning the inhibition step
@ param overlaps : A current set of overlap values for each cell
@ return current pooling activation""" | self . _poolingActivation [ activeCells ] = self . _exciteFunction . excite ( self . _poolingActivation [ activeCells ] , overlaps [ activeCells ] )
# increase pooling timers for all cells
self . _poolingTimer [ self . _poolingTimer >= 0 ] += 1
# reset pooling timer for active cells
self . _poolingTimer [ activeCells ] = 0
self . _poolingActivationInitLevel [ activeCells ] = self . _poolingActivation [ activeCells ]
return self . _poolingActivation |
def _parse_the_ned_list_results(self):
    """*parse the ned results*

    Walks the downloaded NED result files, extracts the tabular rows,
    zips in the original batch search parameters when available, and
    returns the parsed rows.  NOTE: Python 2 only code -- uses
    ``except ..., e`` syntax, ``iteritems`` and the ``string`` module.

    **Key Arguments:**
        # none

    **Return:**
        - results, headers -- list of row dicts and the header names used

    .. todo::
        - @review: when complete, clean _parse_the_ned_results method
        - @review: when complete add logging
    """
    self.log.info('starting the ``_parse_the_ned_list_results`` method')
    results = []
    # CHOOSE VALUES TO RETURN
    allHeaders = ["searchIndex", "searchRa", "searchDec", "row_number", "input_note", "input_name", "ned_notes", "ned_name", "ra", "dec", "eb-v", "object_type", "redshift", "redshift_err", "redshift_quality", "magnitude_filter", "major_diameter_arcmin", "minor_diameter_arcmin", "morphology", "hierarchy", "galaxy_morphology", "radio_morphology", "activity_type", "distance_indicator", "distance_mod", "distance"]
    if self.verbose == True:
        headers = ["searchIndex", "searchRa", "searchDec", "row_number", "input_note", "input_name", "ned_notes", "ned_name", "ra", "dec", "eb-v", "object_type", "redshift", "redshift_err", "redshift_quality", "magnitude_filter", "major_diameter_arcmin", "minor_diameter_arcmin", "morphology", "hierarchy", "galaxy_morphology", "radio_morphology", "activity_type", "distance_indicator", "distance_mod", "distance"]
    else:
        headers = ["searchIndex", "searchRa", "searchDec", "ned_name", "ra", "dec", "object_type", "redshift"]
    # Without batch params the three search-* columns cannot be filled in.
    if self.theseBatchParams == False:
        allHeaders = allHeaders[3:]
        headers = headers[3:]
    for thisFile in self.nedResults:
        if thisFile:
            pathToReadFile = thisFile
            # FIND THE BATCH INDEX NUMBER
            thisIndex = int(thisFile.split("/")[-1].split("_")[0])
            try:
                self.log.debug("attempting to open the file %s" % (pathToReadFile,))
                readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='rb')
                thisData = readFile.read()
                readFile.close()
            except IOError, e:
                message = 'could not open the file %s' % (pathToReadFile,)
                self.log.critical(message)
                raise IOError(message)
            readFile.close()
            # GRAB THE ROWS OF DATA
            matchObject = re.search(r"\n1\s*?\|\s*?.*", thisData, re.S)
            thisRow = ""
            if matchObject:
                # Build a pipe-delimited header line matching the NED
                # column spacing, then feed header + data rows to a
                # DictReader.
                thisHeader = ""
                for head in allHeaders:
                    thisHeader += str(head).ljust(self.resultSpacing, ' ') + " | "
                theseLines = string.split(matchObject.group(), '\n')[1:]
                if self.theseBatchParams:
                    # Prefix each data line with the original search params.
                    newLines = []
                    for t, b in zip(theseLines, self.theseBatchParams[thisIndex]):
                        t = "%s | %s | %s | %s " % (b["searchIndex"], b["searchRa"], b["searchDec"], t)
                        newLines.append(t)
                    theseLines = newLines
                theseLines = [thisHeader] + theseLines
                csvReader = csv.DictReader(theseLines, dialect='excel', delimiter='|', quotechar='"')
                for row in csvReader:
                    thisDict = {}
                    row = dict(row)
                    # Skip malformed rows (no keys, unmatched columns, or
                    # rows missing the ned_name column entirely).
                    if not row.keys():
                        continue
                    if None in row.keys():
                        continue
                    if "ned_name" not in ("").join(row.keys()).lower():
                        continue
                    for k, v in row.iteritems():
                        try:  # self.log.debug("attempting to strip ned key")
                            k = k.strip()
                        except Exception, e:
                            self.log.error('cound not strip ned key (%(k)s, %(v)s)' % locals())
                            self.log.error("could not strip ned key - failed with this error: %s " % (str(e),))
                            break
                        if (k == "ra" or k == "dec"):
                            # convert sexagesimal markers to colon notation
                            v = v.replace("h", ":").replace("m", ":").replace("d", ":").replace("s", "")
                        if isinstance(v, str):
                            v = v.strip()
                        thisDict[k] = v
                    results.append(thisDict)
            # The result file has been consumed; remove it.
            os.remove(thisFile)
    self.log.info('completed the ``_parse_the_ned_list_results`` method')
    return results, headers
def elbv2_load_balancer_hosted_zone(self, lookup, default=None):
    """Return the hosted zone ID of the V2 ELB named ``lookup``.

    Args:
        lookup: the friendly name of the V2 elb to look up
        default: value to return in case of no match
    """
    try:
        return self._elbv2_load_balancer(lookup)['CanonicalHostedZoneId']
    except ClientError:
        return default
async def append(self, reply: Reply) -> None:
    """Add ``reply`` to this transaction store's responses, recording it
    in processedRequests first when the transaction is new."""
    result = reply.result
    sender = result.get(f.IDENTIFIER.nm)
    txnId = result.get(TXN_ID)
    logger.debug("Reply being sent {}".format(reply))
    if self._isNewTxn(sender, reply, txnId):
        self.addToProcessedTxns(sender, txnId, reply)
    # Lazily create a per-identifier queue, then deliver the reply.
    if sender not in self.responses:
        self.responses[sender] = asyncio.Queue()
    await self.responses[sender].put(reply)
def get_cash_balance(self):
    """Return the account cash balance available for investing.

    Returns
        float
            The cash balance in your account (``False`` if it could not
            be determined).
    """
    cash = False
    try:
        response = self.session.get('/browse/cashBalanceAj.action')
        json_response = response.json()
        if self.session.json_success(json_response):
            self.__log('Cash available: {0}'.format(json_response['cashBalance']))
            raw_value = json_response['cashBalance']
            # Convert currency to float value
            # Match values like $1,000.12 or 1,0000 $
            match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', raw_value)
            if match:
                cash = float(match.group(1).replace(',', ''))
        else:
            self.__log('Could not get cash balance: {0}'.format(response.text))
    except Exception as e:
        self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text))
        raise e
    return cash
def main(self):
    """Main entry point of the ElastiCluster CLI.

    The central configuration is created first (alterable via the
    command line); then the subcommand selected on the command line is
    invoked.  Any error aborts the process with exit status 1.
    """
    assert self.params.func, "No subcommand defined in `ElastiCluster.main()"
    try:
        return self.params.func()
    except Exception as exc:
        log.error("Error: %s", exc)
        if self.params.verbose > 2:
            # only pay for the traceback machinery when asked for it
            import traceback
            traceback.print_exc()
        print("Aborting because of errors: {err}.".format(err=exc))
        sys.exit(1)
def tmpdir(prefix='npythy_tempdir_', delete=True):
    '''tmpdir() creates a temporary directory and returns its path.  When
    delete is True (the default), the directory and all of its contents
    are recursively removed at python exit (so long as the normal python
    exit process is allowed to call the atexit handlers).

    tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
    '''
    created = tempfile.mkdtemp(prefix=prefix)
    if not os.path.isdir(created):
        raise ValueError('Could not find or create temp directory')
    if delete:
        atexit.register(shutil.rmtree, created)
    return created
def fit_arrays(uv, xy):
    """Performs a generalized fit between matched lists of positions
    given by the 2 column arrays xy and uv.

    This function fits for translation, rotation, and scale changes
    between 'xy' and 'uv', allowing for different scales and
    orientations for X and Y axes.

    DEVELOPMENT NOTE:
        Checks need to be put in place to verify that
        enough objects are available for a fit.

    Output:
        (Xo, Yo), Rot, (Scale, Sx, Sy)
    where
        Xo, Yo: offset,
        Rot: rotation,
        Scale: average scale change, and
        Sx, Sy: scale changes in X and Y separately.

    Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)
    """
    if not isinstance(xy, np.ndarray):  # cast input list as numpy ndarray for fitting
        xy = np.array(xy)
    if not isinstance(uv, np.ndarray):  # cast input list as numpy ndarray for fitting
        uv = np.array(uv)
    # Set up products used for computing the fit
    # (first-order sums and cross/self dot-products of the coordinates)
    Sx = xy[:, 0].sum()
    Sy = xy[:, 1].sum()
    Su = uv[:, 0].sum()
    Sv = uv[:, 1].sum()
    Sux = np.dot(uv[:, 0], xy[:, 0])
    Svx = np.dot(uv[:, 1], xy[:, 0])
    Suy = np.dot(uv[:, 0], xy[:, 1])
    Svy = np.dot(uv[:, 1], xy[:, 1])
    Sxx = np.dot(xy[:, 0], xy[:, 0])
    Syy = np.dot(xy[:, 1], xy[:, 1])
    Sxy = np.dot(xy[:, 0], xy[:, 1])
    n = len(xy[:, 0])
    # Normal-equation matrix and right-hand sides for u and v.
    M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
    U = np.array([Su, Sux, Suy])
    V = np.array([Sv, Svx, Svy])
    # The fit solutioN...
    # where
    #   u = P0 + P1*x + P2*y
    #   v = Q0 + Q1*x + Q2*y
    try:
        invM = np.linalg.inv(M.astype(np.float64))
    except np.linalg.LinAlgError:
        raise SingularMatrixError("Singular matrix: suspected colinear points.")
    P = np.dot(invM, U).astype(np.float64)
    Q = np.dot(invM, V).astype(np.float64)
    if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
        raise ArithmeticError('Singular matrix.')
    # Return the shift, rotation, and scale changes
    return build_fit(P, Q, 'general')
def init_signals(self):
    """Wire the widget, selection-model and scrollbar signals to their
    corresponding handlers."""
    wiring = (
        (self.widget.activated, self.on_item_activated),
        (self.widget.clicked, self.on_item_clicked),
        (self.widget.doubleClicked, self.on_item_double_clicked),
        (self.widget.entered, self.on_item_entered),
        (self.widget.pressed, self.on_item_pressed),
        (self.widget.customContextMenuRequested, self.on_custom_context_menu_requested),
    )
    for signal, handler in wiring:
        signal.connect(handler)
    self.selection_model = self.widget.selectionModel()
    self.selection_model.selectionChanged.connect(self.on_selection_changed)
    self.widget.horizontalScrollBar().valueChanged.connect(self.on_horizontal_scrollbar_moved)
    self.widget.verticalScrollBar().valueChanged.connect(self.on_vertical_scrollbar_moved)
def norm_nuclear(X):
    r"""Compute the nuclear norm

    .. math::
        \| X \|_* = \sum_i \sigma_i

    where :math:`\sigma_i` are the singular values of matrix :math:`X`.

    Parameters
        X : array_like
            Input array :math:`X`

    Returns
        nncl : float
            Nuclear norm of `X`
    """
    singular_values = np.linalg.svd(sl.promote16(X), compute_uv=False)
    return np.sum(singular_values)
def previous(self):
    """Return a copy of this object as it was at its previous state in
    history, or None if this object is new (and therefore has no
    history).  The returned object is always "disconnected", i.e. it
    does not receive live updates."""
    return self.model.state.get_entity(
        self.entity_type,
        self.entity_id,
        self._history_index - 1,
        connected=False,
    )
def create_page_blob_service(self):
    '''Creates a PageBlobService object with the settings specified in
    the CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService`
    '''
    try:
        from azure.storage.blob.pageblobservice import PageBlobService
        return PageBlobService(self.account_name,
                               self.account_key,
                               sas_token=self.sas_token,
                               is_emulated=self.is_emulated)
    except ImportError:
        raise Exception('The package azure-storage-blob is required. ' + 'Please install it using "pip install azure-storage-blob"')
def values(self):
    """list of _ColumnPairwiseSignificance tests.

    The result has as many elements as there are columns in the slice;
    each significance test exposes `p_vals` and `t_stats`.
    """
    # TODO: Figure out how to intersperse pairwise objects for columns
    # that represent H&S
    n_cols = self._slice.get_shape(hs_dims=self._hs_dims)[1]
    return [
        _ColumnPairwiseSignificance(
            self._slice,
            idx,
            self._axis,
            self._weighted,
            self._alpha,
            self._only_larger,
            self._hs_dims,
        )
        for idx in range(n_cols)
    ]
def _construct_email(self, email, **extra):
    """Convert incoming data to a properly structured dictionary.

    Accepts a dict, a MIMEText/MIMEMultipart message, or an Email
    instance; ``extra`` keys are merged in before serialization.

    :raises ValueError: if ``email`` is of an unsupported type
        (previously raised with no message).
    """
    if isinstance(email, dict):
        email = Email(manager=self._manager, **email)
    elif isinstance(email, (MIMEText, MIMEMultipart)):
        email = Email.from_mime(email, self._manager)
    elif not isinstance(email, Email):
        raise ValueError(
            'email must be a dict, a MIME message or an Email instance, '
            'got %r' % type(email).__name__)
    email._update(extra)
    return email.as_dict()
def p_function_declaration_statement(p):
    # PLY grammar rule: the string below is read by yacc as the production
    # for this reduction -- it is behavior, not documentation.
    'function_declaration_statement : FUNCTION is_reference STRING LPAREN parameter_list RPAREN LBRACE inner_statement_list RBRACE'
    # ast.Function(name, params, body, is_ref) anchored at the FUNCTION token's line.
    p[0] = ast.Function(p[3], p[5], p[8], p[2], lineno=p.lineno(1))
def plot_phase_plane(self, indices=None, **kwargs):
    """Plots a phase portrait from the last integration.

    This method will be deprecated. Please use :meth:`Result.plot_phase_plane`.
    See :func:`pyodesys.plotting.plot_phase_plane`

    :param indices: which variables to plot against each other
    :param kwargs: forwarded to the underlying plotting helper
    """
    # Thin delegation to the shared plotting machinery.
    return self._plot(plot_phase_plane, indices=indices, **kwargs)
def GetString(self):
    """Retrieves a string representation of the report.

    Returns:
      str: string representation of the report.
    """
    lines = ['Report generated from: {0:s}'.format(self.plugin_name)]

    time_compiled = getattr(self, 'time_compiled', 0)
    if time_compiled:
        time_compiled = timelib.Timestamp.CopyToIsoFormat(time_compiled)
        lines.append('Generated on: {0:s}'.format(time_compiled))

    filter_string = getattr(self, 'filter_string', '')
    if filter_string:
        lines.append('Filter String: {0:s}'.format(filter_string))

    lines.extend(['', 'Report text:', self.text])
    return '\n'.join(lines)
def crossplat_loop_run(coro) -> Any:
    """Cross-platform method for running a subprocess-spawning coroutine."""
    on_windows = sys.platform == 'win32'
    if on_windows:
        # Restore default Ctrl-C handling and use the proactor loop, which
        # is required for subprocess support on Windows.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(coro)
    finally:
        # Equivalent to the contextlib.closing() wrapper: always close the
        # loop, even if the coroutine raised.
        loop.close()
def evict(self, key):
    """Evicts the specified key from this map.

    **Warning: This method uses __hash__ and __eq__ methods of binary form
    of the key, not the actual implementations of __hash__ and __eq__
    defined in key's class.**

    :param key: (object), key to evict.
    :return: (bool), ``true`` if the key is evicted, ``false`` otherwise.
    """
    check_not_none(key, "key can't be None")
    return self._evict_internal(self._to_data(key))
def is_2d_regular_grid(nc, variable):
    '''Returns True if the variable is a 2D Regular grid.

    :param netCDF4.Dataset nc: An open netCDF dataset
    :param str variable: name of the variable to check
    '''
    # Expected layout: x(x), y(y), t(t) with X(t, y, x)
    cmatrix = coordinate_dimension_matrix(nc)
    if any(axis not in cmatrix for axis in ('x', 'y', 't')):
        return False

    # Each coordinate variable must be 1-D over exactly its own dimension.
    expected = {
        'x': (get_lon_variable(nc),),
        'y': (get_lat_variable(nc),),
        't': (get_time_variable(nc),),
    }
    if any(cmatrix[axis] != dims for axis, dims in expected.items()):
        return False

    # Relaxed dimension ordering: the data variable must be 3-D and span
    # all three coordinate dimensions, in any order.
    dims = nc.variables[variable].dimensions
    x, y, t = expected['x'][0], expected['y'][0], expected['t'][0]
    return len(dims) == 3 and x in dims and y in dims and t in dims
def addConstraint(self, login, tableName, constraintClassName):
    """Add a constraint to a table via the remote service.

    Thrift-style synchronous call: send the request, then block on the
    reply.

    Parameters:
     - login
     - tableName
     - constraintClassName
    """
    self.send_addConstraint(login, tableName, constraintClassName)
    return self.recv_addConstraint()
def remove_child_bank(self, bank_id, child_id):
    """Removes a child from a bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  NotFound - ``bank_id`` not parent of ``child_id``
    raise:  NullArgument - ``bank_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    if self._catalog_session is None:
        return self._hierarchy_session.remove_child(id_=bank_id, child_id=child_id)
    return self._catalog_session.remove_child_catalog(catalog_id=bank_id, child_id=child_id)
def _GetStatus(self):
    """Retrieves status information.

    Returns:
      dict[str, object]: status attributes, indexed by name.
    """
    mediator = self._analysis_mediator
    number_of_produced_event_tags = (
        mediator.number_of_produced_event_tags if mediator else None)
    number_of_produced_reports = (
        mediator.number_of_produced_analysis_reports if mediator else None)

    used_memory = 0
    if self._process_information:
        used_memory = self._process_information.GetUsedMemory() or 0

    if self._memory_profiler:
        self._memory_profiler.Sample('main', used_memory)

    status = {
        'display_name': '',
        'identifier': self._name,
        'number_of_consumed_event_tags': None,
        'number_of_consumed_events': self._number_of_consumed_events,
        'number_of_consumed_reports': None,
        'number_of_consumed_sources': None,
        'number_of_consumed_warnings': None,
        'number_of_produced_event_tags': number_of_produced_event_tags,
        'number_of_produced_events': None,
        'number_of_produced_reports': number_of_produced_reports,
        'number_of_produced_sources': None,
        'number_of_produced_warnings': None,
        'processing_status': self._status,
        'task_identifier': None,
        'used_memory': used_memory}

    # Signal the foreman once processing has been aborted or completed.
    if self._status in (definitions.STATUS_INDICATOR_ABORTED,
                        definitions.STATUS_INDICATOR_COMPLETED):
        self._foreman_status_wait_event.set()

    return status
def all_instruments(type=None, date=None):
    """Return basic information for all instruments in the market
    (currently China only).

    :param str type: instrument type filter, e.g. ``type='CS'`` for common
        stock; ``None`` (the default) returns every type.  ``'Stock'`` and
        ``'Fund'`` are accepted as aliases that expand to the corresponding
        concrete types.
    :param date: point in time for the query
    :type date: `str` | `datetime` | `date`
    :return: `pandas DataFrame` with the basic information of all matching
        instruments.

    Known instrument types include: CS (common stock), ETF, LOF,
    FenjiMu / FenjiA / FenjiB (structured funds), INDX (index) and
    Future (futures, including index, bond and commodity futures).
    """
    env = Environment.get_instance()
    # Never query beyond the current trading datetime.
    dt = env.trading_dt if date is None else min(
        pd.Timestamp(date).to_pydatetime(), env.trading_dt)

    types = None
    if type is not None:
        requested = [type] if isinstance(type, six.string_types) else type
        types = set()
        for name in requested:
            if name == "Stock":
                types.add("CS")
            elif name == "Fund":
                types.update(["ETF", "LOF", "SF", "FenjiA", "FenjiB", "FenjiMu"])
            else:
                types.add(name)

    result = env.data_proxy.all_instruments(types, dt)
    if types is not None and len(types) == 1:
        # A single concrete type: expose every instrument attribute.
        return pd.DataFrame([i.__dict__ for i in result])
    return pd.DataFrame(
        [[i.order_book_id, i.symbol, i.type, i.listed_date, i.de_listed_date]
         for i in result],
        columns=["order_book_id", "symbol", "type", "listed_date", "de_listed_date"],
    )
def custom_repository(self):
    """Return dictionary with repo name and url (used external).

    Lines in ``custom_repositories_list`` are expected to look like
    ``<name> <url>``; blank lines and comment lines starting with ``#``
    are skipped.

    :return: dict mapping repository name to its URL.
    """
    custom_dict_repo = {}
    for line in self.custom_repositories_list.splitlines():
        line = line.lstrip()
        if not line or line.startswith("#"):
            # A blank line previously raised IndexError on split()[0].
            continue
        parts = line.split()
        if len(parts) < 2:
            # Malformed entry without a URL -- ignore rather than crash.
            continue
        custom_dict_repo[parts[0]] = parts[1]
    return custom_dict_repo
def contains_interval(self, other):
    """Whether other is contained in this Interval.

    :param other: Interval
    :return: True or False
    :rtype: bool
    """
    # Containment: other starts no earlier and ends no later than self.
    return other.begin >= self.begin and other.end <= self.end
def create(**data):
    """Create a Payment request.

    :param data: data required to create the payment
    :return: The payment resource
    :rtype resources.Payment
    """
    client = HttpClient()
    response, _ = client.post(routes.url(routes.PAYMENT_RESOURCE), data)
    return resources.Payment(**response)
def calc_lfp_layer(self):
    """Calculate the LFP from concatenated subpopulations residing in a
    certain layer, e.g all L4E pops are summed, according to the `mapping_Yy`
    attribute of the `hybridLFPy.Population` objects.

    :return: dict mapping each layer name ``Y`` to the accumulated LFP of
        its subpopulations ``y``.
    """
    LFPdict = {}
    lastY = None
    # mapping_Yy pairs a layer name Y with each subpopulation y; consecutive
    # entries sharing the same Y are accumulated into a single entry.
    for Y, y in self.mapping_Yy:
        if lastY != Y:
            # First subpopulation seen for this layer: seed the entry.
            # NOTE(review): this stores a reference to self.LFPdict[y]; the
            # `+=` below may therefore mutate the source entry in place if
            # the values are mutable (e.g. arrays) -- confirm whether a
            # copy is intended here.
            try:
                LFPdict.update({Y: self.LFPdict[y]})
            except KeyError:
                # Missing subpopulation data is silently skipped.
                pass
        else:
            try:
                LFPdict[Y] += self.LFPdict[y]
            except KeyError:
                pass
        lastY = Y
    return LFPdict
def file_sha1(path):
    """Compute SHA1 hash of a file.

    :param path: path of the file to hash.
    :return: hex digest string of the file contents.
    """
    sha1 = hashlib.sha1()
    with open(path, "rb") as f:
        # 64 KiB chunks keep memory bounded while avoiding the syscall
        # overhead of the previous 1 KiB reads; iter() with a b"" sentinel
        # matches the style of the module's md5 helper.
        for block in iter(lambda: f.read(65536), b""):
            sha1.update(block)
    return sha1.hexdigest()
def _joint_likelihood(self,
                      logits: torch.Tensor,
                      tags: torch.Tensor,
                      mask: torch.LongTensor) -> torch.Tensor:
    """Computes the numerator term for the log-likelihood, which is just
    score(inputs, tags).

    ``logits`` has shape (batch_size, sequence_length, num_tags) --
    established by the shape unpacking and the gather over its last
    dimension below; ``tags`` and ``mask`` are (batch_size,
    sequence_length).  Returns a (batch_size,) tensor with the
    un-normalized score of each gold tag sequence.
    """
    batch_size, sequence_length, _ = logits.data.shape
    # Transpose batch size and sequence dimensions:
    logits = logits.transpose(0, 1).contiguous()
    mask = mask.float().transpose(0, 1).contiguous()
    tags = tags.transpose(0, 1).contiguous()
    # Start with the transition scores from start_tag to the first tag in each input
    if self.include_start_end_transitions:
        score = self.start_transitions.index_select(0, tags[0])
    else:
        score = 0.0
    # Add up the scores for the observed transitions and all the inputs but the last
    for i in range(sequence_length - 1):
        # Each is shape (batch_size,)
        current_tag, next_tag = tags[i], tags[i + 1]
        # The scores for transitioning from current_tag to next_tag
        transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
        # The score for using current_tag
        emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
        # Include transition score if next element is unmasked,
        # input_score if this element is unmasked.
        score = score + transition_score * mask[i + 1] + emit_score * mask[i]
    # Transition from last state to "stop" state. To start with, we need to find the last tag
    # for each instance: mask.sum(0) counts real tokens per instance, so
    # position (count - 1) holds the final unmasked tag.
    last_tag_index = mask.sum(0).long() - 1
    last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
    # Compute score of transitioning to `stop_tag` from each "last tag".
    if self.include_start_end_transitions:
        last_transition_score = self.end_transitions.index_select(0, last_tags)
    else:
        last_transition_score = 0.0
    # Add the last input if it's not masked.
    last_inputs = logits[-1]
    # (batch_size, num_tags)
    last_input_score = last_inputs.gather(1, last_tags.view(-1, 1))
    # (batch_size, 1)
    last_input_score = last_input_score.squeeze()
    # (batch_size,)
    score = score + last_transition_score + last_input_score * mask[-1]
    return score
def sample_wr(lst):
    """Sample from lst, with replacement.

    Returns a new list of the same length whose elements are drawn
    uniformly, with replacement, from ``lst``.
    """
    arr = np.array(lst)
    picks = np.random.randint(len(lst), size=len(lst))
    # Fancy indexing replaces the element-by-element copy loop.
    return list(arr[picks])
def parse_ini_file(self, path):
    """Parse ini file at ``path`` and return dict.

    Nested sections are flattened: the section path and key are joined
    with underscores and upper-cased (e.g. section ``a``, key ``b``
    becomes ``A_B``).
    """
    parsed = ConfigObj(path, list_values=False)

    def flatten(prefix, section):
        flat = {}
        for key, value in section.items():
            if isinstance(value, dict):
                flat.update(flatten(prefix + [key], value))
            else:
                flat['_'.join(prefix + [key]).upper()] = value
        return flat

    return flatten([], parsed.dict())
async def tcp_echo_client(message, loop, host, port):
    """Generic python tcp echo client.

    Connects to ``host:port``, sends ``message`` and prints up to 100
    bytes of the echoed response.

    :param str message: text to send (encoded with the default codec).
    :param loop: event loop passed through to ``asyncio.open_connection``.
    :param str host: server hostname or address.
    :param int port: server TCP port.
    """
    print("Connecting to server at %s:%d" % (host, port))
    reader, writer = await asyncio.open_connection(host, port, loop=loop)
    writer.write(message.encode())
    print('Sent: %r' % message)
    # Bounded single read: this is an echo demo, not a full protocol client.
    data = await reader.read(100)
    print('Received: %r' % data.decode())
    writer.close()
def split_obj(obj, prefix=None):
    '''Split the object, returning a 3-tuple with the flat object, optionally
    followed by the key for the subobjects and a list of those subobjects.

    :param obj: dict to split.
    :param prefix: if given, every key of the returned flat dict is
        prefixed with ``'<prefix>_'``.
    :return: ``(flat, key, subobjects)``; ``key`` and ``subobjects`` are
        ``None`` when no nested dict(s) were found.
    '''
    # copy the object, optionally add the prefix before each key
    new = obj.copy() if prefix is None else {
        '{}_{}'.format(prefix, k): v for k, v in obj.items()}
    # try to find the key holding the subobject or a list of subobjects
    for k, v in new.items():
        # list of subobjects
        if isinstance(v, list):
            # A list of scalars (strings or ints) is a one-to-many relation
            # we can't expand here; keep the data as a comma-joined string
            # in case it's useful downstream.  str() guards against the
            # ints the comment promises (plain ','.join raised TypeError
            # on them) and an empty list (which raised IndexError on v[0])
            # collapses to ''.
            if not v or not isinstance(v[0], dict):
                new[k] = ','.join(str(item) for item in v)
                return new, None, None
            del new[k]
            return new, k, v
        # or just one subobject
        elif isinstance(v, dict):
            del new[k]
            return new, k, [v]
    return new, None, None
def extractVersion(string, default='?'):
    """Extracts a three digit standard format version number.

    :param string: text to search for a version number.
    :param default: value returned when no version number is found.
    :return: the extracted version, or ``default``.
    """
    # Delegates to the generic extractor with the module's VERSION_PATTERN.
    # NOTE(review): the exact semantics of condense/one are defined by
    # `extract` (not visible here) -- presumably "single condensed match".
    return extract(VERSION_PATTERN, string, condense=True, default=default, one=True)
def _update_asset_content_filename_on_disk_to_match_id(self, ac):
    """Because we want the asset content filename to match the ac.ident,
    here we manipulate the saved file on disk after creating the
    asset content.

    :param ac: newly created asset content; ``ac._my_map['url']`` is the
        stored file's path relative to the data store, and
        ``ac.ident.identifier`` becomes the new base filename.
    """
    def has_secondary_storage():
        # Secondary (mirror) storage is optional and only present when
        # configured via this key.
        return 'secondary_data_store_path' in self._config_map

    datastore_path = ''
    secondary_data_store_path = ''
    if 'data_store_full_path' in self._config_map:
        datastore_path = self._config_map['data_store_full_path']
    if has_secondary_storage():
        secondary_data_store_path = self._config_map['secondary_data_store_path']
    relative_path = self._config_map['data_store_path']

    # Rename the primary copy: swap the original base filename for the
    # asset content's identifier, keeping directory and extension.
    filepath = os.path.join(datastore_path, ac._my_map['url'])
    old_filename = os.path.splitext(os.path.basename(filepath))[0]
    new_path = filepath.replace(old_filename, ac.ident.identifier)
    os.rename(filepath, new_path)

    # Should also rename the file stored in the secondary path
    if has_secondary_storage():
        # Rebase the primary path onto the secondary store before renaming.
        old_path = '{0}/repository/AssetContent'.format(relative_path)
        filepath = os.path.join(datastore_path, ac._my_map['url']).replace(old_path, secondary_data_store_path)
        old_filename = os.path.splitext(os.path.basename(filepath))[0]
        new_path = filepath.replace(old_filename, ac.ident.identifier)
        os.rename(filepath, new_path)
def print_table(column_names: IterableOfStrings,
                rows: IterableOfTuples,
                column_alignments: Optional[IterableOfStrings] = None,
                primary_column_idx: int = 0,
                ) -> None:
    """Prints a table of information to the console. Automatically determines
    if the console is wide enough, and if not, displays the information in
    list form.

    :param column_names: The heading labels
    :param rows: A list of lists
    :param column_alignments: An optional list of strings, using either '<'
        or '>' to specify left or right alignment respectively
    :param primary_column_idx: Used when displaying information in list form,
        to determine which label should be the top-most one. Defaults to the
        first label in ``column_names``.
    """
    if not rows:
        # Nothing to size columns by; previously this raised IndexError on
        # rows[0].  Print just the header and bail out.
        click.echo('  '.join(column_names))
        return

    header_template = ''
    row_template = ''
    table_width = 0

    # Per-column format codes and alignments, inferred from the first row.
    type_formatters = {int: 'd', float: 'f', str: 's'}
    types = [type_formatters.get(type(x), 'r') for x in rows[0]]
    alignments = {int: '>', float: '>'}
    column_alignments = (column_alignments
                         or [alignments.get(type(x), '<') for x in rows[0]])

    def get_column_width(idx):
        # Widest cell in the column, or the header label if that is wider.
        header_length = len(column_names[idx])
        content_length = max(len(str(row[idx])) for row in rows)
        return max(content_length, header_length)

    for i in range(len(column_names)):
        col_width = get_column_width(i)
        header_col_template = f'{{:{col_width}}}'
        col_template = f'{{:{column_alignments[i]}{col_width}{types[i]}}}'
        if i == 0:
            header_template += header_col_template
            row_template += col_template
            table_width += col_width
        else:
            # Two-space separator between columns (matches table_width).
            header_template += '  ' + header_col_template
            row_template += '  ' + col_template
            table_width += 2 + col_width

    # check if we can format the table horizontally
    if table_width < get_terminal_width():
        click.echo(header_template.format(*column_names))
        click.echo('-' * table_width)
        for row in rows:
            try:
                click.echo(row_template.format(*row))
            except TypeError as e:
                raise TypeError(f'{e}: {row!r}')
    # otherwise format it vertically
    else:
        # max() takes a plain iterable; the previous `max(*[...])` raised
        # TypeError when there was exactly one column.
        max_label_width = max(len(label) for label in column_names)
        non_primary_columns = [(i, col) for i, col in enumerate(column_names)
                               if i != primary_column_idx]
        for row in rows:
            type_ = types[primary_column_idx]
            row_template = f'{{:>{max_label_width}s}}: {{:{type_}}}'
            click.echo(row_template.format(column_names[primary_column_idx],
                                           row[primary_column_idx]))
            for i, label in non_primary_columns:
                row_template = f'{{:>{max_label_width}s}}: {{:{types[i]}}}'
                click.echo(row_template.format(label, row[i]))
            click.echo()
def register(self, new_outputs, *args, **kwargs):
    """Register outputs and metadata.

    * ``initial_value`` - used in dynamic calculations
    * ``size`` - number of elements per timestep
    * ``uncertainty`` - in percent of nominal value
    * ``variance`` - dictionary of covariances, diagonal is square of
      uncertainties, no units
    * ``jacobian`` - dictionary of sensitivities dxi/dfj
    * ``isconstant`` - ``True`` if constant, ``False`` if periodic
    * ``isproperty`` - ``True`` if output stays at last value during
      thresholds, ``False`` if reverts to initial value
    * ``timeseries`` - name of corresponding time series output, ``None`` if
      no time series
    * ``output_source`` - name

    :param new_outputs: new outputs to register.
    """
    # Pair positional metadata with the registry's declared meta_names;
    # note dict.update means a positional value overwrites a keyword
    # argument of the same name.
    kwargs.update(zip(self.meta_names, args))
    # call super method
    super(OutputRegistry, self).register(new_outputs, **kwargs)
def update(self, ipv6s):
    """Method to update ipv6's

    :param ipv6s: List containing ipv6's desired to updated
    :return: None
    """
    ids = [str(item.get('id')) for item in ipv6s]
    payload = {'ips': ipv6s}
    return super(ApiIPv6, self).put('api/v3/ipv6/%s/' % ';'.join(ids), payload)
def schema():
    """All tables, columns, steps, injectables and broadcasts registered with
    Orca. Includes local columns on tables.
    """
    table_names = orca.list_tables()
    return jsonify(
        tables=table_names,
        columns={name: orca.get_table(name).columns for name in table_names},
        steps=orca.list_steps(),
        injectables=orca.list_injectables(),
        broadcasts=orca.list_broadcasts())
def get(self, remote, local=None, preserve_mode=True):
    """Download a file from the current connection to the local filesystem.

    :param str remote:
        Remote file to download.
        May be absolute, or relative to the remote working directory.

        .. note::
            Most SFTP servers set the remote working directory to the
            connecting user's home directory, and (unlike most shells) do
            *not* expand tildes (``~``).
            For example, instead of saying ``get("~/tmp/archive.tgz")``,
            say ``get("tmp/archive.tgz")``.

    :param local:
        Local path to store downloaded file in, or a file-like object.

        **If None or another 'falsey'/empty value is given** (the default),
        the remote file is downloaded to the current working directory (as
        seen by `os.getcwd`) using its remote filename.

        **If a string is given**, it should be a path to a local directory
        or file and is subject to similar behavior as that seen by common
        Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
        For example, if the local path is a directory, the remote path's
        base filename will be added onto it (so ``get('foo/bar/file.txt',
        '/tmp/')`` would result in creation or overwriting of
        ``/tmp/file.txt``).

        .. note::
            When dealing with nonexistent file paths, normal Python file
            handling concerns come into play - for example, a ``local``
            path containing non-leaf directories which do not exist, will
            typically result in an `OSError`.

        **If a file-like object is given**, the contents of the remote file
        are simply written into it.

    :param bool preserve_mode:
        Whether to `os.chmod` the local file so it matches the remote
        file's mode (default: ``True``).

    :returns: A `.Result` object.

    .. versionadded:: 2.0
    """
    # TODO: how does this API change if we want to implement
    # remote-to-remote file transfer? (Is that even realistic?)
    # TODO: handle v1's string interpolation bits, especially the default
    # one, or at least think about how that would work re: split between
    # single and multiple server targets.
    # TODO: callback support
    # TODO: how best to allow changing the behavior/semantics of
    # remote/local (e.g. users might want 'safer' behavior that complains
    # instead of overwriting existing files) - this likely ties into the
    # "how to handle recursive/rsync" and "how to handle scp" questions

    # Massage remote path: resolve it against the SFTP working directory
    # (or the server's notion of ".") so relative paths behave like sftp(1).
    if not remote:
        raise ValueError("Remote path must not be empty!")
    orig_remote = remote
    remote = posixpath.join(self.sftp.getcwd() or self.sftp.normalize("."), remote)

    # Massage local path:
    # - handle file-ness
    # - if path, fill with remote name if empty, & make absolute
    orig_local = local
    is_file_like = hasattr(local, "write") and callable(local.write)
    if not local:
        local = posixpath.basename(remote)
    if not is_file_like:
        local = os.path.abspath(local)

    # Run Paramiko-level .get() (side-effects only. womp.)
    # TODO: push some of the path handling into Paramiko; it should be
    # responsible for dealing with path cleaning etc.
    # TODO: probably preserve warning message from v1 when overwriting
    # existing files. Use logging for that obviously.
    # If local appears to be a file-like object, use sftp.getfo, not get
    if is_file_like:
        self.sftp.getfo(remotepath=remote, fl=local)
    else:
        self.sftp.get(remotepath=remote, localpath=local)
        # Set mode to same as remote end
        # TODO: Push this down into SFTPClient sometime (requires backwards
        # incompat release.)
        if preserve_mode:
            remote_mode = self.sftp.stat(remote).st_mode
            mode = stat.S_IMODE(remote_mode)
            os.chmod(local, mode)

    # Return something useful
    return Result(
        orig_remote=orig_remote,
        remote=remote,
        orig_local=orig_local,
        local=local,
        connection=self.connection,
    )
def save_process(MAVExpLastGraph, child_pipe_console_input, child_pipe_graph_input, statusMsgs):
    '''process for saving a graph

    Runs the wx GraphDialog in its own process; results are communicated
    back to the parent through the two pipes.
    '''
    # wx must be imported inside the child process, after fork/spawn.
    from MAVProxy.modules.lib import wx_processguard
    from MAVProxy.modules.lib.wx_loader import wx
    from MAVProxy.modules.lib.wxgrapheditor import GraphDialog

    # This pipe is used to send text to the console
    global pipe_console_input
    pipe_console_input = child_pipe_console_input
    # This pipe is used to send graph commands
    global pipe_graph_input
    pipe_graph_input = child_pipe_graph_input
    # The valid expression messages, required to
    # validate the expression in the dialog box
    global msgs
    msgs = statusMsgs

    app = wx.App(False)
    if MAVExpLastGraph.description is None:
        MAVExpLastGraph.description = ''
    # Modal dialog blocks this process until the user closes it; saving is
    # handled by save_callback.
    frame = GraphDialog('Graph Editor', MAVExpLastGraph, save_callback)
    frame.ShowModal()
    frame.Destroy()
def prepare_create_transaction(*, signers, recipients=None, asset=None, metadata=None):
    """Prepares a ``"CREATE"`` transaction payload, ready to be fulfilled.

    Args:
        signers (:obj:`list` | :obj:`tuple` | :obj:`str`): One or more
            public keys representing the issuer(s) of the asset being
            created.
        recipients (:obj:`list` | :obj:`tuple` | :obj:`str`, optional):
            One or more public keys representing the new recipient(s) of
            the asset being created. Defaults to the signers themselves
            when not given or falsey.
        asset (:obj:`dict`, optional): The asset to be created, in the
            form ``{'data': {...}}``. Defaults to ``None``.
        metadata (:obj:`dict`, optional): Metadata associated with the
            transaction. Defaults to ``None``.

    Returns:
        dict: The prepared ``"CREATE"`` transaction.
    """
    # Normalize signers to a plain list.
    # NOTE: tuples are converted for the time being, see
    # https://github.com/bigchaindb/bigchaindb/issues/797
    if isinstance(signers, tuple):
        signers = list(signers)
    elif not isinstance(signers, list):
        signers = [signers]

    # Normalize recipients to a list of (public_keys, amount) pairs,
    # defaulting to the signers themselves.
    if not recipients:
        recipients = [(signers, 1)]
    elif isinstance(recipients, tuple):
        recipients = [(list(recipients), 1)]
    elif not isinstance(recipients, list):
        recipients = [([recipients], 1)]

    transaction = Transaction.create(
        signers,
        recipients,
        metadata=metadata,
        asset=asset['data'] if asset else None,
    )
    return transaction.to_dict()
def modify_storage(self, storage, size, title, backup_rule=None):
    """Modify a Storage object. Returns an object based on the API's response.

    :param storage: Storage object or its identifier (stringified).
    :param size: new size for the storage.
    :param title: new title for the storage.
    :param backup_rule: optional backup rule dict; defaults to an empty
        rule.  (``None`` sentinel replaces the previous mutable ``{}``
        default, which is shared across calls.)
    :return: Storage built from the API response.
    """
    if backup_rule is None:
        backup_rule = {}
    res = self._modify_storage(str(storage), size, title, backup_rule)
    return Storage(cloud_manager=self, **res['storage'])
def create_deep_linking_urls(self, url_params):
    """Bulk Creates Deep Linking URLs

    See the URL https://dev.branch.io/references/http_api/#bulk-creating-deep-linking-urls

    :param url_params: Array of values returned from "create_deep_link_url(..., skip_api_call=True)"
    :return: The response
    """
    # Validate that we really received a list of dicts before calling out.
    self._check_param(value=url_params, type=list, sub_type=dict, optional=False)
    return self.make_api_call(
        "POST",
        "/v1/url/bulk/%s" % self.branch_key,
        json_params=url_params,
    )
def write_to_logfile(out, err, logfile, samplelog=None, sampleerr=None, analysislog=None, analysiserr=None):
    """Writes out and err (both should be strings) to logfile.

    :param out: stdout text to record.
    :param err: stderr text to record.
    :param logfile: base path of the run log; ``_out.txt`` and ``_err.txt``
        suffixes are appended.
    :param samplelog: optional per-sample stdout log path.
    :param sampleerr: optional per-sample stderr log path (expected
        alongside ``samplelog``).
    :param analysislog: optional per-analysis stdout log path.
    :param analysiserr: optional per-analysis stderr log path (expected
        alongside ``analysislog``).
    """
    def _append(path, text):
        # Append a single newline-terminated entry to the given log file.
        with open(path, 'a+') as handle:
            handle.write(text + '\n')

    # Run log
    _append(logfile + '_out.txt', out)
    _append(logfile + '_err.txt', err)
    # Sample log
    if samplelog:
        _append(samplelog, out)
        _append(sampleerr, err)
    # Analysis log
    if analysislog:
        _append(analysislog, out)
        _append(analysiserr, err)
def chaincode_query(self, chaincode_name, type=CHAINCODE_LANG_GO,
                    function="query", args=None, id=1, secure_context=None,
                    confidentiality_level=CHAINCODE_CONFIDENTIAL_PUB,
                    metadata=None):
    """Issue a JSON-RPC "query" against a deployed chaincode instance.

    Example request body::

        {
          "jsonrpc": "2.0",
          "method": "query",
          "params": {
            "type": 1,
            "chaincodeID": {"name": "<chaincode hash>"},
            "ctorMsg": {"function": "query", "args": ["a"]}
          },
          "id": 3
        }

    :param chaincode_name: name (hash) of the deployed chaincode.
    :param type: chaincode language type, defaults to Go.
    :param function: chaincode function to invoke.
    :param args: arguments for the function; defaults to ``["a"]``.
    :param id: JSON-RPC request id.
    :param secure_context: security context for authenticated calls.
    :param confidentiality_level: confidentiality level of the call.
    :param metadata: optional metadata.
    :return: json obj of the chaincode instance
    """
    # `None` sentinel avoids the shared mutable-default pitfall of the
    # previous `args=["a"]` signature.
    if args is None:
        args = ["a"]
    return self._exec_action(
        method="query",
        type=type,
        chaincodeID={"name": chaincode_name},
        function=function,
        args=args,
        id=id,
        secure_context=secure_context,
        confidentiality_level=confidentiality_level,
        metadata=metadata,
    )
def check_in_lambda():
    """Return None if SDK is not loaded in AWS Lambda worker.
    Otherwise drop a touch file and return a lambda context.
    """
    if not os.getenv(LAMBDA_TASK_ROOT_KEY):
        return None

    try:
        os.mkdir(TOUCH_FILE_DIR)
    except OSError:
        log.debug('directory %s already exists', TOUCH_FILE_DIR)

    try:
        # Create (or truncate) the touch file; the context manager replaces
        # the previous unmanaged open()/close() pair so the handle is closed
        # even on error.
        with open(TOUCH_FILE_PATH, 'w+'):
            pass
        # utime force second parameter in python2.7
        os.utime(TOUCH_FILE_PATH, None)
    except (IOError, OSError):
        log.warning("Unable to write to %s. Failed to signal SDK initialization." % TOUCH_FILE_PATH)

    return LambdaContext()
def reduced_chi_squareds(self, p=None):
    """Returns the reduced chi squared for each massaged data set.

    p=None means use the fit results.

    :param p: parameter values to evaluate the residuals at; defaults to
        the first element of ``self.results``.
    :return: list with one reduced chi-squared value per data set, or
        ``None`` when there is no data or residuals cannot be computed.
    """
    if len(self._set_xdata) == 0 or len(self._set_ydata) == 0:
        return None

    if p is None:
        p = self.results[0]

    r = self.studentized_residuals(p)

    # In case it's not possible to calculate
    if r is None:
        return

    # calculate the total number of points across all data sets
    N = 0
    for i in range(len(r)):
        N += len(r[i])

    # degrees of freedom, spread evenly across all points
    dof_per_point = self.degrees_of_freedom() / N

    # sum of squared residuals per set, normalized by that set's share of
    # the degrees of freedom
    for n in range(len(r)):
        r[n] = sum(r[n] ** 2) / (len(r[n]) * dof_per_point)

    return r
def sql_select(self, fields, *args, **kwargs):
    """Execute a simple SQL ``SELECT`` statement and return values as a new
    numpy rec array.

    ``fields`` and the optional positional arguments are concatenated into::

        SELECT <fields> FROM __self__ [args]

    The simplest fields argument is ``"*"``. Aggregate functions such as
    ``avg()`` may be used in ``fields``; additional clauses (``WHERE``,
    ``GROUP BY``, ``ORDER BY``, joins referencing ``__self__``) go in
    ``args``. The string ``'__self__'`` is automatically replaced with the
    table name (``T.name``).

    .. Note:: See the documentation for :meth:`~SQLarray.sql` for more
       details on the available keyword arguments and the use of ``?``
       parameter interpolation.
    """
    # %s applies str() to fields, matching the old explicit conversion.
    statement = "SELECT %s FROM __self__ %s" % (fields, " ".join(args))
    return self.sql(statement, **kwargs)
def getEdges(self, fromVol):
    """Return the edges available from fromVol.

    :param fromVol: volume (in the local representation) whose outgoing
        edges are requested.
    :return: list of edges translated to dict form via ``self.toDict``.
    """
    # Translate fromVol into the store's object form, query the butter
    # store, then map each returned diff back to the dict representation.
    return [self.toDict.diff(d) for d in self.butterStore.getEdges(self.toObj.vol(fromVol))]
def build_columns(self, X, verbose=False):
    """construct the model matrix columns for the term

    Parameters
    ----------
    X : array-like
        Input dataset with n rows
    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    # (A dead no-op expression `X[:, self.feature][:, np.newaxis]` was
    # removed here; it allocated a column copy and discarded it.)
    splines = b_spline_basis(
        X[:, self.feature],
        edge_knots=self.edge_knots_,
        spline_order=self.spline_order,
        n_splines=self.n_splines,
        sparse=True,
        periodic=self.basis in ['cp'],
        verbose=verbose,
    )

    if self.by is not None:
        # Multiply pointwise by the `by` column (varying-coefficient term).
        splines = splines.multiply(X[:, self.by][:, np.newaxis])

    return splines
def convert_requirement(req):
    """Converts a pkg_resources.Requirement object into a list of Rez package
    request strings.

    :param req: ``pkg_resources.Requirement`` instance.
    :return: list of Rez request strings equivalent to ``req``.
    """
    pkg_name = convert_name(req.project_name)
    if not req.specs:
        return [pkg_name]

    req_strs = []
    for op, ver in req.specs:
        ver = convert_version(ver)
        if op == "<":
            req_strs.append("%s-0+<%s" % (pkg_name, ver))
        elif op == "<=":
            req_strs.append("%s-0+<%s|%s" % (pkg_name, ver, ver))
        elif op == "==":
            req_strs.append("%s-%s" % (pkg_name, ver))
        elif op == ">=":
            req_strs.append("%s-%s+" % (pkg_name, ver))
        elif op == ">":
            # Strictly greater: at-least-version plus exclusion of the
            # exact version.
            req_strs.append("%s-%s+" % (pkg_name, ver))
            req_strs.append("!%s-%s" % (pkg_name, ver))
        elif op == "!=":
            req_strs.append("!%s-%s" % (pkg_name, ver))
        else:
            # Python-3 compatible stderr warning; the previous
            # `print >> sys.stderr, ...` is Python-2-only syntax and a
            # SyntaxError under Python 3.
            print("Warning: Can't understand op '%s', just depending on "
                  "unversioned package..." % op, file=sys.stderr)
            req_strs.append(pkg_name)
    return req_strs
def service_list():
    '''List "services" on the REST server'''
    response = salt.utils.http.query(
        DETAILS['url'] + 'service/list',
        decode_type='json',
        decode=True,
    )
    # The decoded JSON payload lives under the 'dict' key of the query result.
    return response['dict']
def chmod(self, mode):
    """Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
    mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.

    .. seealso:: :func:`os.chmod`
    """
    if isinstance(mode, str):
        # Symbolic modes ('u+x', 'go-w', ...) are applied as a transformation
        # of the file's current permission bits.
        mode = _multi_permission_mask(mode)(self.stat().st_mode)
    os.chmod(self, mode)
    return self
def supports_kwargs(module_or_fn, kwargs_list):
    """Determines whether the provided callable supports all the kwargs.

    This is useful when you have a module that might or might not support a
    kwarg such as `is_training`. Rather than calling the module and catching
    the error, risking the potential modification of underlying state, this
    function introspects the module to see what kwargs are actually supported,
    using the python `inspect` module.

    Note that many TF functions do not export a valid argspec object, rather
    they have a generic *args, **kwargs signature due to various layers of
    wrapping (deprecation decorators, etc). In those circumstances we return
    MAYBE_SUPPORTED, and users will have to use another method to tell whether
    the kwargs are supported (e.g. by just calling the function).

    Args:
      module_or_fn: some callable, generally an object or a method of some
        object. If an object is provided, we check whether
        `module_or_fn.__call__` supports the provided kwargs, which for a
        Sonnet module will automatically check the signature of _build. If
        `module_or_fn` is a function/method, then we check its signature
        directly, so non-Sonnet functions can be used.
      kwargs_list: string or iterable of strings of keyword arg names to test
        for. If an empty iterable is provided this function will always return
        True.

    Raises:
      ValueError: if a non-string is provided in `kwargs_list`.

    Returns:
      a string, one of 'supported', 'not_supported' or 'maybe_supported'.
    """
    if isinstance(kwargs_list, str):
        kwargs_list = [kwargs_list]
    # If it's not a function or method, then assume it's a module, so introspect
    # the __call__ method. wrapt ensures that for Sonnet modules the _build
    # signature is available here.
    if not (inspect.isfunction(module_or_fn) or inspect.ismethod(module_or_fn)):
        module_or_fn = module_or_fn.__call__
    # Bug fix: `inspect.getargspec` was removed in Python 3.11; use
    # `getfullargspec` (`varkw` replaces the old `keywords` field). This also
    # lets us see keyword-only arguments, which `getargspec` rejected outright.
    arg_spec = inspect.getfullargspec(module_or_fn)
    # If there is a **kwargs element, then an arbitrary kwarg will work, as far
    # as we can tell from here.
    takes_arbitrary_kwargs = arg_spec.varkw is not None
    for kwarg in kwargs_list:
        if not isinstance(kwarg, str):
            raise ValueError("kwargs should be strings, instead got {}".format(kwarg))
        if kwarg not in arg_spec.args and kwarg not in arg_spec.kwonlyargs:
            if not takes_arbitrary_kwargs:
                # The function doesn't take **kwargs, and this name is not in
                # the regular args, so it would definitely error to call this.
                return NOT_SUPPORTED
            else:
                # The function may accept the kwarg, but we can't say for sure.
                # Even though this is only one kwarg, we can't be certain about
                # the whole lot, so the combined answer is now "maybe".
                return MAYBE_SUPPORTED
    # All the kwargs must actually be present in the specific args list
    return SUPPORTED
def windowed_run_count(da, window, dim='time'):
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    da : N-dimensional Xarray data array (boolean)
        Input data array.
    window : int
        Minimum run length.
    dim : Xarray dimension (default='time')
        Dimension along which to calculate consecutive run.

    Returns
    -------
    out : N-dimensional xarray data array (int)
        Total number of true values part of a consecutive runs of at least
        `window` long.
    """
    # Run-length encode, zero out runs shorter than the window, then total
    # the surviving run lengths along the dimension.
    run_lengths = rle(da, dim=dim)
    qualifying = run_lengths.where(run_lengths >= window, 0)
    return qualifying.sum(dim=dim)
def update_meta_data_for_port(graphical_editor_view, item, handle):
    """This method updates the meta data of the states ports if they changed.

    :param graphical_editor_view: Graphical Editor the change occurred in
    :param item: State the port was moved in
    :param handle: Handle of moved port or None if all ports are to be updated
    """
    # Local import — presumably avoids a circular import at module load time;
    # TODO confirm.
    from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView, InputPortView, OutputPortView, \
        ScopedVariablePortView
    for port in item.get_all_ports():
        # With no handle given, every port is considered; otherwise only the
        # port owning that exact handle.
        if not handle or handle is port.handle:
            rel_pos = (port.handle.pos.x.value, port.handle.pos.y.value)
            if isinstance(port, (IncomeView, OutcomeView, InputPortView, OutputPortView,
                                 ScopedVariablePortView)):
                port_m = port.model
                cur_rel_pos = port_m.get_meta_data_editor()['rel_pos']
                # Only write (and notify) when the position actually changed.
                if rel_pos != cur_rel_pos:
                    port_m.set_meta_data_editor('rel_pos', rel_pos)
                    if handle:
                        graphical_editor_view.emit('meta_data_changed', port_m, "position", True)
            else:
                # Not a recognized port view type — skip without touching the
                # single-port early exit below.
                continue
            if handle:
                # If we were supposed to update the meta data of a specific
                # port, we can stop here
                break
async def _load_container(self, reader, container_type, params=None, container=None):
    """Loads container of elements from the reader. Supports the container ref.
    Returns loaded container.

    :param reader: stream to deserialize from
    :param container_type: container type descriptor (FIX_SIZE/SIZE attrs)
    :param params: optional type parameters; params[1:] are forwarded to
        element loading
    :param container: optional pre-existing container to fill in place
    :return: the loaded container (``container`` if given, else a new list)
    """
    # Fixed-size containers carry no length prefix; otherwise the length is a
    # uvarint read from the stream.
    c_len = (container_type.SIZE if container_type.FIX_SIZE else await load_uvarint(reader))
    if container and get_elem(container) and c_len != len(container):
        raise ValueError("Size mismatch")
    elem_type = container_elem_type(container_type, params)
    res = container if container else []
    for i in range(c_len):
        try:
            # Track the element index so deserialization errors can report a
            # path; pop only on success (the tracker state is consumed by the
            # ArchiveException below on failure).
            self.tracker.push_index(i)
            fvalue = await self.load_field(reader, elem_type,
                                           params[1:] if params else None,
                                           eref(res, i) if container else None)
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
        if not container:
            # Only grow a freshly-created list; a provided container is
            # written in place via the eref passed to load_field.
            res.append(fvalue)
    return res
def choices(cls, blank=False):
    """Choices for Enum

    :return: List of tuples (<value>, <human-readable value>)
    :rtype: list
    """
    # Sort by the raw value (the first tuple element) for a stable ordering.
    result = sorted(cls.values.items(), key=lambda pair: pair[0])
    if blank:
        # Prepend an empty choice when requested.
        result.insert(0, ('', Enum.Value('', None, '', cls)))
    return result
def get_load(jid):
    '''Return the load from a specified jid'''
    log.debug('sqlite3 returner <get_load> called jid: %s', jid)
    conn = _get_conn(ret=None)
    try:
        cur = conn.cursor()
        sql = '''SELECT load FROM jids WHERE jid = :jid'''
        cur.execute(sql, {'jid': jid})
        data = cur.fetchone()
        if data:
            return salt.utils.json.loads(data[0].encode())
        return {}
    finally:
        # Bug fix: the original called _close_conn(conn) after the early
        # `return`, leaking the connection whenever a row was found. The
        # finally block guarantees the connection is always released.
        _close_conn(conn)
def z__update(self):
    """Triple quoted baseline representation.

    Return string with multiple triple quoted baseline strings when
    baseline had been compared multiple times against varying strings.

    :returns: source file baseline replacement text
    :rtype: str
    """
    updates = []
    for text in self._updates:
        if self._AVOID_RAW_FORM:
            # Raw string form disallowed: always use the escaped repr.
            text_repr = multiline_repr(text)
            raw_char = ''
        else:
            text_repr = multiline_repr(text, RAW_MULTILINE_CHARS)
            if len(text_repr) == len(text):
                # Repr added no escapes; a raw literal is only needed if the
                # text itself contains backslashes.
                raw_char = 'r' if '\\' in text_repr else ''
            else:
                # must have special characters that required added backslash
                # escaping, use normal representation to get backslashes right
                text_repr = multiline_repr(text)
                raw_char = ''
        # use triple double quote, except use triple single quote when
        # triple double quote is present to avoid syntax errors
        quotes = '"""'
        if quotes in text:
            quotes = "'''"
        # Wrap with blank lines when multi-line or when text ends with
        # characters that would otherwise result in a syntax error in
        # the formatted representation.
        multiline = self._indent or ('\n' in text)
        if multiline or text.endswith('\\') or text.endswith(quotes[0]):
            update = raw_char + quotes + '\n' + text_repr + '\n' + quotes
        else:
            update = raw_char + quotes + text_repr + quotes
        updates.append(update)
    # sort updates so Python hash seed has no impact on regression test
    update = '\n'.join(sorted(updates))
    # Re-indent every non-empty line to the baseline's indentation level;
    # lstrip removes the leading indent of the very first line.
    indent = ' ' * self._indent
    lines = ((indent + line) if line else '' for line in update.split('\n'))
    return '\n'.join(lines).lstrip()
def _set_md5_auth(self, v, load=False):
    """Setter method for md5_auth, mapped from YANG variable
    /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type/md5_auth
    (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_md5_auth is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_md5_auth() directly.

    YANG Description: MD5 Authentication
    """
    # NOTE(review): auto-generated (pyangbind-style) setter — the YANG
    # metadata strings below must not be hand-edited. `load` is unused here;
    # presumably part of the generated setter signature convention — confirm.
    if hasattr(v, "_utype"):
        # Unwrap a union-typed value into its native type before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=md5_auth.md5_auth, is_container='container', presence=False, yang_name="md5-auth", rest_name="md5-auth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """md5_auth must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=md5_auth.md5_auth, is_container='container', presence=False, yang_name="md5-auth", rest_name="md5-auth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""", })
    self.__md5_auth = t
    if hasattr(self, '_set'):
        self._set()
def _get_class(self, classname, namespace, local_only=None,
               include_qualifiers=None, include_classorigin=None,
               property_list=None):
    # pylint: disable=invalid-name
    """Get class from repository. Gets the class defined by classname
    from the repository, creates a copy, expands the copied class to
    include superclass properties if not localonly, and filters the
    class based on propertylist and includeClassOrigin.
    It also sets the propagated attribute.

    Parameters:
      classname (:term:`string`): Name of class to retrieve
      namespace (:term:`string`): Namespace from which to retrieve the class
      local_only (:class:`py:bool`): If `True`, only properties and methods
        in this specific class are returned. Otherwise properties and
        methods from the superclasses are included.
      include_qualifiers (:class:`py:bool`): If `True`, include qualifiers.
        Otherwise remove all qualifiers.
      include_classorigin (:class:`py:bool`): If `True` return the
        class_origin attributes of properties and methods.
      property_list (): Properties to be included in returned class. If
        None, all properties are returned. If empty, no properties are
        returned.

    Returns:
      Copy of the class if found with superclass properties installed and
      filtered per the keywords in params.

    Raises:
      CIMError: (CIM_ERR_NOT_FOUND) if class Not found in repository or
      CIMError: (CIM_ERR_INVALID_NAMESPACE) if namespace does not exist
    """
    class_repo = self._get_class_repo(namespace)
    # try to get the target class and create a copy for response
    try:
        c = class_repo[classname]
    except KeyError:
        raise CIMError(
            CIM_ERR_NOT_FOUND,
            _format("Class {0!A} not found in namespace {1!A}.", classname, namespace))
    cc = deepcopy(c)
    if local_only:
        # Bug fix: iterate over a snapshot of the items. Deleting entries
        # while iterating the live .items() view raises RuntimeError on
        # Python 3.
        for prop, pvalue in list(cc.properties.items()):
            if pvalue.propagated:
                del cc.properties[prop]
        for method, mvalue in list(cc.methods.items()):
            if mvalue.propagated:
                del cc.methods[method]
    self._filter_properties(cc, property_list)
    if not include_qualifiers:
        self._remove_qualifiers(cc)
    if not include_classorigin:
        self._remove_classorigin(cc)
    return cc
def parse_options(arguments=None):
    """Reads command-line arguments

    >>> parse_options('--indent-comments')
    """
    # Normalise the input: default to argv, split a raw string, and pass an
    # already-parsed Namespace straight through.
    if arguments is None:
        arguments = sys.argv[1:]
    if isinstance(arguments, str):
        arguments = arguments.split()
    if isinstance(arguments, argparse.Namespace):
        return arguments
    parser = create_args_parser()
    args = parser.parse_args(arguments)
    args.dialect = args.dialect.lower()
    if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']:
        parser.error("`{0}' is not a recognized dialect".format(args.dialect))
    args.backup_dir = os.path.expanduser(args.backup_dir)
    if not os.path.exists(args.backup_dir):
        parser.error("Directory `{0}' does not exist".format(args.backup_dir))
    if args.output_file and len(args.files) > 1:
        parser.error('Cannot use the -o flag when more than one file is specified')
    if not args.files:
        # Indentation from standard input: in-place modification only makes
        # sense with an explicit output file, and backups/warnings are
        # meaningless without a source file.
        if args.modify and not args.output_file:
            args.modify = False
        args.backup = False
        args.warning = False
    if args.output_diff:
        # If someone requests a diff we assume he/she doesn't want the file
        # to be modified
        args.modify = False
    return args
def _skip_file(self, d, files):
    '''The function passed into shutil.copytree to ignore certain patterns and filetypes

    Currently Skipped:
      Directories - handled by copytree
      Symlinks - handled by copytree
      Write-only files (stuff in /proc)
      Binaries (can't scan them)
    '''
    skip_list = []
    for name in files:
        full_path = os.path.join(d, name)
        # copytree handles directories and symlinks itself — never skip them.
        if os.path.isdir(full_path) or os.path.islink(full_path):
            continue
        # Skip anything that is not text (binaries can't be scanned).
        if 'text' not in content_type.from_file(full_path):
            skip_list.append(name)
    return skip_list
def _validate_required_settings ( self , application_id , application_config , required_settings ) :
"""All required keys must be present""" | for setting_key in required_settings :
if setting_key not in application_config . keys ( ) :
raise ImproperlyConfigured ( MISSING_SETTING . format ( application_id = application_id , setting = setting_key ) ) |
def _launch_instance(self):
    """Create new test instance in a resource group with the same name."""
    self.running_instance_id = ipa_utils.generate_instance_name('azure-ipa-test')
    self.logger.debug('ID of instance: %s' % self.running_instance_id)
    self._set_default_resource_names()
    try:
        # Try block acts as a transaction. If an exception is raised
        # attempt to cleanup the resource group and all created resources.
        # Create resource group.
        self._create_resource_group(self.region, self.running_instance_id)
        if self.subnet_id:
            # Use existing vnet/subnet.
            subnet = self.network.subnets.get(self.vnet_resource_group, self.vnet_name, self.subnet_id)
        else:
            # Derive subnet/vnet names from the instance id.
            self.subnet_id = ''.join([self.running_instance_id, '-subnet'])
            self.vnet_name = ''.join([self.running_instance_id, '-vnet'])
            # Create new vnet
            self._create_virtual_network(self.region, self.running_instance_id, self.vnet_name)
            # Create new subnet in new vnet
            subnet = self._create_subnet(self.running_instance_id, self.subnet_id, self.vnet_name)
        # Setup interface and public ip in resource group.
        public_ip = self._create_public_ip(self.public_ip_name, self.running_instance_id, self.region)
        interface = self._create_network_interface(self.ip_config_name, self.nic_name, public_ip, self.region, self.running_instance_id, subnet, self.accelerated_networking)
        # Get dictionary of VM parameters and create instance.
        vm_config = self._create_vm_config(interface)
        self._create_vm(vm_config)
    except Exception:
        # Best-effort cleanup; the original provisioning exception is
        # re-raised regardless of whether teardown succeeds.
        try:
            self._terminate_instance()
        except Exception:
            pass
        raise
    else:
        # Ensure VM is in the running state.
        self._wait_on_instance('VM running', timeout=self.timeout)
def get_asset_repository_session(self):
    """Gets the session for retrieving asset to repository mappings.

    return: (osid.repository.AssetRepositorySession) - an
        ``AssetRepositorySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_asset_repository()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_repository()`` is ``true``.*
    """
    if self.supports_asset_repository():
        # pylint: disable=no-member
        return sessions.AssetRepositorySession(runtime=self._runtime)
    raise errors.Unimplemented()
def get_inheritors(cls):
    """Get a set of all classes that inherit from the given class."""
    # Depth-first walk over the subclass graph, collecting every class
    # reachable via __subclasses__ exactly once.
    found = set()
    pending = [cls]
    while pending:
        current = pending.pop()
        for subclass in current.__subclasses__():
            if subclass in found:
                continue
            found.add(subclass)
            pending.append(subclass)
    return found
def add_atr(self, periods=14, str=None, name='', **kwargs):
    """Add Average True Range (ATR) study to QuantFigure.studies

    Parameters:
      periods : int or list(int)
        Number of periods
      name : string
        Name given to the study
      str : string
        Label factory for studies
        The following wildcards can be used:
          {name} : Name of the column
          {study} : Name of the study
          {period} : Period used
        Examples:
          'study:{study} - period:{period}'
      kwargs :
        legendgroup : bool
          If true, all legend items are grouped into a single one
        All formatting values available on iplot()
    """
    # NOTE: `str` shadows the builtin but is part of the public signature.
    params = {
        'periods': periods,
        'high': self._d['high'],
        'low': self._d['low'],
        'close': self._d['close'],
        'str': str,
    }
    display = utils.merge_dict({'legendgroup': False}, kwargs)
    self._add_study({'kind': 'atr', 'name': name, 'params': params, 'display': display})
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
    """Private method that returns a root from an ElementTree describing the
    API versions supported by the specified server. The result will be
    vimServiceVersions.xml if it exists, otherwise vimService.wsdl if it
    exists, otherwise None.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type protocol: string
    @param server: Which server to connect to.
    @type server: string
    @param port: Port
    @type port: int
    @param path: Path
    @type path: string
    @param sslContext: SSL Context describing the various SSL options. It is
        only supported in Python 2.7.9 or higher.
    @type sslContext: SSL.Context
    """
    # Prefer the version manifest; fall back to the WSDL. Explicit `is not
    # None` checks — ElementTree elements can be falsy even when present.
    for suffix in ("/vimServiceVersions.xml", "/vimService.wsdl"):
        tree = __GetElementTree(protocol, server, port, path + suffix, sslContext)
        if tree is not None:
            return tree
    return None
def post_user_bookmarks_save(self, id, **data):
    """POST /users/:id/bookmarks/save/

    Adds a new bookmark for the user. Returns ``{"created": true}``.
    A user is only authorized to save his/her own events.
    """
    path = "/users/{0}/bookmarks/save/".format(id)
    return self.post(path, data=data)
def nvmlSystemGetTopologyGpuSet(cpuNumber):
    r"""Retrieve the set of GPUs that have a CPU affinity with the given CPU number

    * For all products.
    * Supported on Linux only.

    * @param cpuNumber The CPU number
    * @param count When zero, is set to the number of matching GPUs such that \a deviceArray
    *              can be malloc'd. When non-zero, \a deviceArray will be filled with \a count
    *              number of device handles.
    * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber
    * @return
    *  - \ref NVML_SUCCESS                 if \a deviceArray or \a count (if initially zero) has been set
    *  - \ref NVML_ERROR_INVALID_ARGUMENT  if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count
    *  - \ref NVML_ERROR_NOT_SUPPORTED     if the device or OS does not support this feature
    *  - \ref NVML_ERROR_UNKNOWN           an error has occurred in underlying topology discovery

    nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet
    """
    c_count = c_uint(0)
    fn = _nvmlGetFunctionPointer("nvmlSystemGetTopologyGpuSet")
    # First call with a NULL array queries the required size into c_count.
    ret = fn(cpuNumber, byref(c_count), None)
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    # Bug fix: removed a stray debug `print(c_count.value)` left in the
    # original implementation.
    # Call again with a buffer of the reported size.
    device_array = c_nvmlDevice_t * c_count.value
    c_devices = device_array()
    ret = fn(cpuNumber, byref(c_count), c_devices)
    _nvmlCheckReturn(ret)
    return list(c_devices[0:c_count.value])
def update(self, event=None, force=False):
    """highlight the current line"""
    # Row number of the insertion cursor ("row.col" -> row).
    insert_row = self.text.index(tkinter.INSERT).split(".")[0]
    tag = self.tag_current_line.id
    # Clear any previous highlight, then tag the whole current line.
    self.text.tag_remove(tag, "1.0", "end")
    self.text.tag_add(tag, "%s.0" % insert_row, "%s.0+1lines" % insert_row)
def content(self):
    """Content of the response, in bytes or unicode
    (if available).
    """
    # NOTE(review): reads self._content / self._content_consumed — presumably
    # used as a property on a response object; confirm against the class.
    if self._content is not None:
        # Already read and cached.
        return self._content
    if self._content_consumed:
        raise RuntimeError('The content for this response was ' 'already consumed')
    # Read the contents.
    try:
        self._content = self.raw.read()
    except AttributeError:
        # No underlying raw stream to read from.
        return None
    # Decode GZip'd content.
    if 'gzip' in self.headers.get('content-encoding', ''):
        try:
            self._content = decode_gzip(self._content)
        except zlib.error:
            # Decompression failures are deliberately ignored, leaving the
            # still-compressed bytes in place.
            pass
    # Decode unicode content.
    if self.config.get('decode_unicode'):
        self._content = get_unicode_from_response(self)
    self._content_consumed = True
    return self._content
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.