signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def emit(self, ctx, modules, fd):
    """Main control function.

    Set up the top-level parts of the sample document, then process
    recursively all nodes in all data trees, and finally emit the
    sample XML document.
    """
    # Optional slash-separated path restricting which subtree is sampled.
    if ctx.opts.sample_path is not None:
        path = ctx.opts.sample_path.split('/')
        if path[0] == '':
            path = path[1:]
    else:
        path = []
    # Refuse to emit anything for modules that failed validation.
    for (epos, etag, eargs) in ctx.errors:
        if error.is_error(error.err_level(etag)):
            raise error.EmitError("sample-xml-skeleton plugin needs a valid module")
    self.doctype = ctx.opts.doctype
    if self.doctype not in ("config", "data"):
        raise error.EmitError("Unsupported document type: %s" % self.doctype)
    self.annots = ctx.opts.sample_annots
    self.defaults = ctx.opts.sample_defaults
    # Dispatch table: YANG statement keyword -> handler method.
    self.node_handler = {
        "container": self.container,
        "leaf": self.leaf,
        "anyxml": self.anyxml,
        "choice": self.process_children,
        "case": self.process_children,
        "list": self.list,
        "leaf-list": self.leaf_list,
    }
    self.ns_uri = {}
    for yam in modules:
        self.ns_uri[yam] = yam.search_one("namespace").arg
    # Root element is <config> or <data> in the NETCONF base namespace.
    self.top = etree.Element(self.doctype, {"xmlns": "urn:ietf:params:xml:ns:netconf:base:1.0"})
    tree = etree.ElementTree(self.top)
    for yam in modules:
        self.process_children(yam, self.top, None, path)
    # Serialization differs per Python version: lxml's tostring() returns
    # bytes under Python 3, so decode before writing to the text stream.
    # NOTE(review): lexicographic `sys.version > "3"` works for current
    # versions but is fragile — confirm before porting.
    if sys.version > "3":
        fd.write(str(etree.tostring(tree, pretty_print=True, encoding="UTF-8", xml_declaration=True), "UTF-8"))
    elif sys.version > "2.7":
        tree.write(fd, encoding="UTF-8", pretty_print=True, xml_declaration=True)
    else:
        tree.write(fd, pretty_print=True, encoding="UTF-8")
|
def snli_dataset(directory='data/',
                 train=False,
                 dev=False,
                 test=False,
                 train_filename='snli_1.0_train.jsonl',
                 dev_filename='snli_1.0_dev.jsonl',
                 test_filename='snli_1.0_test.jsonl',
                 extracted_name='snli_1.0',
                 check_files=None,
                 url='http://nlp.stanford.edu/projects/snli/snli_1.0.zip'):
    """Load the Stanford Natural Language Inference (SNLI) dataset.

    The SNLI corpus (version 1.0) is a collection of 570k human-written English sentence pairs
    manually labeled for balanced classification with the labels entailment, contradiction, and
    neutral, supporting the task of natural language inference (NLI), also known as recognizing
    textual entailment (RTE).

    **Reference:** https://nlp.stanford.edu/projects/snli/

    **Citation:**
    Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large
    annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference
    on Empirical Methods in Natural Language Processing (EMNLP).

    Args:
        directory (str, optional): Directory to cache the dataset.
        train (bool, optional): If to load the training split of the dataset.
        dev (bool, optional): If to load the development split of the dataset.
        test (bool, optional): If to load the test split of the dataset.
        train_filename (str, optional): The filename of the training split.
        dev_filename (str, optional): The filename of the development split.
        test_filename (str, optional): The filename of the test split.
        extracted_name (str, optional): Name of the extracted dataset directory.
        check_files (list of str, optional): Check if these files exist, then this download was
            successful. Defaults to the extracted training split.
        url (str, optional): URL of the dataset archive.

    Returns:
        :class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`:
        Returns between one and all dataset splits (train, dev and test) depending on if their
        respective boolean argument is ``True``.
    """
    # Avoid a mutable default argument (shared list across calls).
    if check_files is None:
        check_files = ['snli_1.0/snli_1.0_train.jsonl']
    download_file_maybe_extract(url=url, directory=directory, check_files=check_files)

    def get_transitions(parse):
        # Convert a binary parse like "( ( Kids ) ( are ... ) )" into a
        # shift/reduce transition sequence. Split on whitespace first —
        # iterating the raw string would emit one 'shift' per *character*.
        return ['reduce' if token == ')' else 'shift' for token in parse.split() if token != '(']

    ret = []
    splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
    split_filenames = [filename for (requested, filename) in splits if requested]
    for filename in split_filenames:
        full_path = os.path.join(directory, extracted_name, filename)
        examples = []
        with io.open(full_path, encoding='utf-8') as f:
            for line in f:
                line = json.loads(line.strip())
                examples.append({
                    'premise': line['sentence1'],
                    'hypothesis': line['sentence2'],
                    'label': line['gold_label'],
                    'premise_transitions': get_transitions(line['sentence1_binary_parse']),
                    'hypothesis_transitions': get_transitions(line['sentence2_binary_parse']),
                })
        ret.append(Dataset(examples))
    if len(ret) == 1:
        return ret[0]
    return tuple(ret)
|
def create_continuous_query(self, name, select, database=None, resample_opts=None):
    r"""Create a continuous query for a database.

    :param name: the name of continuous query to create
    :type name: str
    :param select: select statement for the continuous query
    :type select: str
    :param database: the database for which the continuous query is
        created. Defaults to current client's database
    :type database: str
    :param resample_opts: resample options
    :type resample_opts: str

    :Example:

        >> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \
        ... 'FROM "cpu" GROUP BY time(1m)'
        >> client.create_continuous_query(
        ...     'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m')
        >> client.get_list_continuous_queries()
    """
    # Assemble the statement clause by clause; the RESAMPLE clause is
    # only present when resample options were supplied.
    clauses = [
        'CREATE CONTINUOUS QUERY',
        quote_ident(name),
        'ON',
        quote_ident(database or self._database),
    ]
    if resample_opts:
        clauses.append('RESAMPLE ' + resample_opts)
    clauses.extend(['BEGIN', select, 'END'])
    self.query(' '.join(clauses))
|
def warn(self, collection):
    """Check this code element for documentation related problems.

    Appends a warning string to `collection` when the element has no
    docstring; elements with a docstring are left alone.
    """
    if self.has_docstring():
        return
    collection.append("WARNING: no docstring on code element {}".format(self.name))
|
def validate_file_ownership(config):
    """Verify that configuration files are owned by the correct user/group."""
    files = config.get('files', {})
    for file_name, options in files.items():
        # Reject unknown per-file option keys.
        # NOTE(review): 'optional' is read below but is not in this allowed
        # list, so it can only be inherited from the top level of `config`;
        # confirm whether per-file 'optional' was meant to be permitted.
        for key in options.keys():
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError("Invalid ownership configuration: {}".format(key))
        # Per-file settings fall back to top-level config, then to root.
        owner = options.get('owner', config.get('owner', 'root'))
        group = options.get('group', config.get('group', 'root'))
        # NOTE(review): the default here is the *string* 'False', which is
        # truthy — verify _validate_file_ownership interprets it as intended.
        optional = options.get('optional', config.get('optional', 'False'))
        if '*' in file_name:
            # Glob pattern: check every match that is not itself listed
            # explicitly in the config (explicit entries win).
            for file in glob.glob(file_name):
                if file not in files.keys():
                    if os.path.isfile(file):
                        _validate_file_ownership(owner, group, file, optional)
        else:
            if os.path.isfile(file_name):
                _validate_file_ownership(owner, group, file_name, optional)
|
def addTrack(self, track):
    """Add a :class:`MediaStreamTrack` to the set of media tracks which
    will be transmitted to the remote peer.

    :param track: the audio or video track to send.
    :returns: the sender that will transmit the track.
    :raises InternalError: if the track kind is not 'audio' or 'video'.
    """
    # check state is valid
    self.__assertNotClosed()
    if track.kind not in ['audio', 'video']:
        raise InternalError('Invalid track kind "%s"' % track.kind)
    # don't add track twice
    self.__assertTrackHasNoSender(track)
    # Reuse an existing transceiver of the same kind whose sender has no
    # track yet, widening its direction to include sending.
    for transceiver in self.__transceivers:
        if transceiver.kind == track.kind:
            if transceiver.sender.track is None:
                transceiver.sender.replaceTrack(track)
                transceiver.direction = or_direction(transceiver.direction, 'sendonly')
                return transceiver.sender
    # No reusable transceiver: create a fresh sendrecv one for this track.
    transceiver = self.__createTransceiver(direction='sendrecv', kind=track.kind, sender_track=track)
    return transceiver.sender
|
def get(self, bucket=None, key=None, version_id=None, upload_id=None, uploads=None, download=None):
    """Get object or list parts of a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param version_id: The version ID. (Default: ``None``)
    :param upload_id: The upload ID. (Default: ``None``)
    :param download: The download flag. (Default: ``None``)
    :returns: A Flask response.
    """
    if upload_id:
        # A multipart upload is in progress: list its parts instead.
        return self.multipart_listparts(bucket, key, upload_id)
    obj = self.get_object(bucket, key, version_id)
    # 'download' is None only when absent from the query string; any
    # present value (even empty) triggers attachment semantics.
    return self.send_object(bucket, obj, as_attachment=download is not None)
|
def _flat_crossproduct_scatter(process,        # type: WorkflowJobStep
                               joborder,       # type: MutableMapping[Text, Any]
                               scatter_keys,   # type: MutableSequence[Text]
                               callback,       # type: ReceiveScatterOutput
                               startindex,     # type: int
                               runtimeContext  # type: RuntimeContext
                              ):  # type: (...) -> Tuple[List[Generator], int]
    """Inner loop of the flat cross-product scatter.

    Recursively expands the cross product of all `scatter_keys`, giving
    each generated job a flat output index starting at `startindex`.
    Returns the list of job generators and the next free output index.
    """
    scatter_key = scatter_keys[0]
    jobl = len(joborder[scatter_key])
    steps = []
    put = startindex  # flat output slot for the next generated job
    for index in range(0, jobl):
        # Shallow copy: only the current scatter key is specialised.
        sjob = copy.copy(joborder)
        sjob[scatter_key] = joborder[scatter_key][index]
        if len(scatter_keys) == 1:
            # Innermost dimension: evaluate and emit the actual job.
            if runtimeContext.postScatterEval is not None:
                sjob = runtimeContext.postScatterEval(sjob)
            steps.append(process.job(sjob, functools.partial(callback.receive_scatter_output, put), runtimeContext))
            put += 1
        else:
            # Recurse over the remaining scatter keys; each recursive call
            # consumes one output slot per generated step.
            (add, _) = _flat_crossproduct_scatter(process, sjob, scatter_keys[1:], callback, put, runtimeContext)
            put += len(add)
            steps.extend(add)
    return (steps, put)
|
def good_surts_from_default(default_surt):
    '''Takes a standard surt without scheme and without trailing comma, and
    returns a list of "good" surts that together match the same set of
    urls. For example:

        good_surts_from_default('com,example)/path')

    returns

        ['http://(com,example,)/path',
         'https://(com,example,)/path',
         'http://(com,example,www,)/path',
         'https://(com,example,www,)/path']
    '''
    if default_surt == '':
        return ['']
    parts = default_surt.split(')', 1)
    if len(parts) == 1:
        # No path part: emit open-ended host surts for both schemes.
        host_part = parts[0]
        return ['%s://(%s' % (scheme, host_part) for scheme in ('http', 'https')]
    host_part, path_part = parts
    # Both schemes, with and without an extra www host label.
    return [
        '%s://(%s%s,)%s' % (scheme, host_part, www, path_part)
        for www in ('', ',www')
        for scheme in ('http', 'https')
    ]
|
def cyclone(adata, marker_pairs, gene_names, sample_names, iterations=1000, min_iter=100, min_pairs=50):
    """Assigns scores and predicted class to observations [Scialdone15]_ [Fechtner18]_.

    Calculates scores for each observation and each phase and assigns prediction
    based on marker pairs identified by sandbag. This reproduces the approach of
    [Scialdone15]_ in the implementation of [Fechtner18]_.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        The annotated data matrix.
    marker_pairs : `dict`
        Dictionary of marker pairs. See :func:`~scanpy.api.sandbag` output.
    gene_names : `list`
        List of genes.
    sample_names : `list`
        List of samples.
    iterations : `int`, optional (default: 1000)
        Number of iterations for random sampling to obtain a cycle score.
    min_iter : `int`, optional (default: 100)
        Minimum number of iterations for score estimation.
    min_pairs : `int`, optional (default: 50)
        Minimum number of pairs for score estimation.

    Returns
    -------
    A :class:`~pandas.DataFrame` with samples as index and categories as columns
    with scores for each category for each sample and an additional column with
    the name of the max scoring category for each sample. If marker pairs contain
    only the cell cycle categories G1, S and G2M an additional column
    ``pypairs_cc_prediction`` will be added, where category S is assigned to
    samples where G1 and G2M score are below 0.5.

    Raises
    ------
    ImportError
        If `pypairs` is missing or older than v3.0.9.
    """
    # Keep the import and the version check separate: previously the
    # "too old" ImportError raised inside the try-block was swallowed by
    # the `except ImportError` below and misreported as "not installed".
    try:
        from pypairs import __version__ as pypairsversion
    except ImportError:
        raise ImportError('You need to install the package `pypairs`.')
    from distutils.version import LooseVersion
    if LooseVersion(pypairsversion) < LooseVersion("v3.0.9"):
        raise ImportError('Please only use `pypairs` >= v3.0.9 ')
    from pypairs.pairs import cyclone
    from . import settings
    from pypairs import settings as pp_settings
    # Mirror scanpy's settings into pypairs so both behave consistently.
    pp_settings.verbosity = settings.verbosity
    pp_settings.n_jobs = settings.n_jobs
    pp_settings.writedir = settings.writedir
    pp_settings.cachedir = settings.cachedir
    pp_settings.logfile = settings.logfile
    return cyclone(data=adata, marker_pairs=marker_pairs, gene_names=gene_names,
                   sample_names=sample_names, iterations=iterations,
                   min_iter=min_iter, min_pairs=min_pairs)
|
def calledOnce(cls, spy):  # pylint: disable=invalid-name
    """Check that the inspected spy was called exactly once.

    Args:
        spy: SinonSpy
    Raises:
        cls.failException: if the spy was not called exactly once.
    """
    cls.__is_spy(spy)
    if spy.calledOnce:
        return
    raise cls.failException(cls.message)
|
def reference(self):
    """A :class:`~google.cloud.bigquery.model.ModelReference` pointing to
    this model.

    Read-only.

    Returns:
        google.cloud.bigquery.model.ModelReference: pointer to this model.
    """
    model_ref = ModelReference()
    # Share the underlying protobuf reference rather than copying it.
    model_ref._proto = self._proto.model_reference
    return model_ref
|
async def copy_context_with(ctx: commands.Context, *, author=None, channel=None, **kwargs):
    """Makes a new :class:`Context` with changed message properties.

    :param ctx: the context to clone.
    :param author: optional replacement for the message author.
    :param channel: optional replacement channel (falls back to the original).
    :param kwargs: extra message attributes forwarded to ``Message._update``.
    :returns: a new context of the same type as ``ctx``.
    """
    # copy the message and update the attributes
    alt_message: discord.Message = copy.copy(ctx.message)
    alt_message._update(channel or alt_message.channel, kwargs)
    # pylint: disable=protected-access
    if author is not None:
        alt_message.author = author
    # obtain and return a context of the same type
    return await ctx.bot.get_context(alt_message, cls=type(ctx))
|
def collect(self, step, content):
    '''given a name of a configuration key and the provided content, collect
    the required metadata from the user.

    Parameters
    ----------
    step : the key in the configuration. Can be one of:
        user_message_<name>
        runtime_arg_<name>
        record_asciinema
        record_environment
        user_prompt_<name>
    content : the default value or boolean to indicate doing the step.
    '''
    # Exact-match steps map directly to recorder methods.
    exact_steps = {
        'record_asciinema': self.record_asciinema,
        'record_environment': self.record_environment,
    }
    if step.startswith('user_message'):
        # Just relay the configured message to the user.
        print(content)
    elif step.startswith('user_prompt'):
        # Collect the answer now (runtime prompts are handled elsewhere).
        self.collect_argument(step, content)
    elif step in exact_steps:
        exact_steps[step]()
    bot.debug(self.data)
|
def to_underscore_case(camelcase_str):
    r"""Convert a CamelCase identifier to underscore_case.

    References:
        http://stackoverflow.com/questions/1175208/convert-camelcase

    Example:
        >>> to_underscore_case('UnderscoreFuncname')
        'underscore_funcname'
    """
    # First pass: break before an uppercase letter followed by lowercase
    # letters (handles acronym boundaries like 'HTTPResponse').
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camelcase_str)
    # Second pass: break between a lowercase/digit and an uppercase letter.
    second_pass = re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
|
def assemble(self):
    """Assemble an array from a distributed array of object IDs.

    Fetches every block from the object store and copies it into a single
    contiguous numpy array of shape ``self.shape``.

    Returns:
        np.ndarray: the fully assembled array.
    """
    # Use the first block to determine the dtype of the result.
    first_block = ray.get(self.objectids[(0,) * self.ndim])
    dtype = first_block.dtype
    result = np.zeros(self.shape, dtype=dtype)
    for index in np.ndindex(*self.num_blocks):
        lower = DistArray.compute_block_lower(index, self.shape)
        upper = DistArray.compute_block_upper(index, self.shape)
        # Index with a *tuple* of slices: indexing with a list of slices
        # is deprecated and removed in modern numpy.
        result[tuple(slice(l, u) for (l, u) in zip(lower, upper))] = ray.get(self.objectids[index])
    return result
|
def accel_next(self, *args):
    """Callback to go to the next tab. Called by the accel key.

    Wraps around to the first tab when the last one is active.
    Always returns True so the accelerator is reported as handled.
    """
    notebook = self.get_notebook()
    on_last_page = notebook.get_current_page() + 1 == notebook.get_n_pages()
    if on_last_page:
        notebook.set_current_page(0)
    else:
        notebook.next_page()
    return True
|
def resolve(self, key):
    """Resolve the requested key to an object instance.

    :raises KeyError: if the key has not been registered.
    """
    entry = self._registrations.get(key)
    if entry is None:
        raise KeyError("Unknown key: '{0}'".format(key))
    return entry.resolve(self, key)
|
def _cast_to_pod ( val ) :
"""Try cast to int , float , bool , str , in that order ."""
|
bools = { "True" : True , "False" : False }
if val in bools :
return bools [ val ]
try :
return int ( val )
except ValueError :
try :
return float ( val )
except ValueError :
return tf . compat . as_text ( val )
|
def get_all_credit_notes(self, params=None):
    """Get all credit notes.

    This will iterate over all pages until it gets all elements. So if the
    rate limit is exceeded it will throw an Exception and you will get nothing.

    :param params: search params
    :return: list
    """
    search_params = params or {}
    return self._iterate_through_pages(
        self.get_credit_notes_per_page,
        resource=CREDIT_NOTES,
        params=search_params,
    )
|
def get_merge_direction(cell1, cell2):
    """Determine the side of cell1 that can be merged with cell2.

    This is based on the location of the two cells in the table as well
    as the compatibility of their height and width.

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell

    Returns
    -------
    str
        The side onto which cell2 can be merged. Will be one of
        ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
    """
    # Edge coordinates of each cell's bounding box.
    left1, top1 = cell1.column, cell1.row
    right1 = cell1.column + cell1.column_count
    bottom1 = cell1.row + cell1.row_count
    left2, top2 = cell2.column, cell2.row
    right2 = cell2.column + cell2.column_count
    bottom2 = cell2.row + cell2.row_count
    # Horizontal merges need matching rows; vertical merges matching columns.
    same_rows = top1 == top2 and bottom1 == bottom2
    same_cols = left1 == left2 and right1 == right2
    if same_rows and right1 == left2 and cell1.right_sections >= cell2.left_sections:
        return "RIGHT"
    if same_cols and top1 == bottom2 and cell1.top_sections >= cell2.bottom_sections:
        return "TOP"
    if same_cols and bottom1 == top2 and cell1.bottom_sections >= cell2.top_sections:
        return "BOTTOM"
    if same_rows and left1 == right2 and cell1.left_sections >= cell2.right_sections:
        return "LEFT"
    return "NONE"
|
def sendInstanceChange(self, view_no: int, suspicion=Suspicions.PRIMARY_DEGRADED):
    """Broadcast an instance change request to all the remaining nodes.

    :param view_no: the view number when the instance change is requested
    :param suspicion: the reason for requesting the change; its code is
        embedded in the message and its reason is logged.
    """
    # If not found any sent instance change messages in last
    # `ViewChangeWindowSize` seconds or the last sent instance change
    # message was sent long enough ago then instance change message can be
    # sent otherwise no.
    canSendInsChange, cooldown = self.insChngThrottler.acquire()
    if canSendInsChange:
        logger.info("{}{} sending an instance change with view_no {}" " since {}".format(VIEW_CHANGE_PREFIX, self, view_no, suspicion.reason))
        logger.info("{}{} metrics for monitor: {}".format(MONITORING_PREFIX, self, self.provider.pretty_metrics()))
        msg = self._create_instance_change_msg(view_no, suspicion.code)
        self.send(msg)
        # record instance change vote for self and try to change the view
        # if quorum is reached
        self._on_verified_instance_change_msg(msg, self.name)
    else:
        logger.info("{} cannot send instance change sooner then {} seconds".format(self, cooldown))
|
def handle_tags(repo, **kwargs):
    """List the repository's tags as strings.

    :return: repo.tags()
    """
    log.info('tags: %s %s' % (repo, kwargs))
    tags = repo.tags(**kwargs)
    return [str(tag) for tag in tags]
|
def set_result(self, values, visible_columns=None):
    """Set the result of this run.

    Use this method instead of manually setting the run attributes and
    calling after_execution(), this method handles all this by itself.

    @param values: a dictionary with result values as returned by
        RunExecutor.execute_run(), may also contain arbitrary additional values
    @param visible_columns: a set of keys of values that should be visible by
        default (i.e., not marked as hidden), apart from those that BenchExec
        shows by default anyway
    """
    # Avoid a mutable default argument; empty set means "defaults only".
    if visible_columns is None:
        visible_columns = set()
    exitcode = values.pop('exitcode', None)
    if exitcode is not None:
        self.values['@exitcode'] = exitcode
        exitcode = util.ProcessExitCode.from_raw(exitcode)
        # A run either died from a signal or returned a value, never both.
        if exitcode.signal:
            self.values['@exitsignal'] = exitcode.signal
        else:
            self.values['@returnvalue'] = exitcode.value
    for key, value in values.items():
        if key == 'walltime':
            self.walltime = value
        elif key == 'cputime':
            self.cputime = value
        elif key == 'memory':
            self.values['memUsage'] = value
        elif key == 'cpuenergy' and not isinstance(value, (str, bytes)):
            # Structured energy measurement: expand into per-package values.
            energy = intel_cpu_energy.format_energy_results(value)
            for energy_key, energy_value in energy.items():
                if energy_key != 'cpuenergy':
                    energy_key = '@' + energy_key
                self.values[energy_key] = energy_value
        elif key == 'cpuenergy':
            self.values[key] = value
        elif key in visible_columns:
            self.values[key] = value
        else:
            # Hidden by default: '@'-prefixed keys are not shown.
            self.values['@' + key] = value
    self.after_execution(exitcode, termination_reason=values.get('terminationreason'))
|
def focus_prev(self):
    """focus previous message in depth first order"""
    mid = self.get_selected_mid()
    # Position of the currently selected message's local root.
    localroot = self._sanitize_position((mid,))
    if localroot == self.get_focus()[1]:
        # Already at the message's root: step to the previous position in
        # the underlying tree (depth-first order).
        newpos = self._tree.prev_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
    else:
        # Otherwise jump back up to the root of the current message.
        newpos = localroot
    if newpos is not None:
        self.body.set_focus(newpos)
|
def write(self, *messages):
    """Push a message list to this context's input queue.

    :param mixed value: message
    """
    for message in messages:
        if not isinstance(message, Token):
            # Normalise plain values to the expected input tuple shape.
            message = ensure_tuple(message, cls=self._input_type, length=self._input_length)
            # The first message fixes the input length for later ones.
            if self._input_length is None:
                self._input_length = len(message)
        self.input.put(message)
|
def addDrizKeywords(self, hdr, versions):
    """Add drizzle parameter keywords to header.

    For every chip in ``self.parlist`` one numbered set of drizzle keywords
    describing the parameters used is written via writeDrizKeywords(), then
    the supplied ``versions`` mapping is recorded as HISTORY cards.
    """
    # Extract some global information for the keywords
    _geom = 'User parameters'
    _imgnum = 0
    for pl in self.parlist:
        # Start by building up the keyword prefix based
        # on the image number for the chip
        # _keyprefix = 'D%03d' % _imgnum
        _imgnum += 1
        drizdict = DRIZ_KEYWORDS.copy()
        # Update drizdict with current values
        # (string values are sliced to fit FITS keyword value limits)
        drizdict['VER']['value'] = pl['driz_version'][:44]
        drizdict['DATA']['value'] = pl['data'][:64]
        drizdict['DEXP']['value'] = pl['exptime']
        drizdict['OUDA']['value'] = pl['outFinal'][:64]
        drizdict['OUWE']['value'] = pl['outWeight'][:64]
        if pl['outContext'] is None:
            outcontext = ""
        else:
            outcontext = pl['outContext'][:64]
        drizdict['OUCO']['value'] = outcontext
        # Mask keyword depends on whether this is a single-image drizzle.
        if self.single:
            drizdict['MASK']['value'] = pl['singleDrizMask'][:64]
        else:
            drizdict['MASK']['value'] = pl['finalMask'][:64]
        # Process the values of WT_SCL to be consistent with
        # what IRAF Drizzle would output
        if 'wt_scl_val' in pl:
            _wtscl = pl['wt_scl_val']
        else:
            if pl['wt_scl'] == 'exptime':
                _wtscl = pl['exptime']
            elif pl['wt_scl'] == 'expsq':
                _wtscl = pl['exptime'] * pl['exptime']
            else:
                _wtscl = pl['wt_scl']
        drizdict['WTSC']['value'] = _wtscl
        drizdict['KERN']['value'] = pl['kernel']
        drizdict['PIXF']['value'] = pl['pixfrac']
        drizdict['OUUN']['value'] = self.units
        if pl['fillval'] is None:
            _fillval = 'INDEF'
        else:
            _fillval = pl['fillval']
        drizdict['FVAL']['value'] = _fillval
        drizdict['WKEY']['value'] = pl['driz_wcskey']
        drizdict['SCAL'] = {'value': pl['scale'], 'comment': 'Drizzle, pixel size (arcsec) of output image'}
        drizdict['ISCL'] = {'value': pl['idcscale'], 'comment': 'Drizzle, default IDCTAB pixel size(arcsec)'}
        # Now update header with values
        writeDrizKeywords(hdr, _imgnum, drizdict)
        del drizdict
    # Add version information as HISTORY cards to the header
    if versions is not None:
        ver_str = "AstroDrizzle processing performed using: "
        hdr.add_history(ver_str)
        for k in versions.keys():
            ver_str = ' ' + str(k) + ' Version ' + str(versions[k])
            hdr.add_history(ver_str)
|
def flush(self):
    """flush() -> List of decoded messages.

    Decodes the packets in the internal buffer. This enables the
    continuation of the processing of received packets after a
    :exc:`ProtocolError` has been handled.

    :return: A (possibly empty) list of decoded messages from the buffered packets.
    :rtype: list(bytes)
    :raises ProtocolError: An invalid byte sequence has been detected.
    """
    decoded = []
    while self._packets:
        packet = self._packets.popleft()
        try:
            message = decode(packet)
        except ProtocolError:
            # Preserve everything decoded so far, so the caller can resume
            # processing after handling the error.
            self._messages = decoded
            raise
        decoded.append(message)
    return decoded
|
def _create_L_ind(self, L):
    """Convert T label matrices with labels in 0...K_t to a one-hot format.

    Here we can view e.g. the (i, j) entries of the T label matrices as
    a _label vector_ emitted by LF j for data point i.

    Args:
        L: a T-length list of [n, m] scipy.sparse label matrices with values
            in {0, 1, ..., k}

    Returns:
        L_ind: An [n, m * k] dense np.ndarray with values in {0, 1}

    Note that no column is required for 0 (abstain) labels.
    """
    # TODO: Update LabelModel to keep L, L_ind, L_aug as sparse matrices
    # throughout and remove this line.
    if issparse(L[0]):
        L = [L_t.todense() for L_t in L]
    # Make sure converted to numpy here
    L = self._to_numpy(L)
    L_ind = np.ones((self.n, self.m * self.k))
    for yi, y in enumerate(self.task_graph.feasible_set()):
        for t in range(self.t):
            # A[x::y] slices A starting at x at intervals of y
            # e.g., np.arange(9)[0::3] == np.array([0,3,6])
            # Column yi stays 1 only where LF output agrees with y[t] or
            # abstains (0), for every task t.
            L_ind[:, yi::self.k] *= np.where(np.logical_or(L[t] == y[t], L[t] == 0), 1, 0)
        # Set LFs that abstained on all feasible label vectors to all 0s
        L_ind[:, yi::self.k] *= np.where(sum(L) != 0, 1, 0)
    return L_ind
|
def _add_raster_layer(self, raster_layer, layer_name, save_style=False):
    """Add a raster layer to the folder.

    :param raster_layer: The layer to add.
    :type raster_layer: QgsRasterLayer
    :param layer_name: The name of the layer in the datastore.
    :type layer_name: str
    :param save_style: If we have to save a QML too. Default to False.
        Not implemented in geopackage!
    :type save_style: bool
    :returns: A two-tuple. The first element will be True if we could add
        the layer to the datastore. The second element will be the layer
        name which has been used or the error message.
    :rtype: (bool, str)

    .. versionadded:: 4.0
    """
    source = gdal.Open(raster_layer.source())
    # NOTE(review): only band 1 is copied, and the output is created as a
    # byte raster — confirm multi-band / other dtypes are out of scope.
    array = source.GetRasterBand(1).ReadAsArray()
    x_size = source.RasterXSize
    y_size = source.RasterYSize
    # APPEND_SUBDATASET lets several raster tables share one geopackage.
    output = self.raster_driver.Create(self.uri.absoluteFilePath(), x_size, y_size, 1, gdal.GDT_Byte, ['APPEND_SUBDATASET=YES', 'RASTER_TABLE=%s' % layer_name])
    output.SetGeoTransform(source.GetGeoTransform())
    output.SetProjection(source.GetProjection())
    output.GetRasterBand(1).WriteArray(array)
    # Once we're done, close properly the dataset
    output = None
    source = None
    return True, layer_name
|
def from_ids(cls, path: PathOrStr, vocab: Vocab, train_ids: Collection[Collection[int]],
             valid_ids: Collection[Collection[int]], test_ids: Collection[Collection[int]] = None,
             train_lbls: Collection[Union[int, float]] = None, valid_lbls: Collection[Union[int, float]] = None,
             classes: Collection[Any] = None, processor: PreProcessor = None, **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from ids, labels and a `vocab`. `kwargs` are passed to the dataloader creation."
    # Ids are already numericalized, so the item lists get empty processors.
    src = ItemLists(path, TextList(train_ids, vocab, path=path, processor=[]),
                    TextList(valid_ids, vocab, path=path, processor=[]))
    # Language-model bunches are labelled for LM; otherwise from the lists.
    src = src.label_for_lm() if cls == TextLMDataBunch else src.label_from_lists(train_lbls, valid_lbls, classes=classes, processor=[])
    # Multi-dimensional labels are treated as one-hot (multi-label) targets.
    if not is1d(train_lbls):
        src.train.y.one_hot, src.valid.y.one_hot = True, True
    if test_ids is not None:
        # The test set gets a placeholder label copied from the train set.
        src.add_test(TextList(test_ids, vocab, path=path), label=train_lbls[0])
    src.valid.x.processor = ifnone(processor, [TokenizeProcessor(), NumericalizeProcessor(vocab=vocab)])
    return src.databunch(**kwargs)
|
def fmt_cell(self, value, width, cell_formating, **text_formating):
    """Format a single table cell.

    Pads `value` to `width` with the requested alignment ('<' by default),
    surrounds it with single spaces, and applies the text formatting via
    `fmt_text` before substituting the value.
    """
    align = cell_formating.get('align', '<')
    pattern = " {:" + "{:s}{:d}".format(align, width) + "s} "
    pattern = self.fmt_text(pattern, **text_formating)
    return pattern.format(value)
|
def fixtags(self, text):
    """Clean up special characters, only run once, next-to-last before doBlockLevels"""
    # NOTE: uses Python 2 `ur''` raw-unicode literals; this function is not
    # valid Python 3 syntax.
    # french spaces, last one Guillemet-left
    # only if there is something before the space
    # NOTE(review): the replacement inserts a plain space — confirm whether
    # a non-breaking space was intended and lost in transit.
    text = _guillemetLeftPat.sub(ur'\1 \2', text)
    # french spaces, Guillemet-right
    text = _guillemetRightPat.sub(ur'\1 ', text)
    return text
|
def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name):
    '''Get the access keys for the specified Cosmos DB account.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        account_name (str): Name of the Cosmos DB account.

    Returns:
        HTTP response. JSON body of Cosmos DB account keys.
    '''
    # POST to the management-plane listKeys action for this account.
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourcegroups/' + rgname
        + '/providers/Microsoft.DocumentDB/databaseAccounts/' + account_name
        + '/listKeys'
        + '?api-version=' + COSMOSDB_API
    )
    return do_post(endpoint, '', access_token)
|
def match_patterns(codedata):
    """Match patterns by shaman.PatternMatcher.

    For every pattern known to ``shaman.PatternMatcher``, compute the
    average match ratio per supported language over the given corpus.

    :param codedata: sequence of ``(language, code)`` pairs
    :return: dict mapping pattern -> {language: average ratio}
    """
    ret = {}
    for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS):
        print('Matching pattern %d "%s"' % (index1 + 1, pattern))
        matcher = shaman.PatternMatcher(pattern)
        tmp = {}
        for index2, (language, code) in enumerate(codedata):
            # Skip unsupported languages and degenerate samples
            if language not in shaman.SUPPORTING_LANGUAGES:
                continue
            if len(code) <= 20 or len(code) > 100000:
                continue
            # setdefault replaces the manual "if language not in tmp" dance
            tmp.setdefault(language, []).append(matcher.getratio(code))
            print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
        # Average per language (the max(..., 1) guard keeps a defensive
        # division-by-zero check, as in the original)
        ret[pattern] = {
            language: sum(ratios) / max(len(ratios), 1)
            for language, ratios in tmp.items()
        }
    print('Matching patterns completed ')
    return ret
|
def ptime(timestr=None, tzone=None, fail=0, fmt="%Y-%m-%d %H:%M:%S"):
    """Translate a %Y-%m-%d %H:%M:%S string into a timestamp.

    :param timestr: string like 2018-03-15 01:27:56, or time.time() if not set.
    :param tzone: time compensation, int(-time.timezone / 3600) by default,
        (can be set with Config.TIMEZONE).
    :param fail: value returned when parsing raises an exception.
    :param fmt: %Y-%m-%d %H:%M:%S, %z not work.
    :rtype: int

    >>> ptime('2018-03-15 01:27:56')
    1521048476
    """
    tzone = Config.TIMEZONE if tzone is None else tzone
    # Shift from mktime()'s local-time interpretation to the requested zone
    fix_tz = -(tzone * 3600 + time.timezone)
    #: str(timestr) for datetime.datetime object
    timestr = str(timestr or ttime())
    try:
        return int(time.mktime(time.strptime(timestr, fmt)) + fix_tz)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; parsing failures still return ``fail``.
        return fail
|
def get_logout_url(self, redirect_url=None):
    """Generates CAS logout URL"""
    logout_url = urllib_parse.urljoin(self.server_url, 'logout')
    if not redirect_url:
        return logout_url
    # Append the post-logout redirect as a query parameter
    query = urllib_parse.urlencode({self.logout_redirect_param_name: redirect_url})
    return logout_url + '?' + query
|
def get_all_keys(tweet, parent_key=''):
    """Takes a tweet object and recursively returns a list of all keys contained
    in this level and all nested levels of the tweet.

    Args:
        tweet (Tweet): the tweet dict
        parent_key (str): key from which this process will start, e.g., you can
            get keys only under some key that is not the top-level key.

    Returns:
        list of all keys in nested dicts, as space-joined paths.
    """
    collected = []
    for key, value in tweet.items():
        path = parent_key + " " + key
        if isinstance(value, dict):
            # Recurse with the un-stripped path so nesting accumulates
            collected += get_all_keys(value, parent_key=path)
        else:
            collected.append(path.strip(" "))
    return collected
|
def send(self, to, subject, body, reply_to=None, **kwargs):
    """Compose an email and queue it for sending via AWS SES.

    :param to: the To: field(s) of the message (string or list of strings).
    :param subject: short summary that appears in the recipient's inbox.
    :param body: the message body.
    :param reply_to: optional reply-to address; falls back to ``self.reply_to``.
    :param kwargs: extra options forwarded to ``send_email`` (e.g.
        ``cc_addresses``, ``bcc_addresses``, ``format``, ``return_path``,
        ``text_body``, ``html_body``).
    :returns: the SES message id (string).
    :raises AttributeError: when no sender address is configured.
    """
    if not self.sender:
        raise AttributeError("Sender email 'sender' or 'source' is not provided")
    kwargs.update(
        to_addresses=to,
        subject=subject,
        body=body,
        source=self._get_sender(self.sender)[0],
        reply_addresses=self._get_sender(reply_to or self.reply_to)[2],
    )
    response = self.ses.send_email(**kwargs)
    return response["SendEmailResponse"]["SendEmailResult"]["MessageId"]
|
def get_feed(self, username):
    """Gets a user's feed.

    :param str username: User to fetch feed from.
    :returns: list of Story objects built from the JSON response.
    """
    response = self._query_('/users/%s/feed' % username, 'GET')
    return [Story(item) for item in response.json()]
|
def create_derivative(self, word):
    '''Creates derivative of (base) word by adding any affixes that apply.

    Strips ``self.char_to_strip`` (when set) from the prefix or suffix end
    of *word* depending on ``self.opt`` ("PFX" or anything else = suffix),
    then attaches ``self.affix`` at the same end.
    '''
    strip = self.char_to_strip
    if self.opt == "PFX":
        # Prefix rule: drop the strip characters from the front, prepend affix
        stem = word[len(strip):] if strip != '' else word
        return self.affix + stem
    # Suffix rule: drop the strip characters from the end, append affix
    stem = word[:len(word) - len(strip)] if strip != '' else word
    return stem + self.affix
|
def dispatch_write(self, buf):
    """There is new stuff to write when possible.

    Returns True when the buffer was handed to the parent class, False when
    this stream is dead or disabled.
    """
    if self.state == STATE_DEAD or not self.enabled:
        return False
    super().dispatch_write(buf)
    return True
|
def create_repo(self, name):
    """Create a new package repository skeleton at *name*.

    Makes the target directory and its ``_upkg`` metadata subdirectory,
    then renders each template in ``TMPLS`` into ``_upkg``, exposing the
    directory's basename as ``pkg_name`` in the template context.

    :param name: path of the repository to create; relative paths are
        resolved against the current working directory.
    """
    name = os.path.abspath(name)
    logger.debug("create_repo %s", name)
    self.safe_mkdir(name)
    udir = os.path.join(name, '_upkg')
    self.safe_mkdir(udir)
    # Template context: the repo's basename doubles as the package name
    ctx = {'pkg_name': os.path.basename(name), }
    for t in TMPLS:
        self.mk_tmpl(os.path.join(udir, t['name']), t['tmpl'], ctx, t.get('mode'))
|
def check_config_mode(self, check_string="config", pattern=""):
    """Checks if the device is in configuration mode or not."""
    # Default the prompt pattern to the (regex-escaped) base prompt
    pattern = pattern or re.escape(self.base_prompt)
    return super(CiscoWlcSSH, self).check_config_mode(check_string, pattern)
|
def iter_parents(self, paths='', **kwargs):
    """Iterate _all_ parents of this commit.

    :param paths:
        Optional path or list of paths limiting the Commits to those that
        contain at least one of the paths
    :param kwargs: All arguments allowed by git-rev-list
    :return: Iterator yielding Commit objects which are parents of self
    """
    # Default to skipping the first entry (this commit itself); a caller
    # passing skip=0 would otherwise include it, so bump that back to 1.
    skip_count = kwargs.get("skip", 1)
    if skip_count == 0:
        skip_count = 1
    kwargs['skip'] = skip_count
    return self.iter_items(self.repo, self, paths, **kwargs)
|
def outformat_is_text():
    """Only safe to call within a click context."""
    # Delegate to the shared CommandState object carried on the context
    state = click.get_current_context().ensure_object(CommandState)
    return state.outformat_is_text()
|
def _parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

    The output of the build_image_data.py image preprocessing script is a dataset
    containing serialized Example protocol buffers. Each Example proto contains
    the following fields (values are included as examples):
        image/height: 462
        image/width: 581
        image/colorspace: 'RGB'
        image/channels: 3
        image/class/label: 615
        image/class/synset: 'n03623198'
        image/class/text: 'knee pad'
        image/object/bbox/xmin: 0.1
        image/object/bbox/xmax: 0.9
        image/object/bbox/ymin: 0.2
        image/object/bbox/ymax: 0.6
        image/object/bbox/label: 615
        image/format: 'JPEG'
        image/filename: 'ILSVRC2012_val_00041207.JPEG'
        image/encoded: <JPEG encoded string>

    Args:
        example_serialized: scalar Tensor tf.string containing a serialized
            Example protocol buffer.

    Returns:
        image_buffer: Tensor tf.string containing the contents of a JPEG file.
        label: Tensor tf.int32 containing the label.

    Note:
        The bounding-box features are declared in the parse spec below but
        are NOT returned by this function — only the encoded image and the
        label are.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
    }
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    # Sparse features in Example proto.
    feature_map.update({k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']})
    features = tf.parse_single_example(example_serialized, feature_map)
    # Labels are stored as int64 in the proto; downstream expects int32
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    return features['image/encoded'], label
|
def _maybe_mask_result(self, result, mask, other, op_name):
    """Apply *mask* to an arithmetic *result*.

    Returns either the plain float array with masked entries set to NaN
    (when the operation is float-producing) or a new masked instance of
    this class.

    Parameters
    ----------
    result : array-like
    mask : array-like bool
    other : scalar or array-like
        The second operand of the op (used only to decide the result kind).
    op_name : str
        Name of the arithmetic op, e.g. 'truediv'.
    """
    # may need to fill infs
    # and mask wraparound
    if is_float_dtype(result):
        # +/-inf produced by the op is treated as masked as well
        mask |= (result == np.inf) | (result == -np.inf)
    # if we have a float operand we are by-definition
    # a float result
    # or our op is a divide
    if ((is_float_dtype(other) or is_float(other)) or (op_name in ['rtruediv', 'truediv', 'rdiv', 'div'])):
        # Float path: fill masked slots with NaN and return the raw array
        result[mask] = np.nan
        return result
    # Integer-like path: wrap result+mask in a new instance of this class
    return type(self)(result, mask, copy=False)
|
def create_agency(self):
    """Create an agency text file of definitions.

    Scrapes the table links, resolves their definition URLs, and writes the
    repr of the resulting dict to ``<agency>.txt``.
    """
    definitions = self.find_definition_urls(self.find_table_links())
    with open(self.agency + '.txt', 'w') as outfile:
        outfile.write(str(definitions))
|
def make_hash(obj):
    '''Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate
    ID's for operations based on their name & arguments.

    :param obj: any nested structure of containers, strings, constants or
        named objects
    :return: result of ``sha1_hash`` over the flattened string form
    '''
    if isinstance(obj, (set, tuple, list)):
        # Containers hash as the concatenation of their elements' hashes.
        # NOTE(review): sets have no defined iteration order — confirm the
        # resulting ID is meant to be order-insensitive for sets.
        hash_string = ''.join([make_hash(e) for e in obj])
    elif isinstance(obj, dict):
        # NOTE(review): relies on dict iteration (insertion) order; dicts
        # with the same content built in different orders hash differently.
        hash_string = ''.join(''.join((key, make_hash(value))) for key, value in six.iteritems(obj))
    else:
        hash_string = (  # Constants - the values can change between hosts but we should still
            # group them under the same operation hash.
            # NOTE(review): ``in`` compares by equality, so 1, 0 and 1.0
            # also match True/False here and collapse into this bucket.
            '_PYINFRA_CONSTANT' if obj in (True, False, None)  # Plain strings
            else obj if isinstance(obj, six.string_types)  # Objects with __name__s
            else obj.__name__ if hasattr(obj, '__name__')  # Objects with names
            else obj.name if hasattr(obj, 'name')  # Repr anything else
            else repr(obj))
    return sha1_hash(hash_string)
|
def preprocess_belscript ( lines ) :
"""Convert any multi - line SET statements into single line SET statements"""
|
set_flag = False
for line in lines :
if set_flag is False and re . match ( "SET" , line ) :
set_flag = True
set_line = [ line . rstrip ( ) ]
# SET following SET
elif set_flag and re . match ( "SET" , line ) :
yield f"{' '.join(set_line)}\n"
set_line = [ line . rstrip ( ) ]
# Blank line following SET yields single line SET
elif set_flag and re . match ( "\s+$" , line ) :
yield f"{' '.join(set_line)}\n"
yield line
set_flag = False
# Append second , third , . . . lines to SET
elif set_flag :
set_line . append ( line . rstrip ( ) )
else :
yield line
|
def get_absolute_path(some_path):
    """Return an appropriate absolute path for *some_path*.

    Absolute inputs are returned unmodified; relative inputs are rendered
    relative to the current working directory.
    """
    if not os.path.isabs(some_path):
        return evaluate_relative_path(os.getcwd(), some_path)
    return some_path
|
def push(self, field):
    '''Add a field to the container, if the field is a Container itself, it should be poped() when done pushing into it

    :param field: BaseField to push
    :return: True
    :raises KittyException: if a named field with the same name already exists
        at this level
    '''
    kassert.is_of_types(field, BaseField)
    # The currently-open nested container (if any) receives the field
    container = self._container()
    field.enclosing = self
    # Track nested containers so subsequent pushes go into the open one
    if isinstance(field, Container):
        self._containers.append(field)
    if container:
        # Delegate to the innermost open container
        container.push(field)
    else:
        name = field.get_name()
        if name in self._fields_dict:
            raise KittyException('field with the name (%s) already exists in this container' % (name))
        if name:
            self._fields_dict[name] = field
        self._fields.append(field)
    return True
|
def git_clone(prettyname: str,
              url: str,
              directory: str,
              branch: str = None,
              commit: str = None,
              clone_options: List[str] = None,
              run_func: Callable[[List[str]], Any] = None) -> bool:
    """Fetches a Git repository, unless we have it already.

    Args:
        prettyname: name to display to user
        url: URL
        directory: destination directory
        branch: repository branch
        commit: repository commit tag
        clone_options: additional options to pass to ``git clone``
        run_func: function to use to call an external command

    Returns:
        did we need to do anything?
    """
    if run_func is None:
        run_func = subprocess.check_call
    if clone_options is None:
        clone_options = []  # type: List[str]
    # Existing checkout wins; nothing to do
    if os.path.isdir(directory):
        log.info("Not re-cloning {} Git repository: using existing source "
                 "in {}".format(prettyname, directory))
        return False
    log.info("Fetching {} source from {} into {}", prettyname, url, directory)
    require_executable(GIT)
    cmd = [GIT, "clone"] + clone_options
    if branch:
        cmd.extend(["--branch", branch])
    cmd.extend([url, directory])
    run_func(cmd)
    if commit:
        log.info("Resetting {} local Git repository to commit {}", prettyname, commit)
        # Using a Git repository that's not in the working directory:
        # https://stackoverflow.com/questions/1386291/git-git-dir-not-working-as-expected  # noqa
        run_func([GIT, "-C", directory, "reset", "--hard", commit])
    return True
|
def _node_is_match ( qualified_name , package_names , fqn ) :
"""Determine if a qualfied name matches an fqn , given the set of package
names in the graph .
: param List [ str ] qualified _ name : The components of the selector or node
name , split on ' . ' .
: param Set [ str ] package _ names : The set of pacakge names in the graph .
: param List [ str ] fqn : The node ' s fully qualified name in the graph ."""
|
if len ( qualified_name ) == 1 and fqn [ - 1 ] == qualified_name [ 0 ] :
return True
if qualified_name [ 0 ] in package_names :
if is_selected_node ( fqn , qualified_name ) :
return True
for package_name in package_names :
local_qualified_node_name = [ package_name ] + qualified_name
if is_selected_node ( fqn , local_qualified_node_name ) :
return True
return False
|
def get_room_config(self, mucjid):
    """Query and return the room configuration form for the given MUC.

    :param mucjid: JID of the room to query
    :type mucjid: bare :class:`~.JID`
    :raises ValueError: if *mucjid* is not a bare JID
    :return: data form template for the room configuration
    :rtype: :class:`aioxmpp.forms.Data`

    .. seealso::
        :class:`~.ConfigurationForm`
        for a form template to work with the returned form

    .. versionadded:: 0.7
    """
    if mucjid is None or not mucjid.is_bare:
        raise ValueError("mucjid must be bare JID")
    # An IQ-get with an OwnerQuery payload asks the service for the form
    iq = aioxmpp.stanza.IQ(type_=aioxmpp.structs.IQType.GET, to=mucjid, payload=muc_xso.OwnerQuery(), )
    # Generator-based coroutine: suspends until the IQ response arrives
    return (yield from self.client.send(iq)).form
|
def record(self):  # type: () -> bytes
    '''Generate the packed byte string representing this UDF Entity ID.

    Parameters:
        None.
    Returns:
        A string representing this UDF Entity ID.
    Raises:
        PyCdlibInternalError: when called before initialization.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Entity ID not initialized')
    packed = struct.pack(self.FMT, self.flags, self.identifier, self.suffix)
    return packed
|
def split_curve(obj, param, **kwargs):
    """Splits the curve at the input parametric coordinate.

    This method splits the curve into two pieces at the given parametric coordinate, generates two different
    curve objects and returns them. It does not modify the input curve.

    Keyword Arguments:
        * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear`
        * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot`

    :param obj: Curve to be split
    :type obj: abstract.Curve
    :param param: parameter
    :type param: float
    :return: a list of curve segments
    :rtype: list
    :raises GeomdlException: if *obj* is not a curve or *param* is a corner knot
    """
    # Validate input
    if not isinstance(obj, abstract.Curve):
        raise GeomdlException("Input shape must be an instance of abstract.Curve class")
    if param == obj.knotvector[0] or param == obj.knotvector[-1]:
        raise GeomdlException("Cannot split on the corner points")
    # Keyword arguments
    span_func = kwargs.get('find_span_func', helpers.find_span_linear)
    # FindSpan implementation
    insert_knot_func = kwargs.get('insert_knot_func', insert_knot)
    # Knot insertion algorithm
    # Find multiplicity of the knot and define how many times we need to add the knot
    ks = span_func(obj.degree, obj.knotvector, len(obj.ctrlpts), param) - obj.degree + 1
    s = helpers.find_multiplicity(param, obj.knotvector)
    r = obj.degree - s
    # Create backups of the original curve
    temp_obj = copy.deepcopy(obj)
    # Insert knot (raises the knot's multiplicity to degree so the curve
    # separates cleanly at param)
    insert_knot_func(temp_obj, [param], num=[r], check_num=False)
    # Knot vectors
    knot_span = span_func(temp_obj.degree, temp_obj.knotvector, len(temp_obj.ctrlpts), param) + 1
    curve1_kv = list(temp_obj.knotvector[0:knot_span])
    curve1_kv.append(param)
    curve2_kv = list(temp_obj.knotvector[knot_span:])
    # The second segment's knot vector starts with degree+1 copies of param
    for _ in range(0, temp_obj.degree + 1):
        curve2_kv.insert(0, param)
    # Control points (use Pw if rational)
    cpts = temp_obj.ctrlptsw if obj.rational else temp_obj.ctrlpts
    # Segments share the control point at the split location
    curve1_ctrlpts = cpts[0:ks + r]
    curve2_ctrlpts = cpts[ks + r - 1:]
    # Create a new curve for the first half
    curve1 = temp_obj.__class__()
    curve1.degree = temp_obj.degree
    curve1.set_ctrlpts(curve1_ctrlpts)
    curve1.knotvector = curve1_kv
    # Create another curve fot the second half
    curve2 = temp_obj.__class__()
    curve2.degree = temp_obj.degree
    curve2.set_ctrlpts(curve2_ctrlpts)
    curve2.knotvector = curve2_kv
    # Return the split curves
    ret_val = [curve1, curve2]
    return ret_val
|
def hide_node(self, node):
    """Hides a node from the graph. The incoming and outgoing edges of the
    node will also be hidden. The node may be unhidden at some later time.
    """
    try:
        edges = self.all_edges(node)
        # Remember the node's data and edges so it can be restored later
        self.hidden_nodes[node] = (self.nodes[node], edges)
        for edge in edges:
            self.hide_edge(edge)
        del self.nodes[node]
    except KeyError:
        raise GraphError('Invalid node %s' % node)
|
def iterative_select(obj, dimensions, selects, depth=None):
    """Takes the output of group_select selecting subgroups iteratively,
    avoiding duplicating select operations.
    """
    ndims = len(dimensions)
    remaining = ndims if depth is None else depth
    if isinstance(selects, dict):
        # Intermediate level: select each subgroup once, then recurse
        collected = []
        for key, sub in selects.items():
            subobj = obj.select(**{dimensions[ndims - remaining]: key})
            collected += iterative_select(subobj, dimensions, sub, remaining - 1)
        return collected
    # Leaf level: pair each selection key with its selected object
    return [(sel, obj.select(**{dimensions[-1]: sel[-1]})) for sel in selects]
|
def write(self, byte):
    """Write a byte buffer to the underlying output file.

    :param byte: bytes-like buffer to write
    :raises ValueError: when the file has already been closed
        (ValueError mirrors the stdlib convention for operating on a closed
        file and is still caught by any existing ``except Exception``).
    """
    if self.is_closed_flag:
        raise ValueError("Unable to write - already closed!")
    self.written += len(byte)
    self.file.write(byte)
|
def replace_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs):
    """Replace DiscountPromotion

    Replace all attributes of DiscountPromotion.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True:

        >>> thread = api.replace_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True)
        >>> result = thread.get()

    :param async bool
    :param str discount_promotion_id: ID of discountPromotion to replace (required)
    :param DiscountPromotion discount_promotion: Attributes of discountPromotion to replace (required)
    :return: DiscountPromotion
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths invoke the same helper with identical
    # arguments; the helper returns a thread when async is set in kwargs.
    return cls._replace_discount_promotion_by_id_with_http_info(
        discount_promotion_id, discount_promotion, **kwargs)
|
def weld_agg(array, weld_type, aggregations):
    """Multiple aggregations, optimized.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input array.
    weld_type : WeldType
        Type of each element in the input array.
    aggregations : list of str
        Which aggregations to compute.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    from functools import reduce
    obj_id, weld_obj = create_weld_object(array)
    # find which aggregation computations are actually needed
    # (the requested aggregations plus everything they depend on)
    to_compute = reduce(lambda x, y: x | y, ({agg} | _agg_dependencies[agg] for agg in aggregations))
    # get priorities and sort in the proper order of computation
    to_compute = sorted(((agg, _agg_priorities[agg]) for agg in to_compute), key=lambda x: x[1])
    # remove the priorities
    to_compute = (agg_pair[0] for agg_pair in to_compute)
    aggs = '\n'.join(('let agg_{} = {};'.format(agg, _agg_code[agg]) for agg in to_compute))
    # these are the aggregations requested
    merges = '\n'.join(('let res = merge(res, {});'.format('agg_{}'.format(agg)) for agg in aggregations))
    mergers = """let res = appender[f64];
{merges}
result(res)
"""
    mergers = mergers.format(merges=merges)
    weld_template = '{}\n{}'.format(aggs, mergers)
    # The combined template still carries {array}/{type} placeholders coming
    # from the per-aggregation code snippets; fill them in last.
    weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type)
    return weld_obj
|
def make_response(self, data: Any = None, **kwargs: Any) -> Any:
    r"""Validate response data and wrap it inside response factory.

    :param data: Response data. Could be ommited.
    :param \*\*kwargs: Keyword arguments to be passed to response factory.
    """
    if not self._valid_request:
        logger.error('Request not validated, cannot make response')
        raise self.make_error('Request not validated before, cannot make '
                              'response')
    if data is None and self.response_factory is None:
        logger.error('Response data omit, but no response factory is used')
        raise self.make_error('Response data could be omitted only when '
                              'response factory is used')
    # Validate against the module's response schema when one is declared
    response_schema = getattr(self.module, 'response', None)
    if response_schema is not None:
        self._validate(data, response_schema)
    if self.response_factory is None:
        return data
    factory_args = [data] if data is not None else []
    return self.response_factory(*factory_args, **kwargs)
|
def get_dir_indices(msg, dirs):
    '''Return path(s) indices of directory list from user input.

    Args
        msg : str
            String with message to display before pass selection input
        dirs : array-like
            list of paths to be displayed and selected from
    Return
        input_dir_indices : array-like
            list of index positions of user selected path from input
    '''
    # (removed an unused local ``import os``)
    # Get user input for paths to process
    usage = ('\nEnter numbers preceeding paths seperated by commas (e.g. '
             '`0,2,3`).\nTo select all paths type `all`.\nSingle directories '
             'can also be entered (e.g. `0`)\n\n')
    # Generate paths text to display
    dirs_str = ''.join('{:2} {:60}\n'.format(i, p) for i, p in enumerate(dirs))
    # Concatenate `msg`, usage txt, and paths list for display before input
    input_dirs = recursive_input(''.join([msg, usage, dirs_str, '\n']), str)
    # Determine type of input
    if ',' in input_dirs:
        input_dir_indices = [int(x.strip()) for x in input_dirs.split(',')]
    elif 'all' in input_dirs:
        input_dir_indices = range(len(dirs))
    else:
        try:
            input_dir_indices = [int(input_dirs.strip()), ]
        except ValueError as exc:
            # Narrowed from a bare ``except:``; int() raises ValueError on
            # non-numeric input, and the cause is chained for debugging.
            raise ValueError('Could not determine input type for input: '
                             '{}'.format(input_dirs)) from exc
    return input_dir_indices
|
def create_ethereum_client(uri, timeout=60, *, loop=None):
    """Create client to ethereum node based on schema.

    :param uri: Host on ethereum node
    :type uri: str
    :param timeout: An optional total time of timeout call
    :type timeout: int
    :param loop: An optional *event loop* instance
        (uses :func:`asyncio.get_event_loop` if not specified).
    :type loop: :ref:`EventLoop <asyncio-event-loop>`
    :raises RuntimeError: when the URI scheme is not ipc/unix/http/https
    :return: :class:`BaseAsyncIOClient` instance.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    presult = urlparse(uri)
    if presult.scheme in ('ipc', 'unix'):
        # Local node: talk over a UNIX domain socket
        # (generator-based coroutine — suspends until the socket opens)
        reader, writer = yield from asyncio.open_unix_connection(presult.path, loop=loop)
        return AsyncIOIPCClient(reader, writer, uri, timeout, loop=loop)
    elif presult.scheme in ('http', 'https'):
        tls = presult.scheme[-1] == 's'
        # netloc may carry an explicit port; default to 443/80 otherwise
        netloc = presult.netloc.split(':')
        host = netloc.pop(0)
        port = netloc.pop(0) if netloc else (443 if tls else 80)
        return AsyncIOHTTPClient(host, port, tls, timeout, loop=loop)
    else:
        raise RuntimeError('This scheme does not supported.')
|
def _namematcher ( regex ) :
"""Checks if a target name matches with an input regular expression ."""
|
matcher = re_compile ( regex )
def match ( target ) :
target_name = getattr ( target , '__name__' , '' )
result = matcher . match ( target_name )
return result
return match
|
def init_default(self):
    """Initializes object with its default values.

    Tries to load ``self.default_filename`` from the default data directory.
    For safety, ``filename`` is reset to None so that it doesn't point to
    the original file.
    """
    import f311
    if self.default_filename is None:
        raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
    default_path = f311.get_default_data_path(self.default_filename, class_=self.__class__)
    self.load(default_path)
    # Detach from the packaged default so later saves cannot clobber it
    self.filename = None
|
def limit(s, length=72):
    """If the length of the string exceeds the given limit, it will be cut
    off and three dots will be appended.

    @param s: the string to limit
    @type s: string
    @param length: maximum length
    @type length: non-negative integer
    @return: limited string, at most length + 3 characters long
    """
    assert length >= 0, "length limit must be a non-negative integer"
    if not s or len(s) <= length:
        return s
    # A zero limit leaves no room even for the ellipsis
    if length == 0:
        return ""
    return s[:length] + "..."
|
def unmount(self, cid):
    """Unmounts and cleans-up after a previous mount()."""
    driver = self.client.info()['Driver']
    # Dispatch to the driver-specific handler (e.g. _unmount_<driver>),
    # falling back to the unsupported-backend handler
    handler = getattr(self, "_unmount_" + driver, self._unsupported_backend)
    handler(cid)
|
def report():
    """Show analysis report of the specified image(s).

    The analysis report includes information on:
    Image Id - The image id (as a hash)
    Type - The type of image (--imagetype option used when anchore analyze was run)
    CurrentTags - The current set of repo tags on the image
    AllTags - The set of all repo tags that have been on the image during analysis passes
    GateStatus - The overall aggregate gate output status: GO|STOP|WARN
    Size - The size in bytes of the image on disk
    Counts - The counts for various attributes of the images such as packages, files, and suid files
    BaseDiffs - Differences of this image from its base image
    Report outputs these entries in a table format by default.
    """
    ecode = 0
    try:
        nav = init_nav_contexts()
        result = nav.generate_reports()
        # result = generate_reports(imagelist, showall=all, showdetails=details)
        if result:
            anchore_utils.print_result(config, result)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; operational failures map to exit code 1.
        anchore_print_err("operation failed")
        ecode = 1
    contexts['anchore_allimages'].clear()
    sys.exit(ecode)
|
def create_aws_clients(region, profile, *clients):
    """Create boto3 clients for one or more AWS services. These are the services used within the libs:
    cloudformation, cloudfront, ec2, iam, lambda, route53, waf

    Args:
        region: the region in which to create clients that are region-specific (all but IAM)
        profile: Name of profile (in .aws/credentials). Pass the value None if using instance credentials on EC2 or Lambda
        clients: names of the clients to create (lowercase, must match what boto3 expects)

    Returns:
        A dictionary of <key>, <value> pairs for several AWS services, using the labels above as keys, e.g.:
        {"cloudfront": <cloudfront_client>, ...}
        Dictionary contains an extra record, "SESSION" - pointing to the session that created the clients
    """
    # Normalize empty-string profile to None so boto3 uses default credentials
    if not profile:
        profile = None
    # Clients are cached per (region, profile) so repeat calls are cheap
    client_key = (region, profile)
    aws_clients = client_cache.get(client_key, {})
    requested_clients = set(clients)
    new_clients = requested_clients.difference(aws_clients)
    if not new_clients:
        return aws_clients
    session = aws_clients.get("SESSION")
    try:
        if not session:
            session = boto3.Session(region_name=region, profile_name=profile)
            aws_clients["SESSION"] = session
        # build clients
        client_dict = {c: session.client(c) for c in new_clients}
        # append the session itself in case it's needed by the client code - can't get it from the clients themselves
        aws_clients.update(client_dict)
        # add the created clients to the cache
        client_cache[client_key] = aws_clients
        return aws_clients
    except ClientError as error:
        # NOTE(review): if ``aws_clients`` was already the cached dict, the
        # SESSION entry added above persists even on failure — confirm that
        # partial caching is intended.
        raise RuntimeError("Exception logging in with Session() and creating clients", error)
|
def setupRenderModels(self):
    "Purpose: Create/destroy GL Render Models"
    self.m_rTrackedDeviceToRenderModel = [None] * openvr.k_unMaxTrackedDeviceCount
    if self.m_pHMD is None:
        return
    # Skip the HMD index itself; only the remaining tracked devices
    # (controllers, trackers, ...) get render models
    for device_index in range(openvr.k_unTrackedDeviceIndex_Hmd + 1, openvr.k_unMaxTrackedDeviceCount):
        if self.m_pHMD.isTrackedDeviceConnected(device_index):
            self.setupRenderModelForTrackedDevice(device_index)
|
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
    '''Check the template shebang line and return the list of renderers specified
    in the pipe.

    Example shebang lines::

        #!yaml_jinja
        #!yaml_mako
        #!mako|yaml
        #!jinja|yaml
        #!jinja|mako|yaml
        #!mako|yaml|stateconf
        #!jinja|yaml|stateconf
        #!mako|yaml_odict
        #!mako|yaml_odict|stateconf
    '''
    # Grab the first line of the sls template (or of the raw input data)
    if template == ':string:':
        first_line = input_data.split()[0]
    else:
        with salt.utils.files.fopen(template, 'r') as ifile:
            first_line = salt.utils.stringutils.to_unicode(ifile.readline())
    # A shebang that is not a path (#!/...) names the render pipe directly.
    # If the shebang does not contain recognized/not-blacklisted/whitelisted
    # renderers, do not fall back to the default renderer.
    if first_line.startswith('#!') and not first_line.startswith('#!/'):
        return check_render_pipe_str(first_line.strip()[2:], renderers, blacklist, whitelist)
    return check_render_pipe_str(default, renderers, blacklist, whitelist)
|
def create_node_tables(self):
    """Create node and link tables (and their indexes) if they don't exist."""
    # All schema statements run in order; PRAGMA first so FK checks apply.
    statements = (
        'PRAGMA foreign_keys=1',
        '''
        CREATE TABLE IF NOT EXISTS datasets (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        key TEXT NOT NULL
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS nodes (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        value TEXT NOT NULL
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS links (
        dataset REFERENCES datasets (id),
        source REFERENCES nodes (id),
        target REFERENCES nodes (id),
        value TEXT,
        bvalue TEXT,
        count INTEGER NOT NULL DEFAULT 1
        )
        ''',
        # node values are unique; link lookups happen by endpoint + dataset.
        'CREATE UNIQUE INDEX IF NOT EXISTS node ON nodes (value)',
        'CREATE INDEX IF NOT EXISTS link_source ON links (source, dataset)',
        'CREATE INDEX IF NOT EXISTS link_target ON links (target, dataset)',
    )
    for sql in statements:
        self.cursor.execute(sql)
|
def udp_recv(udpsock, peername, peerlen):
    """Receive a zframe from a UDP socket and set the address of the peer
    that sent it. The peername must be a char [INET_ADDRSTRLEN] array if
    IPv6 is disabled or NI_MAXHOST if it's enabled. Returns NULL when
    failing to get the peer address.
    *** This is for CZMQ internal use only and may change arbitrarily ***
    """
    frame_ptr = lib.zsys_udp_recv(udpsock, peername, peerlen)
    # Wrap the raw pointer; False => the wrapper does not own/destroy it.
    return Zframe(frame_ptr, False)
|
def format_plugin(plugin):
    """Serialise `plugin` into a plain dictionary.

    Attributes:
        name: Name of Python class
        id: Unique identifier
        version: Plug-in version
        category: Optional category
        requires: Plug-in requirements
        order: Plug-in order
        optional: Is the plug-in optional?
        doc: The plug-in documentation
        hasRepair: Can the plug-in perform a repair?
        hasCompatible: Does the plug-in have any compatible instances?
        type: Which baseclass does the plug-in stem from? E.g. Validator
        module: File in which plug-in was defined
        contextEnabled: Does it process the Context?
        instanceEnabled: Does it process Instance(s)?
    """
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # (same .args attribute) and fall back for legacy interpreters.
    getargspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec

    # Classify the plug-in by which order range its `order` falls into.
    plugin_type = "Other"
    for order, _type in {pyblish.plugin.CollectorOrder: "Collector",
                         pyblish.plugin.ValidatorOrder: "Validator",
                         pyblish.plugin.ExtractorOrder: "Extractor",
                         pyblish.plugin.IntegratorOrder: "Integrator"}.items():
        if pyblish.lib.inrange(plugin.order, base=order):
            plugin_type = _type

    module = plugin.__module__
    if module == "__main__":
        # Support for in-memory plug-ins.
        path = "mem:%s" % plugin.__name__
    else:
        try:
            path = os.path.abspath(sys.modules[module].__file__)
        except Exception:
            path = "unknown"

    # A plug-in can repair if its repair() signature takes context/instance,
    # or if it exposes the legacy repair_context/repair_instance hooks.
    has_repair = False
    args = getargspec(plugin.repair).args
    if "context" in args or "instance" in args:
        has_repair = True
    if hasattr(plugin, "repair_context") or hasattr(plugin, "repair_instance"):
        has_repair = True

    output = {
        "label": plugin.label,
        "id": plugin.id,
        "version": plugin.version,
        "category": getattr(plugin, "category", None),
        "requires": plugin.requires,
        "order": plugin.order,
        "optional": plugin.optional,
        "hosts": plugin.hosts,
        "families": plugin.families,
        # New in pyblish-base 1.5.2
        "targets": getattr(plugin, "targets", list()),
        "doc": inspect.getdoc(plugin),
        "active": plugin.active,
        "match": plugin.match,
        # Metadata
        "__pre11__": plugin.__pre11__,
        "__contextEnabled__": plugin.__contextEnabled__,
        "__instanceEnabled__": plugin.__instanceEnabled__,
        "path": path,
        "pre11": plugin.__pre11__,
        "contextEnabled": plugin.__contextEnabled__,
        "instanceEnabled": plugin.__instanceEnabled__,
        "name": plugin.__name__,
        "type": plugin_type,
        "module": module,
        "hasRepair": has_repair,
        "process": {"args": getargspec(plugin.process).args, },
        "repair": {"args": getargspec(plugin.repair).args, },
        "actions": [format_action(a) for a in plugin.actions],
    }
    # Optional strict validation of the serialised form.
    if os.getenv("PYBLISH_SAFE"):
        schema.validate(output, "plugin")
    return output
|
def update(self, date, data=None, inow=None):
    """Update security with a given date and optionally, some data.
    This will update price, value, weight, etc.

    Args:
        date: Current timestamp/index label being processed.
        data: Optional row of market data keyed by security name;
            used to set the price when prices were not pre-set.
        inow: Optional integer location of ``date`` in the data index;
            looked up via ``get_loc`` when not supplied.
    """
    # filter for internal calls when position has not changed - nothing to
    # do. Internal calls (stale root calls) have None data. Also want to
    # make sure date has not changed, because then we do indeed want to
    # update.
    if date == self.now and self._last_pos == self._position:
        return
    if inow is None:
        if date == 0:
            inow = 0
        else:
            inow = self.data.index.get_loc(date)
    # date change - update price
    if date != self.now:
        # update now
        self.now = date
        if self._prices_set:
            # prices were supplied up-front; just read the current row
            self._price = self._prices.values[inow]
        # traditional data update
        elif data is not None:
            prc = data[self.name]
            self._price = prc
            self._prices.values[inow] = prc
    # record position for this row and remember it for the stale-call filter
    self._positions.values[inow] = self._position
    self._last_pos = self._position
    if np.isnan(self._price):
        if self._position == 0:
            # flat position: value is trivially zero even without a price
            self._value = 0
        else:
            raise Exception(
                'Position is open (non-zero) and latest price is NaN '
                'for security %s. Cannot update node value.' % self.name)
    else:
        self._value = self._position * self._price * self.multiplier
    self._values.values[inow] = self._value
    # nothing left to recompute until the position/weight changes again
    if self._weight == 0 and self._position == 0:
        self._needupdate = False
    # save outlay to outlays
    if self._outlay != 0:
        self._outlays.values[inow] = self._outlay
        # reset outlay back to 0
        self._outlay = 0
|
def params(self):
    """Return a *copy* (we hope) of the parameters.
    DANGER: Altering properties directly doesn't call model._cache"""
    merged = odict([])
    # Flatten every sub-model's parameters into one ordered mapping.
    for model in self.models.values():
        merged.update(model.params)
    return merged
|
def GetMemSharedSavedMB(self):
    '''Retrieves the estimated amount of physical memory on the host saved
    from copy-on-write (COW) shared guest physical memory.'''
    saved = c_uint()
    # The C API writes the result through the out-parameter and returns
    # a status code.
    status = vmGuestLib.VMGuestLib_GetMemSharedSavedMB(self.handle.value, byref(saved))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return saved.value
|
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    # Only include the identifiers the caller actually supplied.
    payload = {key: value
               for key, value in (('job_name', job_name), ('job_id', job_id))
               if value is not None}
    jobs_url = self._get_url('jobs_path')
    res = self.rest_client.session.delete(jobs_url, params=payload)
    _handle_http_errors(res)
    return res.json()
|
def bs(self):
    """:example 'integrate extensible convergence'"""
    # Pick one random word from each word list and join with spaces.
    return " ".join(self.random_element(word_list)
                    for word_list in self.bsWords)
|
def get_port_profile_for_intf_input_request_type_get_request_interface_type(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    # The generated code replaces the placeholder root with the RPC element.
    root = ET.Element("get_port_profile_for_intf")
    config = root
    input_el = ET.SubElement(root, "input")
    request_type = ET.SubElement(input_el, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    interface_type = ET.SubElement(get_request, "interface-type")
    interface_type.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def output_text(account, all_data, show_hourly=False):
    """Format data to get a readable output.

    Prints a human-readable report for each contract in ``all_data``
    (a mapping of contract id -> data dict). Nothing is returned; all
    output goes to stdout.
    """
    print("""
#################################
# Hydro Quebec data for account #
# {}
#################################""".format(account))
    for contract, data in all_data.items():
        data['contract'] = contract
        # Replace missing billing figures with 0.0 so the ":.2f" format
        # specs below don't fail on None.
        if data['period_total_bill'] is None:
            data['period_total_bill'] = 0.0
        if data['period_projection'] is None:
            data['period_projection'] = 0.0
        if data['period_mean_daily_bill'] is None:
            data['period_mean_daily_bill'] = 0.0
        output = ("""
----------------------------------------------------------------
Contract: {d[contract]}
===================
Balance: {d[balance]:.2f} $
Period Info
===========
Period day number: {d[period_length]:d}
Period total days: {d[period_total_days]:d} days
Period current bill
===================
Total Bill: {d[period_total_bill]:.2f} $
Projection bill: {d[period_projection]:.2f} $
Mean Daily Bill: {d[period_mean_daily_bill]:.2f} $
Total period consumption
========================
Lower price: {d[period_lower_price_consumption]:.2f} kWh
Higher price: {d[period_higher_price_consumption]:.2f} kWh
Total: {d[period_total_consumption]:.2f} kWh
Mean daily: {d[period_mean_daily_consumption]:.2f} kWh""")
        print(output.format(d=data))
        # Temperature data is optional; only print when present.
        if data.get("period_average_temperature") is not None:
            output2 = ("""Temperature: {d[period_average_temperature]:d} °C""")
            print(output2.format(d=data))
        if data.get("yesterday_average_temperature") is not None:
            output3 = ("""
Yesterday consumption
=====================
Temperature: {d[yesterday_average_temperature]:d} °C
Lower price: {d[yesterday_lower_price_consumption]:.2f} kWh
Higher price: {d[yesterday_higher_price_consumption]:.2f} kWh
Total: {d[yesterday_total_consumption]:.2f} kWh""")
            print(output3.format(d=data))
        # NOTE(review): "comsumption" below is a typo in the user-facing
        # output; kept byte-identical here since it is runtime text.
        if show_hourly:
            msg = ("""
Yesterday consumption details
-----------------------------
Hour | Temperature | Lower price consumption | Higher price consumption | total comsumption
""")
            for hdata in data['yesterday_hourly_consumption']:
                msg += ("{d[hour]} | {d[temp]:8d} °C | {d[lower]:19.2f} kWh | "
                        "{d[high]:20.2f} kWh | {d[total]:.2f} kWh\n").format(d=hdata)
            print(msg)
        # Annual summary only when a total bill exists (truthy).
        if data['annual_total_bill']:
            output3 = ("""
Annual Total
============
Start date: {d[annual_date_start]}
End date: {d[annual_date_end]}
Total bill: {d[annual_total_bill]} $
Mean daily bill: {d[annual_mean_daily_bill]} $
Total consumption: {d[annual_total_consumption]} kWh
Mean dailyconsumption: {d[annual_mean_daily_consumption]} kWh
kWh price: {d[annual_kwh_price_cent]} ¢
""")
            print(output3.format(d=data))
|
def do_resolve(self, definitions):
    """Resolve named references to other WSDL objects. Ports without SOAP
    bindings are discarded.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    """
    soap_ports = []
    for port in self.ports:
        ref = qualify(port.binding, self.root, definitions.tns)
        binding = definitions.bindings.get(ref)
        if binding is None:
            raise Exception("binding '%s', not-found" % (port.binding,))
        if binding.soap is None:
            # Non-SOAP ports are dropped from self.ports entirely.
            log.debug("binding '%s' - not a SOAP binding, discarded", binding.name)
            continue
        # After we have been resolved, our caller will expect that the
        # binding we are referencing has been fully constructed, i.e.
        # resolved, as well. The only scenario where the operations binding
        # might possibly not have already resolved its references, and
        # where this explicit resolve() call is required, is if we are
        # dealing with a recursive WSDL import chain.
        binding.resolve(definitions)
        port.binding = binding
        soap_ports.append(port)
    self.ports = soap_ports
|
def post_periodic_filtered(values, repeat_after, block):
    """After every *repeat_after* items, blocks the next *block* items from
    *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
    can't be 0. For example, to block every tenth item read from an ADC::

        from gpiozero import MCP3008
        from gpiozero.tools import post_periodic_filtered
        adc = MCP3008(channel=0)
        for value in post_periodic_filtered(adc, 9, 1):
            print(value)
    """
    values = _normalize(values)
    if repeat_after < 1:
        raise ValueError("repeat_after must be 1 or larger")
    if block < 1:
        raise ValueError("block must be 1 or larger")
    source = iter(values)
    try:
        while True:
            # Pass through repeat_after items...
            for _ in range(repeat_after):
                yield next(source)
            # ...then silently consume (drop) the next `block` items.
            for _ in range(block):
                next(source)
    except StopIteration:
        # Source exhausted: end the generator cleanly (PEP 479 safe).
        return
|
def setDropdown(self, label, default=None, options=None, description='Set Dropdown', format='text'):
    """Set a dropdown selection in a notebook pipeline (via interaction or
    with nbconvert).

    Args:
        label: Key under which the selected value is loaded/saved.
        default: Value to use when nothing was previously saved.
        options: Iterable of choices for the dropdown (defaults to empty).
        description: Widget label shown next to the dropdown.
        format: Save format forwarded to ``self.save``.
    """
    # `options=[]` as a default argument would be shared across calls
    # (mutable-default pitfall); use None as the sentinel instead.
    if options is None:
        options = []
    obj = self.load(label)
    if obj is None:  # `is None`, not `== None`
        obj = default
    self.save(obj, label)
    # initialize with default
    dropdownw = Dropdown(value=obj, options=options, description=description)
    hndl = interact(self.save, obj=dropdownw, label=fixed(label), format=fixed(format))
|
def heightmap_add_voronoi(
    hm: np.ndarray,
    nbPoints: Any,
    nbCoef: int,
    coef: Sequence[float],
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Add values from a Voronoi diagram to the heightmap.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        nbPoints (Any): Number of Voronoi sites.
        nbCoef (int): The diagram value is calculated from the nbCoef
            closest sites.
        coef (Sequence[float]): The distance to each site is scaled by the
            corresponding coef.
            Closest site: coef[0],
            second closest site: coef[1], ...
        rnd (Optional[Random]): A Random instance, or None.
    """
    # NOTE(review): the nbPoints argument is immediately overwritten with
    # len(coef), so the value passed by the caller is ignored — confirm
    # this matches the intended upstream behavior before relying on it.
    nbPoints = len(coef)
    ccoef = ffi.new("float[]", coef)
    # When rnd is None, pass NULL so the C library uses its default RNG.
    lib.TCOD_heightmap_add_voronoi(
        _heightmap_cdata(hm),
        nbPoints,
        nbCoef,
        ccoef,
        rnd.random_c if rnd else ffi.NULL,
    )
|
def stem(self, s):
    """This should make the Stemmer picklable and unpicklable by not using bound methods"""
    # No stemmer configured: return the token unchanged.
    if self._stemmer is None:
        return passthrough(s)
    try:
        # try the local attribute `stemmer`, a StemmerI instance first
        # if you use the self.stem method from an unpickled object it may not work
        # NOTE(review): if `_stemmer` lacks a `stem` attribute this evaluates
        # None(s); the resulting TypeError is deliberately caught below.
        return getattr(getattr(self, '_stemmer', None), 'stem', None)(s)
    except (AttributeError, TypeError):
        # Fall back to a lemmatizer-style interface on `_stemmer` (or on
        # self itself), and finally to the identity function.
        return getattr(getattr(self, '_stemmer', self), 'lemmatize', passthrough)(s)
|
def setup_logging(verbosity, no_color, user_log_file):
    """Configures and sets up all of the logging

    Returns the requested logging level, as its integer value.

    Args:
        verbosity: Integer verbosity knob; >=1 means DEBUG, 0 INFO,
            -1 WARNING, -2 ERROR, <=-3 CRITICAL.
        no_color: Disable colorized console output.
        user_log_file: Optional path for an extra DEBUG-level log file.
    """
    # Determine the level to be logging at.
    if verbosity >= 1:
        level = "DEBUG"
    elif verbosity == -1:
        level = "WARNING"
    elif verbosity == -2:
        level = "ERROR"
    elif verbosity <= -3:
        level = "CRITICAL"
    else:
        level = "INFO"
    level_number = getattr(logging, level)
    # The "root" logger should match the "console" level *unless* we also need
    # to log to a user log file.
    include_user_log = user_log_file is not None
    if include_user_log:
        additional_log_file = user_log_file
        root_level = "DEBUG"
    else:
        # /dev/null keeps the handler definition valid while discarding output.
        additional_log_file = "/dev/null"
        root_level = level
    # Disable any logging besides WARNING unless we have DEBUG level logging
    # enabled for vendored libraries.
    vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
    # Shorthands for clarity
    log_streams = {"stdout": "ext://sys.stdout", "stderr": "ext://sys.stderr", }
    handler_classes = {"stream": "pip._internal.utils.logging.ColorizedStreamHandler", "file": "pip._internal.utils.logging.BetterRotatingFileHandler", }
    # Single declarative config: console (stdout, <WARNING only), console_errors
    # (stderr, >=WARNING), and the optional delayed user_log file handler.
    logging.config.dictConfig({"version": 1, "disable_existing_loggers": False, "filters": {"exclude_warnings": {"()": "pip._internal.utils.logging.MaxLevelFilter", "level": logging.WARNING, }, }, "formatters": {"indent": {"()": IndentingFormatter, "format": "%(message)s", }, "indent_with_timestamp": {"()": IndentingFormatter, "format": "%(message)s", "add_timestamp": True, }, }, "handlers": {"console": {"level": level, "class": handler_classes["stream"], "no_color": no_color, "stream": log_streams["stdout"], "filters": ["exclude_warnings"], "formatter": "indent", }, "console_errors": {"level": "WARNING", "class": handler_classes["stream"], "no_color": no_color, "stream": log_streams["stderr"], "formatter": "indent", }, "user_log": {"level": "DEBUG", "class": handler_classes["file"], "filename": additional_log_file, "delay": True, "formatter": "indent_with_timestamp", }, }, "root": {"level": root_level, "handlers": ["console", "console_errors"] + (["user_log"] if include_user_log else []), }, "loggers": {"pip._vendor": {"level": vendored_log_level}}, })
    return level_number
|
def plot_magnification(self, labels=None, which_indices=None, resolution=60, marker='<>^vsd', legend=True, plot_limits=None, updates=False, mean=True, covariance=True, kern=None, num_samples=1000, scatter_kwargs=None, plot_scatter=True, **imshow_kwargs):
    """Plot the magnification factor of the GP on the inputs. This is the
    density of the GP as a gray scale.

    :param array-like labels: a label for each data point (row) of the inputs
    :param (int, int) which_indices: which input dimensions to plot against each other
    :param int resolution: the resolution at which we predict the magnification factor
    :param str marker: markers to use - cycle if more labels then markers are given
    :param bool legend: whether to plot the legend on the figure
    :param plot_limits: the plot limits for the plot
    :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))
    :param bool updates: if possible, make interactive updates using the specific library you are using
    :param bool mean: use the mean of the Wishart embedding for the magnification factor
    :param bool covariance: use the covariance of the Wishart embedding for the magnification factor
    :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction
    :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
    :param imshow_kwargs: the kwargs for the imshow (magnification factor)
    :param kwargs: the kwargs for the scatter plots
    """
    # Chained assignment: which_indices becomes the selected (dim1, dim2)
    # pair, and the two dims are also unpacked for axis labels below.
    input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]
    X = get_x_y_var(self)[0]
    # Build the prediction grid and the plot limits for the chosen dims.
    _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)
    canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]), xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)
    plots = {}
    if legend and plot_scatter:
        # With labels, choose a subplot layout for the legend; without,
        # use a single dummy label and suppress the legend entirely.
        if (labels is not None):
            legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]
        else:
            labels = np.ones(self.num_data)
            legend = False
    if plot_scatter:
        plots['scatters'] = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})
    plots['view'] = _plot_magnification(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, mean, covariance, kern, **imshow_kwargs)
    retval = pl().add_to_canvas(canvas, plots, legend=legend, )
    # Blocks/attaches for interactive updating when requested.
    _wait_for_updates(plots['view'], updates)
    return retval
|
def time_range(self, flag=None):
    '''time range of the current dataset
    :keyword flag: use a flag array to know the time range of an indexed slice of the object'''
    # Empty dataset: no meaningful range to report.
    if self.count == 0:
        return [[None, None], [None, None]]
    # Optionally restrict to the flagged subset before taking min/max.
    dates = self.date if flag is None else self.date.compress(flag)
    return cnes_convert([dates.min(), dates.max()])
|
def _find_keep_files ( root , keep ) :
'''Compile a list of valid keep files ( and directories ) .
Used by _ clean _ dir ( )'''
|
real_keep = set ( )
real_keep . add ( root )
if isinstance ( keep , list ) :
for fn_ in keep :
if not os . path . isabs ( fn_ ) :
continue
fn_ = os . path . normcase ( os . path . abspath ( fn_ ) )
real_keep . add ( fn_ )
while True :
fn_ = os . path . abspath ( os . path . dirname ( fn_ ) )
real_keep . add ( fn_ )
drive , path = os . path . splitdrive ( fn_ )
if not path . lstrip ( os . sep ) :
break
return real_keep
|
def call_repeatedly(func, interval, *args, **kwargs):
    """Call a function at interval

    Returns both the thread object and the loop stopper Event.
    """
    main_thread = threading.current_thread()
    stop_event = threading.Event()

    def _runner():
        # wait() returns False on timeout, so the first call happens after
        # `interval` seconds; stop when the event is set or the main thread
        # has exited.
        while not stop_event.wait(interval) and main_thread.is_alive():
            func(*args, **kwargs)

    worker = threading.Thread(target=_runner, daemon=True)
    worker.start()
    # Make sure the loop is stopped at interpreter shutdown.
    atexit.register(stop_event.set)
    return worker, stop_event.set
|
def _sample_in_stratum ( self , stratum_idx , replace = True ) :
"""Sample an item uniformly from a stratum
Parameters
stratum _ idx : int
stratum index to sample from
replace : bool , optional , default True
whether to sample with replacement
Returns
int
location of the randomly selected item in the original input array"""
|
if replace :
stratum_loc = np . random . choice ( self . sizes_ [ stratum_idx ] )
else : # Extract only the unsampled items
stratum_locs = np . where ( ~ self . _sampled [ stratum_idx ] ) [ 0 ]
stratum_loc = np . random . choice ( stratum_locs )
# Record that item has been sampled
self . _sampled [ stratum_idx ] [ stratum_loc ] = True
self . _n_sampled [ stratum_idx ] += 1
# Get generic location
loc = self . allocations_ [ stratum_idx ] [ stratum_loc ]
return loc
|
def _delete_map_from_user_by_id(self, user, map_id):
    """Delete a mapfile entry from database."""
    entry = self._get_map_from_user_by_id(user, map_id)
    # Unknown map id for this user: nothing to delete.
    if entry is None:
        return None
    Session.delete(entry)
    Session.commit()
    # Return the (now deleted) record so callers can inspect it.
    return entry
|
def items(self):
    """D.items() -> a set-like object providing a view on D's items"""
    key_column = self._keycol
    # Yield (key, row-as-dict) pairs, keyed by the configured key column.
    for row in iter(self):
        yield row[key_column], dict(row)
|
def getMe(self):
    '''returns User Object'''
    raw = self._command('getMe')
    # Empty/falsy response means the API call failed.
    if not raw:
        return False
    return json.loads(raw)
|
def recognize_file(self, file_path):
    """This causes OpenALPR to attempt to recognize an image by opening a file on
    disk.

    :param file_path: The path to the image that will be analyzed
    :return: An OpenALPR analysis in the form of a response dictionary
    """
    encoded_path = _convert_to_charp(file_path)
    result_ptr = self._recognize_file_func(self.alpr_pointer, encoded_path)
    raw = ctypes.cast(result_ptr, ctypes.c_char_p).value
    raw = _convert_from_charp(raw)
    parsed = json.loads(raw)
    # Free the C-allocated JSON buffer after parsing to avoid a leak.
    self._free_json_mem_func(ctypes.c_void_p(result_ptr))
    return parsed
|
def _rtg_add_summary_file(eval_files, base_dir, data):
    """Parse output TP FP and FN files to generate metrics for plotting.

    Writes a validate-summary.csv (sample, caller, vtype, metric, value)
    under base_dir and records it under eval_files["summary"].
    """
    out_file = os.path.join(base_dir, "validate-summary.csv")
    # Regenerate only when the summary is older than the first available
    # tp/fp/fn input.
    if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files.get("fp", eval_files["fn"]))):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle)
                writer.writerow(["sample", "caller", "vtype", "metric", "value"])
                base = _get_sample_and_caller(data)
                for metric in ["tp", "fp", "fn"]:
                    # Count SNPs and indels separately via bcftools type filters.
                    for vtype, bcftools_types in [("SNPs", "--types snps"), ("Indels", "--exclude-types snps")]:
                        in_file = eval_files.get(metric)
                        if in_file and os.path.exists(in_file):
                            # NOTE(review): shell=True with format(**locals());
                            # inputs are pipeline-generated paths, but confirm
                            # they can never contain shell metacharacters.
                            cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l")
                            count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
                        else:
                            # Missing input file counts as zero variants.
                            count = 0
                        writer.writerow(base + [vtype, metric, count])
    eval_files["summary"] = out_file
    return eval_files
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.