signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def resend_sms_endpoint_verification(self, endpoint_id):
    """Ask NWS to resend the verification message to the endpoint's phone number.

    Returns the 202 status on success; raises DataFailureException for any
    other status returned by the service.
    """
    self._validate_uuid(endpoint_id)
    url = "/notification/v1/endpoint/{}/verification".format(endpoint_id)
    response = NWS_DAO().postURL(url, None, None)
    if response.status == 202:
        return response.status
    raise DataFailureException(url, response.status, response.data)
|
def exchange_bind(self, destination, source='', routing_key='', nowait=False, arguments=None):
    """Bind an exchange to an exchange (AMQP ``exchange.bind``, method (40, 30)).

    :param destination: name of the destination exchange to bind; the server
        accepts a blank name to mean the default exchange.
    :param source: name of the source exchange to bind; blank means the
        default exchange.
    :param routing_key: routing key for the binding; its use depends on the
        exchange class.
    :param nowait: if True, do not wait for the ``exchange.bind-ok`` reply.
    :param arguments: optional table of binding arguments whose syntax and
        semantics depend on the exchange class.

    Per the spec, the server allows (and ignores) duplicate bindings, allows
    binding cycles (including binding an exchange to itself), and must not
    deliver the same message more than once to a destination exchange even
    when the topology yields multiple routes.
    """
    if arguments is None:
        arguments = {}
    frame = AMQPWriter()
    frame.write_short(0)  # reserved-1
    frame.write_shortstr(destination)
    frame.write_shortstr(source)
    frame.write_shortstr(routing_key)
    frame.write_bit(nowait)
    frame.write_table(arguments)
    self._send_method((40, 30), frame)
    if nowait:
        return None
    # Wait for Channel.exchange_bind_ok
    return self.wait(allowed_methods=[(40, 31)])
|
def get_pipeline_boxes(self, pipeline_key, sort_by=None):
    '''Gets a list of all box objects in a pipeline. Performs a single GET.

    Args:
        pipeline_key: key for the pipeline.
        sort_by: sort in descending order by 'creationTimestamp' or
            'lastUpdatedTimestamp' (support uncertain upstream).
    Returns:
        (status code for the GET request, dict of boxes)
    '''
    if not pipeline_key:
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix, pipeline_key])
    if sort_by:
        if sort_by not in ('creationTimestamp', 'lastUpdatedTimestamp'):
            return requests.codes.bad_request, {'success': 'False', 'error': 'sortBy needs to be \'creationTimestamp\', or \'lastUpdatedTimestamp\''}
        uri += self.sort_by_postfix + sort_by
    return self._req('get', uri)
|
def get_integer_index(miller_index, round_dp: int = 4, verbose: bool = True) -> Tuple[int, int, int]:
    """Attempt to convert a vector of floats to whole numbers.

    Fix: the parameter was previously (wrongly) annotated ``bool``; it is a
    vector of floats.

    Args:
        miller_index (list of float): A list of Miller indexes.
        round_dp (int, optional): The number of decimal places to round the
            Miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        (tuple): The integer Miller index.
    """
    miller_index = np.asarray(miller_index)
    # Deal with small irregular floats that are all equal or factors of
    # each other: normalise by the smallest non-zero entry, then by the max.
    miller_index /= min([m for m in miller_index if m != 0])
    miller_index /= np.max(np.abs(miller_index))
    # Deal with nice fractions: scale by the product of the denominators.
    md = [Fraction(n).limit_denominator(12).denominator for n in miller_index]
    miller_index *= reduce(lambda x, y: x * y, md)
    int_miller_index = np.int_(np.round(miller_index, 1))
    # Reduce by the greatest common divisor of the integer candidates.
    miller_index /= np.abs(reduce(gcd, int_miller_index))
    # Round to a reasonable precision.
    miller_index = np.array([round(h, round_dp) for h in miller_index])
    # Recalculate after rounding, as values may have changed.
    int_miller_index = np.int_(np.round(miller_index, 1))
    if np.any(np.abs(miller_index - int_miller_index) > 1e-6) and verbose:
        warnings.warn("Non-integer encountered in Miller index")
    else:
        miller_index = int_miller_index
    # Converts -0 to 0.
    miller_index += 0

    def n_minus(index):
        # Count of strictly negative components.
        return len([h for h in index if h < 0])

    # Minimise the number of negative indexes.
    if n_minus(miller_index) > n_minus(miller_index * -1):
        miller_index *= -1
    # If only one index is negative, make sure it is the smallest,
    # e.g. (-2 1 0) -> (2 -1 0).
    if (sum(miller_index != 0) == 2 and n_minus(miller_index) == 1
            and abs(min(miller_index)) > max(miller_index)):
        miller_index *= -1
    return tuple(miller_index)
|
def save_config(self, cmd="copy running-configuration startup-configuration", confirm=False, confirm_response=""):
    """Save the running configuration to startup via the base-class handler."""
    return super(DellForce10SSH, self).save_config(
        cmd=cmd,
        confirm=confirm,
        confirm_response=confirm_response,
    )
|
def breathe_lights(self, color, selector='all', from_color=None, period=1.0, cycles=1.0, persist=False, power_on=True, peak=0.5):
    """Perform the breathe effect on lights.

    color (required str): color attributes to use during the effect.
    selector (str): limits which lights run the effect (default: all).
    from_color (str): color to start the effect from (default: current color).
    period (float): seconds for one cycle of the effect (default: 1.0).
    cycles (float): number of repetitions (default: 1.0).
    persist (bool): keep the last effect color when the effect ends
        (default: False — restore the previous value).
    power_on (bool): turn the bulb on if it is off (default: True).
    peak (float): where in a period the target color is at its maximum,
        0.0-1.0 (default: 0.5).
    """
    effect_params = [
        ("color", color),
        ("from_color", from_color),
        ("period", period),
        ("cycles", cycles),
        ("persist", persist),
        ("power_on", power_on),
        ("peak", peak),
    ]
    return self.client.perform_request(
        method='post',
        endpoint='lights/{}/effects/breathe',
        endpoint_args=[selector],
        argument_tuples=effect_params,
    )
|
def rollforward(self, dt):
    """Roll the provided date forward to the next offset, only if it is not
    already on an offset."""
    dt = as_timestamp(dt)
    if self.onOffset(dt):
        return dt
    # Advance by one unit of this offset type, preserving configuration.
    return dt + self.__class__(1, normalize=self.normalize, **self.kwds)
|
def end_element(self, tag):
    """Close handler: on ``</form>``, store the collected form and reset."""
    if tag != u'form':
        return
    self.forms.append(self.form)
    self.form = None
|
def describe_keypairs(self, xml_bytes):
    """Parse the XML returned by the C{DescribeKeyPairs} function.

    @param xml_bytes: XML bytes with a C{DescribeKeyPairsResponse} root
        element.
    @return: a C{list} of L{Keypair}.
    """
    root = XML(xml_bytes)
    keypairs = root.find("keySet")
    if keypairs is None:
        return []
    return [
        model.Keypair(
            keypair_data.findtext("keyName"),
            keypair_data.findtext("keyFingerprint"),
        )
        for keypair_data in keypairs
    ]
|
def graft_neuron(root_section):
    '''Return a neuron whose single neurite starts at root_section.

    The soma is seeded from the first point of the root section.
    '''
    assert isinstance(root_section, Section)
    soma = Soma(root_section.points[:1])
    return Neuron(soma=soma, neurites=[Neurite(root_section)])
|
def create_pull_from_issue(self, issue, base, head):
    """Create a pull request from issue ``issue``.

    :param int issue: (required), issue number
    :param str base: (required), e.g., 'master'
    :param str head: (required), e.g., 'username:branch'
    :returns: :class:`PullRequest <github3.pulls.PullRequest>` if
        successful, else None
    """
    if int(issue) <= 0:
        return None
    payload = {'issue': issue, 'base': base, 'head': head}
    return self._create_pull(payload)
|
def tosegwizard(file, seglist, header=True, coltype=int):
    """Write the segmentlist ``seglist`` to the file object ``file`` in a
    segwizard-compatible format.

    :param file: writable file object to receive the output
    :param seglist: iterable of segments supporting ``seg[0]``, ``seg[1]``
        and ``abs(seg)`` (start, stop, duration)
    :param header: if True, begin with a comment line naming the columns
    :param coltype: type used to coerce segment boundaries before ``str()``
    """
    # NOTE: Python 2 print-chevron syntax; this module targets Python 2.
    if header:
        print >> file, "# seg\tstart \tstop \tduration"
    # One tab-separated row per segment: index, start, stop, duration.
    for n, seg in enumerate(seglist):
        print >> file, "%d\t%s\t%s\t%s" % (n, str(coltype(seg[0])), str(coltype(seg[1])), str(coltype(abs(seg))))
|
def build_items(self):
    """Drain the STATS queue, recompute ``self.stats``, build new items
    from the aggregated stats and put them on the ITEM queue.
    """
    # Fold every pending raw stats item into self.stats via calculate().
    # NOTE(review): empty()/get() is racy with multiple consumers — assumed
    # single-consumer here; confirm against the surrounding worker.
    while not self.stats_queue.empty():
        item = self.stats_queue.get()
        self.calculate(item)
    # Python 2 dict iteration; one outgoing item per aggregated key.
    for key, value in self.stats.iteritems():
        # Special case: report the live length of the outgoing queue itself.
        if 'blackbird.queue.length' == key:
            value = self.queue.qsize()
        item = BlackbirdStatisticsItem(key=key, value=value, host=self.options['hostname'])
        if self.enqueue(item=item, queue=self.queue):
            self.logger.debug('Inserted {0} to the queue.'.format(item.data))
|
def galcencyl_to_vxvyvz(vR, vT, vZ, phi, vsun=None, Xsun=1., Zsun=0., _extra_rot=True):
    """NAME:
       galcencyl_to_vxvyvz
    PURPOSE:
       transform cylindrical Galactocentric coordinates to XYZ (wrt Sun) coordinates for velocities
    INPUT:
       vR - Galactocentric radial velocity
       vT - Galactocentric tangential velocity
       vZ - Galactocentric vertical velocity
       phi - Galactocentric azimuth
       vsun - velocity of the sun in the GC frame ndarray[3] (defaults to [0., 1., 0.]; can be array of same length as vRg; shape [3,N])
       Xsun - cylindrical distance to the GC (can be array of same length as vRg)
       Zsun - Sun's height above the midplane (can be array of same length as vRg)
       _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
    OUTPUT:
       vx, vy, vz
    HISTORY:
       2011-02-24 - Written - Bovy (NYU)
       2017-10-24 - Allowed vsun/Xsun/Zsun to be arrays - Bovy (NYU)
    """
    # Fix: avoid a shared mutable default argument for vsun; build per call.
    if vsun is None:
        vsun = [0., 1., 0.]
    vXg, vYg, vZg = cyl_to_rect_vec(vR, vT, vZ, phi)
    return galcenrect_to_vxvyvz(vXg, vYg, vZg, vsun=vsun, Xsun=Xsun, Zsun=Zsun, _extra_rot=_extra_rot)
|
def _run_pass(self):
    """Read chunks from the tracked file and dispatch extracted lines to
    the configured callback.

    Returns False (and marks this watch inactive) on a stale NFS handle;
    otherwise loops until no more complete lines are available, flushing a
    long-pending partial multiline event before breaking out.
    """
    while True:
        try:
            data = self._file.read(4096)
        except IOError, e:  # Python 2 except syntax; this module targets Python 2.
            if e.errno == errno.ESTALE:
                # Stale NFS file handle: stop watching this file.
                self.active = False
                return False
        lines = self._buffer_extract(data)
        if not lines:
            # Before returning, check if an event (maybe partial) is waiting for too long.
            # NOTE(review): assumes _current_event supports .clear() (e.g. a deque) —
            # plain Python 2 lists do not; confirm the container type.
            if self._current_event and time.time() - self._last_activity > 1:
                event = '\n'.join(self._current_event)
                self._current_event.clear()
                self._callback_wrapper([event])
            break
        self._last_activity = time.time()
        if self._multiline_regex_after or self._multiline_regex_before:
            # Multiline is enabled for this file.
            events = multiline_merge(lines, self._current_event, self._multiline_regex_after, self._multiline_regex_before)
        else:
            events = lines
        if events:
            self._callback_wrapper(events)
        if self._sincedb_path:
            current_line_count = len(lines)
            self._sincedb_update_position(lines=current_line_count)
    # Persist the final read position once the pass is complete.
    self._sincedb_update_position()
|
def _channel_loop(tr, parameters, max_trigger_length=60, despike=False, debug=0):
    """Internal loop for parallel processing.

    :type tr: obspy.core.trace
    :param tr: Trace to look for triggers in.
    :type parameters: list
    :param parameters: List of TriggerParameter class for trace.
    :type max_trigger_length: float
    :param max_trigger_length: Maximum trigger length in seconds; longer
        triggers are deleted. Set to a falsy value to disable the limit.
    :type despike: bool
    :param despike: Whether to median-filter the trace before triggering.
    :type debug: int
    :param debug: Debug verbosity level.

    :return: trigger tuples (on, off, trace id, cft peak, cft std)
    :rtype: list
    """
    # Find the parameter set matching this trace's station/channel.
    for par in parameters:
        if par['station'] == tr.stats.station and par['channel'] == tr.stats.channel:
            parameter = par
            break
    else:
        msg = 'No parameters set for station ' + str(tr.stats.station)
        warnings.warn(msg)
        return []
    triggers = []
    if debug > 0:
        print(tr)
    tr.detrend('simple')
    if despike:
        median_filter(tr)
    # Band-limit according to whichever corner frequencies are configured.
    if parameter['lowcut'] and parameter['highcut']:
        tr.filter('bandpass', freqmin=parameter['lowcut'], freqmax=parameter['highcut'])
    elif parameter['lowcut']:
        tr.filter('highpass', freq=parameter['lowcut'])
    elif parameter['highcut']:
        tr.filter('lowpass', freq=parameter['highcut'])
    # Find triggers for each channel using recursive_sta_lta.
    df = tr.stats.sampling_rate
    cft = recursive_sta_lta(tr.data, int(parameter['sta_len'] * df), int(parameter['lta_len'] * df))
    # BUG FIX: trig_args was previously only bound when max_trigger_length
    # was truthy, so a falsy value raised NameError at trigger_onset below.
    trig_args = {}
    if max_trigger_length:
        trig_args['max_len_delete'] = True
        trig_args['max_len'] = int(max_trigger_length * df + 0.5)
    if debug > 3:
        plot_trigger(tr, cft, parameter['thr_on'], parameter['thr_off'])
    tmp_trigs = trigger_onset(cft, float(parameter['thr_on']), float(parameter['thr_off']), **trig_args)
    for on, off in tmp_trigs:
        cft_peak = tr.data[on:off].max()
        cft_std = tr.data[on:off].std()
        # Convert sample offsets to absolute times.
        on = tr.stats.starttime + float(on) / tr.stats.sampling_rate
        off = tr.stats.starttime + float(off) / tr.stats.sampling_rate
        triggers.append((on.timestamp, off.timestamp, tr.id, cft_peak, cft_std))
    return triggers
|
def get_all_clusters_sites():
    """Get all the clusters of all the sites.

    Returns:
        dict mapping each cluster uid to its python-grid5000 site uid.
    """
    gk = get_api_client()
    mapping = {}
    for site in gk.sites.list():
        for cluster in site.clusters.list():
            mapping[cluster.uid] = site.uid
    return mapping
|
def update_membership(self, group_id, users=None):
    """Update the group's membership.

    :type group_id: int
    :param group_id: Group ID number
    :type users: list of str
    :param users: List of emails (defaults to an empty list)
    :rtype: dict
    :return: dictionary of group information
    """
    # Fix: avoid the shared mutable-default pitfall; build a fresh list.
    if users is None:
        users = []
    data = {'groupId': group_id, 'users': users}
    return _fix_group(self.post('updateMembership', data))
|
def set_id(self, dxid):
    ''':param dxid: New ID to be associated with the handler
    :type dxid: string

    Discards the currently stored ID and associates the handler with *dxid*.
    A non-None ID is validated against this handler's class before storing.
    '''
    if dxid is not None:
        verify_string_dxid(dxid, self._class)
    self._dxid = dxid
|
def state_dict(self) -> Dict[str, Any]:
    """A ``Trainer`` can use this to serialize the state of the metric tracker."""
    return {
        "best_so_far": self._best_so_far,
        "patience": self._patience,
        "epochs_with_no_improvement": self._epochs_with_no_improvement,
        "is_best_so_far": self._is_best_so_far,
        "should_decrease": self._should_decrease,
        "best_epoch_metrics": self.best_epoch_metrics,
        "epoch_number": self._epoch_number,
        "best_epoch": self.best_epoch,
    }
|
def prevId(self):
    """Previous passage identifier.

    :rtype: CtsPassage
    :returns: Previous passage at the same level
    """
    if self._prev_id is False:
        # Lazily resolve both neighbouring urns with a single request.
        neighbours = self.getPrevNextUrn(reference=self.urn.reference)
        self._prev_id, self._next_id = neighbours
    return self._prev_id
|
def getResources(self, ep, noResp=False, cacheOnly=False):
    """Get list of resources on an endpoint.

    :param str ep: Endpoint to get the resources of
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector,
        do not wake up endpoint
    :return: list of resources
    :rtype: asyncResult
    """
    result = asyncResult()
    result.endpoint = ep
    # Load query params only if set to other than the defaults.
    q = {}
    if noResp or cacheOnly:
        q['noResp'] = 'true' if noResp == True else 'false'
        q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    # Make the query.
    self.log.debug("ep = %s, query=%s", ep, q)
    data = self._getURL("/endpoints/" + ep, query=q)
    result.fill(data)
    # Check success of the call (200 = success).
    if data.status_code == 200:
        result.error = False
        self.log.debug("getResources sucess, status_code = `%s`, content = `%s`", str(data.status_code), data.content)
    else:
        result.error = response_codes("get_resources", data.status_code)
        self.log.debug("getResources failed with error code `%s`" % str(data.status_code))
    result.is_done = True
    return result
|
def solutions_as_2d_trajectories(self, x_axis, y_axis):
    """Returns the :attr:`InferenceResult.solutions` as a plottable 2d trajectory.

    :param x_axis: the variable to be on the x axis of the projection
    :param y_axis: the variable to be on the y axis of the projection
    :return: a tuple x, y specifying lists of x and y coordinates of projection
    """
    if not self.solutions:
        raise Exception('No intermediate solutions returned. ' 'Re-run inference with return_intermediate_solutions=True')
    index_x = self.parameter_index(x_axis)
    index_y = self.parameter_index(y_axis)
    combined = [params + inits for params, inits in self.solutions]
    xs = [values[index_x] for values in combined]
    ys = [values[index_y] for values in combined]
    return xs, ys
|
def calc_rel_pos_to_parent(canvas, item, handle):
    """Calculate the relative position of the given item's handle to its parent.

    :param canvas: Canvas to find the relative position in
    :param item: Item to find the relative position to parent for
    :param handle: Handle of the item to find the relative position of
    :return: Relative position (x, y)
    """
    from gaphas.item import NW
    if isinstance(item, ConnectionView):
        matrix = item.canvas.get_matrix_i2i(item, item.parent)
        return matrix.transform_point(*handle.pos)
    parent_item = canvas.get_parent(item)
    if parent_item:
        matrix = item.canvas.get_matrix_i2i(item, parent_item)
        return matrix.transform_point(*handle.pos)
    # No parent: express the NW handle in canvas coordinates instead.
    return item.canvas.get_matrix_i2c(item).transform_point(*item.handles()[NW].pos)
|
def set_scm(scm):
    """Sets the pants Scm.

    A ``None`` argument is a no-op; a non-Scm argument raises ValueError.
    """
    global _SCM
    if scm is None:
        return
    if not isinstance(scm, Scm):
        raise ValueError('The scm must be an instance of Scm, given {}'.format(scm))
    _SCM = scm
|
def defaults():
    """Return a dictionary with default option values and descriptions."""
    return {str(k): str(v) for k, v in cma_default_options.items()}
|
def trigger_audited(self, id, rev, **kwargs):
    """Triggers a build of a specific Build Configuration in a specific revision.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass a ``callback`` function to be invoked
    when receiving the response::

        >>> def callback_function(response):
        >>>     pprint(response)
        >>> thread = api.trigger_audited(id, rev, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param int id: Build Configuration id (required)
    :param int rev: Revision of a Build Configuration (required)
    :param str callback_url: Optional Callback URL
    :param bool temporary_build: Is it a temporary build or a standard build?
    :param bool force_rebuild: DEPRECATED: Use RebuildMode.
    :param bool build_dependencies: Should we build also dependencies of this BuildConfiguration?
    :param bool keep_pod_on_failure: Should we keep the build container running, if the build fails?
    :param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
    :param str rebuild_mode: Rebuild modes: FORCE / EXPLICIT_DEPENDENCY_CHECK / IMPLICIT_DEPENDENCY_CHECK.
    :return: BuildRecordSingleton. If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the callback (async) and plain (sync) paths delegate to the
    # same helper and return its result directly.
    return self.trigger_audited_with_http_info(id, rev, **kwargs)
|
def all_near_zero_mod(a: Union[float, complex, Iterable[float], np.ndarray], period: float, *, atol: float = 1e-8) -> bool:
    """Checks if the tensor's elements are all near multiples of the period.

    Args:
        a: Tensor of elements that could all be near multiples of the period.
        period: The period, e.g. 2 pi when working in radians.
        atol: Absolute tolerance.

    Returns:
        True if every element is within ``atol`` of a multiple of ``period``.
    """
    # Map each value into the interval [-period/2, period/2) around the
    # nearest multiple of the period, then compare against the tolerance.
    b = (np.asarray(a) + period / 2) % period - period / 2
    # Fix: cast to a plain bool so the return matches the annotation
    # (np.all returns np.bool_).
    return bool(np.all(np.less_equal(np.abs(b), atol)))
|
def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True):
    """Parse a sentence Tree into this document graph.

    Recursively walks an ``nltk.tree.Tree``: inner trees become syntax
    nodes connected by dominance edges; leaf strings become token
    attributes on their (already-created) parent node. Python 2 module
    (uses ``unicode``).
    """
    def get_nodelabel(node):
        # Inner nodes carry a category label; leaves are unicode tokens.
        if isinstance(node, nltk.tree.Tree):
            return node.label()
        elif isinstance(node, unicode):
            return node.encode('utf-8')
        else:
            raise ValueError("Unexpected node type: {0}, {1}".format(type(node), node))

    root_node_id = self._node_id
    self.node[root_node_id]['label'] = get_nodelabel(tree)
    for subtree in tree:
        self._node_id += 1
        node_label = get_nodelabel(subtree)
        # Unescape the node label, if necessary.
        node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label)
        # TODO: refactor this, so we don't need to query this all the time.
        if ignore_traces and node_label == '-NONE-':
            # Ignore tokens annotated for traces.
            continue
        if isinstance(subtree, nltk.tree.Tree):
            if len(subtree) > 1:
                # Subtree is a syntactic category.
                node_attrs = {'label': node_label, self.ns + ':cat': node_label}
                layers = {self.ns, self.ns + ':syntax'}
            else:
                # Subtree represents a token and its POS tag.
                node_attrs = {'label': node_label}
                layers = {self.ns}
            edge_type = dg.EdgeTypes.dominance_relation
            self.add_node(self._node_id, layers=layers, attr_dict=node_attrs)
            self.add_edge(root_node_id, self._node_id, edge_type=edge_type)
        else:
            # isinstance(subtree, unicode); subtree is a token.
            # We'll have to modify the parent node of a token, since in NLTK
            # Trees even a leaf node (with its POS tag) is represented as a
            # Tree (an iterator over a single unicode string), e.g.
            # ``Tree('NNS', ['prices'])``.
            pos_tag = self.node[parent_node_id]['label']
            token_attrs = {'label': node_label, self.ns + ':token': node_label, self.ns + ':pos': pos_tag}
            self.node[parent_node_id].update(token_attrs)
            self.tokens.append(parent_node_id)
        if isinstance(subtree, nltk.tree.Tree):
            # Recurse; the node just created becomes the parent.
            self._parse_sentencetree(subtree, parent_node_id=self._node_id)
|
def list_folder(self, path):
    """Looks up folder contents of `path.`"""
    # Inspired by https://github.com/rspivak/sftpserver/blob/0.3/src/sftpserver/stub_sftp.py#L70
    try:
        entries = []
        for name in os.listdir(path):
            attr = paramiko.SFTPAttributes.from_stat(os.stat(os.path.join(path, name)))
            attr.filename = name
            entries.append(attr)
        return entries
    except OSError as e:
        # Translate the OS error into the SFTP error-code convention.
        return SFTPServer.convert_errno(e.errno)
|
def make_proper_simple_record(record, force_shrink=False):
    """Prepares and ships an individual simplified durable table record over to SNS/SQS for future processing.

    :param record: raw DynamoDB stream record
    :param force_shrink: if True, always strip the full item payload
    :return: serialized JSON blob (str)
    """
    # Convert to a simple object.
    item = {
        'arn': record['dynamodb']['Keys']['arn']['S'],
        'event_time': record['dynamodb']['NewImage']['eventTime']['S'],
        'tech': HISTORICAL_TECHNOLOGY,
    }
    # De-serialize the raw DynamoDB object into the proper PynamoDB obj.
    prepped_new_record = _get_durable_pynamo_obj(record['dynamodb']['NewImage'], DURABLE_MAPPING.get(HISTORICAL_TECHNOLOGY))
    item['item'] = dict(prepped_new_record)
    # Get the initial blob and determine if it is too big for SNS/SQS.
    blob = json.dumps(item)
    size = math.ceil(sys.getsizeof(blob) / 1024)
    if size >= 200 or force_shrink:
        # Too big: send a smaller blob that tells the recipient to fetch
        # the item from the Historical table instead.
        del item['item']
        item[EVENT_TOO_BIG_FLAG] = True
        blob = json.dumps(item)
    return blob.replace('<empty>', '')
|
def get_lu_from_synset(self, syn_id, lemma=None):
    """Returns (lu_id, synonyms=[(word, lu_id)]) tuple given a synset ID and a lemma.

    ``lu_id`` is the lexical-unit id of the entry matching ``lemma``
    (None if not found); ``synonyms`` collects every other entry in the
    synset. Python 2 module (uses ``unicode``).
    """
    if not lemma:
        # No lemma given: delegate to the plural variant.
        return self.get_lus_from_synset(syn_id)
    # alias
    # Normalise byte strings to unicode (Python 2).
    if not isinstance(lemma, unicode):
        lemma = unicode(lemma, 'utf-8')
    root = self.get_synset_xml(syn_id)
    elem_synonyms = root.find(".//synonyms")
    lu_id = None
    synonyms = []
    for elem_synonym in elem_synonyms:
        # Get "c_lu_id-previewtext" attribute; synonym_str ends with ":<num>",
        # so strip that suffix to recover the bare word.
        synonym_str = elem_synonym.get("c_lu_id-previewtext")
        synonym = synonym_str.split(':')[0].strip()
        if synonym != lemma:
            synonyms.append((synonym, elem_synonym.get("c_lu_id")))
            if self.debug:
                printf("synonym add: %s" % synonym)
        else:
            # Get "c_lu_id" attribute for the entry matching the lemma.
            lu_id = elem_synonym.get("c_lu_id")
            if self.debug:
                printf("lu_id: %s" % lu_id)
                printf("synonym skip lemma: %s" % synonym)
    return lu_id, synonyms
|
def _chattrib(name, key, value, param, root=None):
    '''Change an attribute for the named group via ``groupmod``.

    Returns True when the attribute already has (or now has) the desired
    value, False when the group cannot be found.
    '''
    pre_info = info(name, root=root)
    if not pre_info:
        return False
    if value == pre_info[key]:
        # Already set; nothing to do.
        return True
    cmd = ['groupmod']
    if root is not None:
        cmd += ['-R', root]
    cmd += [param, value, name]
    __salt__['cmd.run'](cmd, python_shell=False)
    # Re-query to confirm the change took effect.
    return info(name, root=root).get(key) == value
|
def z(self, *args, **kwargs):
    """NAME:
       z
    PURPOSE:
       return vertical height
    INPUT:
       t - (optional) time at which to get the vertical height
       ro= (Object-wide default) physical scale for distances to use to convert
       use_physical= use to override Object-wide default for using a physical scale for output
    OUTPUT:
       z(t)
    HISTORY:
       2010-09-21 - Written - Bovy (NYU)
    """
    # Linear/planar orbits have fewer than 5 phase-space coordinates and no z.
    if len(self.vxvv) < 5:
        raise AttributeError("linear and planar orbits do not have z()")
    thiso = self(*args, **kwargs)
    if len(thiso.shape) == 1:
        return thiso[3]
    return thiso[3, :]
|
def keysyms_from_strings():
    """Yields the tuple ``(character, symbol name)`` for all keysyms."""
    for number, codepoint, status, name in keysym_definitions():
        # Skip keysyms that do not map to unicode characters (all-zero
        # codepoint), and keysyms that are not well established.
        if not codepoint.strip('0'):
            continue
        if status != '.':
            continue
        yield (codepoint, name)
|
def BGE(self, params):
    """BGE label

    Branch to the instruction at label if the N flag is the same as the V flag
    (signed greater-or-equal after a comparison).
    """
    label = self.get_one_parameter(self.ONE_PARAMETER, params)
    self.check_arguments(label_exists=(label,))

    def BGE_func():
        # Label address resolved at execution time so late label updates apply.
        if self.is_N_set() == self.is_V_set():
            self.register['PC'] = self.labels[label]

    return BGE_func
|
def do_rmfilter(self, arg):
    """Removes the test case filter that limits which results are included in plots/tables."""
    filters = self.curargs["tfilter"]
    if arg not in filters:
        return
    if arg == "*":
        # The wildcard filter is mandatory and may never be removed.
        msg.warn("The default filter cannot be removed.")
    else:
        filters.remove(arg)
        self.do_filter("list")
|
def csv(cls, d, order=None, header=None, sort_keys=True):
    """Prints a table in csv format.

    :param d: A dict of dicts of the same type.
    :type d: dict
    :param order: The order in which the columns are printed.
        The order is specified by the key names of the dict.
    :type order: list
    :param header: The header of each of the columns (currently computed
        but not emitted; the header row uses ``order``).
    :type header: list or tuple of field names
    :param sort_keys: TODO: not yet implemented
    :type sort_keys: bool
    :return: a string representing the table in csv format, or None for
        empty/None input
    """
    # Fix: guard empty/None input BEFORE indexing into d (previously
    # list(d)[0] ran first and crashed on None or {}).
    if d is None or d == {}:
        return None
    first_element = list(d)[0]

    def _keys():
        # Column names are taken from the first row's keys.
        return list(d[first_element])

    if order is None:
        order = _keys()
    if header is None and order is not None:
        header = order
    elif header is None:
        header = _keys()
    # Header row (uses `order`, matching the original behavior).
    table = ",".join(str(e) for e in order) + "\n"
    for job in d:
        row = []
        for attribute in order:
            try:
                row.append(d[job][attribute])
            except Exception:
                # Missing attribute: emit a literal "None" cell.
                row.append("None")
        table += ",".join(str(e) for e in row) + "\n"
    return table
|
def ask_yes_no(question, default='no', answer=None):
    u"""Will ask a question and keeps prompting until answered.

    Args:
        question (str): Question to ask end user
        default (str): Default answer if user just presses enter at prompt
        answer (str): Used for testing

    Returns:
        (bool) Meaning:
            True - Answer is yes
            False - Answer is no
    """
    yes_answers = [u'yes', u'ye', u'y']
    no_answers = [u'no', u'n']
    if default.lower() in no_answers:
        help_ = u'[N/y]?'
        default = False
    else:
        default = True
        help_ = u'[Y/n]?'
    while 1:
        display = question + '\n' + help_
        if answer is None:
            log.debug(u'Under None')
            answer = six.moves.input(display)
        answer = answer.lower()
        if answer == u'':
            log.debug(u'Under blank')
            return default
        if answer in yes_answers:
            log.debug(u'Must be true')
            return True
        if answer in no_answers:
            log.debug(u'Must be false')
            return False
        # Invalid input: nag, pause, and prompt again.
        sys.stdout.write(u'Please answer yes or no only!\n\n')
        sys.stdout.flush()
        answer = None
        six.moves.input(u'Press enter to continue')
        sys.stdout.write('\n\n\n\n\n')
        sys.stdout.flush()
|
def _parse_routes(iface, opts):
    '''Filters given options and outputs valid settings for
    the route settings file.'''
    # Normalize keys to lowercase.
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    if 'routes' not in opts:
        # A route file without a routes list is invalid.
        _raise_error_routes(iface, 'routes', 'List of routes')
    return {opt: opts[opt] for opt in opts}
|
def _on_response(self, action, table, attempt, start, response, future, measurements):
    """Invoked when the HTTP request to DynamoDB has returned; responsible
    for setting the action future's result or exception based upon the
    HTTP response provided.

    :param str action: The action that was taken
    :param str table: The table name the action was made against
    :param int attempt: The attempt number for the action
    :param float start: When the request was submitted
    :param tornado.concurrent.Future response: The HTTP request future
    :param tornado.concurrent.Future future: The action execution future
    :param list measurements: The measurement accumulator

    """
    self.logger.debug('%s on %s request #%i = %r', action, table, attempt, response)
    now, exception = time.time(), None
    try:
        future.set_result(self._process_response(response))
    # Map low-level AWS client errors onto this package's exception
    # hierarchy so callers only ever need to catch local exception types.
    except aws_exceptions.ConfigNotFound as error:
        exception = exceptions.ConfigNotFound(str(error))
    except aws_exceptions.ConfigParserError as error:
        exception = exceptions.ConfigParserError(str(error))
    except aws_exceptions.NoCredentialsError as error:
        exception = exceptions.NoCredentialsError(str(error))
    except aws_exceptions.NoProfileError as error:
        exception = exceptions.NoProfileError(str(error))
    except aws_exceptions.AWSError as error:
        exception = exceptions.DynamoDBException(error)
    # Transport-level failures are all normalized to RequestException.
    except (ConnectionError, ConnectionResetError, OSError,
            aws_exceptions.RequestException, ssl.SSLError,
            _select.error, ssl.socket_error, socket.gaierror) as error:
        exception = exceptions.RequestException(str(error))
    except TimeoutError:
        exception = exceptions.TimeoutException()
    except httpclient.HTTPError as error:
        # Code 599 is treated as a client-side timeout; anything else
        # carries the response body (or the bare code) as the message.
        if error.code == 599:
            exception = exceptions.TimeoutException()
        else:
            exception = exceptions.RequestException(
                getattr(getattr(error, 'response', error), 'body', str(error.code)))
    except Exception as error:
        # Last resort: propagate unexpected errors to the future unchanged.
        exception = error
    if exception:
        future.set_exception(exception)
    # Record the attempt's timing; max(now, start) guards against a
    # negative duration if the clocks disagree.
    measurements.append(
        Measurement(now, action, table, attempt, max(now, start) - start,
                    exception.__class__.__name__ if exception else exception))
|
def get_disks(self):
    """Return a list of all the Disks attached to this VM.

    The disks are returned as sham.storage.volumes.Volume objects.
    """
    volume_objs = []
    for disk in self.xml.iter('disk'):
        source = disk.find('source')
        if source is None:
            # Disk element without a backing file; nothing to look up.
            continue
        path = source.attrib['file']
        volume_objs.append(self.domain.connect().storageVolLookupByPath(path))
    return [Volume(vol, StoragePool(vol.storagePoolLookupByVolume()))
            for vol in volume_objs]
|
def get_bin_hierarchy_design_session(self):
    """Gets the bin hierarchy design session.

    return: (osid.resource.BinHierarchyDesignSession) - a
        ``BinHierarchyDesignSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_bin_hierarchy_design()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_bin_hierarchy_design()`` is ``true``.*
    """
    if self.supports_bin_hierarchy_design():
        # pylint: disable=no-member
        return sessions.BinHierarchyDesignSession(runtime=self._runtime)
    raise errors.Unimplemented()
|
def hdel(self, *args):
    """Delete many InstanceHashField fields with a single redis call.

    Args:
        *args: names of the InstanceHashField fields to delete.

    Returns:
        int: the number of fields really deleted by redis.

    Raises:
        ValueError: if any given name is not an InstanceHashField.
    """
    # Bug fix: the previous check used any(), which let a call mixing valid
    # and invalid field names slip through validation; per the contract,
    # *every* name must be an InstanceHashField.
    if args and not all(arg in self._instancehash_fields for arg in args):
        raise ValueError("Only InstanceHashField can be used here.")
    # Deindex the indexable fields before their values are removed.
    for field_name in args:
        field = self.get_field(field_name)
        if field.indexable:
            field.deindex()
    # Return the number of fields really deleted
    return self._call_command('hdel', *args)
|
def specific_actions(hazard, exposure):
    """Return actions which are specific for a given hazard and exposure.

    :param hazard: The hazard definition.
    :type hazard: safe.definition.hazard

    :param exposure: The exposure definition.
    :type exposure: safe.definition.exposure

    :return: List of specific actions.
    :rtype: list
    """
    # Lazily scan ITEMS for the first matching hazard/exposure pair and
    # fall back to an empty list when nothing matches.
    matches = (
        item.get('actions', [])
        for item in ITEMS
        if item['hazard'] == hazard and item['exposure'] == exposure
    )
    return next(matches, [])
|
def fix_auth_url_version_prefix(auth_url):
    """Fix up the auth url if an invalid or no version prefix was given.

    People still give a v2 auth_url even when they specify that they want v3
    authentication. Fix the URL to say v3 in this case and add a version if
    it is missing entirely. This should be smarter and use discovery.

    Returns a (fixed_url, was_fixed) tuple.
    """
    auth_url = _augment_url_with_version(auth_url)
    if get_keystone_version() >= 3 and has_in_url_path(auth_url, ["/v2.0"]):
        # A v2.0 URL with v3 auth requested: rewrite the version segment.
        return url_path_replace(auth_url, "/v2.0", "/v3", 1), True
    return auth_url, False
|
def execute_dry_run(self, dialect=None, billing_tier=None):
    """Dry run a query to check its validity and return useful statistics.

    Args:
        dialect: {'legacy', 'standard'}, default 'legacy'
            'legacy': Use BigQuery's legacy SQL dialect.
            'standard': Use BigQuery's standard SQL (beta), which is
            compliant with the SQL 2011 standard.
        billing_tier: Limits the billing tier for this job. Queries with
            resource usage beyond this tier will fail (without incurring a
            charge). If unspecified, the project default is used; this can
            also override the project-wide default on a per-query basis.

    Returns:
        A dict with 'cacheHit' and 'totalBytesProcessed' fields.

    Raises:
        An exception if the query was malformed.
    """
    # The previous "try: ... except Exception as e: raise e" wrapper added
    # nothing (it re-raised the same exception, truncating tracebacks on
    # Python 2), so the call is made directly.
    query_result = self._api.jobs_insert_query(
        self._sql, self._code, self._imports, dry_run=True,
        table_definitions=self._external_tables, dialect=dialect,
        billing_tier=billing_tier)
    return query_result['statistics']['query']
|
def mtf_image_transformer_base():
    """Set of hyperparameters (base config for the mtf image transformer)."""
    hparams = common_hparams.basic_params1()
    hparams.no_data_parallelism = True
    hparams.use_fixed_batch_size = True
    hparams.batch_size = 1
    hparams.max_length = 3072
    hparams.hidden_size = 256
    hparams.label_smoothing = 0.0
    # 8-way model-parallelism
    hparams.add_hparam("mesh_shape", "batch:8")
    hparams.add_hparam("layout", "batch:batch")
    hparams.add_hparam("mtf_mode", True)
    hparams.add_hparam("num_heads", 8)
    hparams.add_hparam("filter_size", 1024)
    # Decoder-only setup: no encoder layers.
    hparams.add_hparam("num_encoder_layers", 0)
    hparams.add_hparam("num_decoder_layers", 6)
    hparams.add_hparam("attention_key_size", 256)
    hparams.add_hparam("attention_value_size", 256)
    # Share weights between input and target embeddings
    hparams.shared_embedding = True
    # mixture of experts hparams
    hparams.add_hparam("ffn_layer", "dense_relu_dense")
    hparams.add_hparam("moe_overhead_train", 1.0)
    hparams.add_hparam("moe_overhead_eval", 2.0)
    hparams.moe_num_experts = 16
    hparams.moe_loss_coef = 1e-3
    hparams.shared_embedding_and_softmax_weights = True
    # Optimizer / schedule configuration.
    hparams.optimizer = "Adafactor"
    hparams.learning_rate_schedule = "rsqrt_decay"
    hparams.learning_rate_warmup_steps = 10000
    hparams.add_hparam("d_kv", 64)
    hparams.add_hparam("d_ff", 2048)
    # Image related hparams
    hparams.add_hparam("img_len", 32)
    hparams.add_hparam("num_channels", 3)
    hparams.add_hparam("unconditional", True)
    # Local Attention related params
    hparams.add_hparam("block_length", 128)
    hparams.add_hparam("block_height", 16)
    hparams.add_hparam("block_width", 16)
    hparams.add_hparam("attention_type", "local1d")
    return hparams
|
def guided_relu():
    """Returns:
        A context where the gradient of :meth:`tf.nn.relu` is replaced by
        guided back-propagation, as described in the paper:

        `Striving for Simplicity: The All Convolutional Net
        <https://arxiv.org/abs/1412.6806>`_
    """
    # NOTE(review): this is a generator intended to be used as a context
    # manager; presumably a contextlib.contextmanager decorator sits at the
    # (not visible) definition site -- confirm.
    from tensorflow.python.ops import gen_nn_ops   # noqa
    @tf.RegisterGradient("GuidedReLU")
    def GuidedReluGrad(op, grad):
        # Guided backprop: pass the ReLU gradient through only where the
        # incoming gradient is positive; zero elsewhere.
        return tf.where(0. < grad,
                        gen_nn_ops._relu_grad(grad, op.outputs[0]),
                        tf.zeros(grad.get_shape()))
    g = tf.get_default_graph()
    # While inside this context, ops of type 'Relu' use the gradient
    # registered above.
    with g.gradient_override_map({'Relu': 'GuidedReLU'}):
        yield
|
def _callbacks_grouped_by_name ( self ) :
"""Group callbacks by name and collect names set by the user ."""
|
callbacks , names_set_by_user = OrderedDict ( ) , set ( )
for name , cb , named_by_user in self . _yield_callbacks ( ) :
if named_by_user :
names_set_by_user . add ( name )
callbacks [ name ] = callbacks . get ( name , [ ] ) + [ cb ]
return callbacks , names_set_by_user
|
def initialize(self):
    """Sets up initial ensime-vim editor settings."""
    # TODO: This seems wrong, the user setting value is never used anywhere.
    if 'EnErrorStyle' not in self._vim.vars:
        self._vim.vars['EnErrorStyle'] = 'EnError'
    # Run the setup commands in order (order is preserved from the original
    # one-call-per-line form).
    setup_commands = [
        'highlight EnErrorStyle ctermbg=red gui=underline',
        # TODO: this SHOULD be a buffer-local setting only, and since it
        # should apply to all Scala files, ftplugin is the ideal place to
        # set it. Not sure how this currently works when only set once.
        'set omnifunc=EnCompleteFunc',
        # TODO: custom filetype ftplugin
        'autocmd FileType package_info nnoremap <buffer> <Space> :call EnPackageDecl()<CR>',
        'autocmd FileType package_info setlocal splitright',
    ]
    for command in setup_commands:
        self._vim.command(command)
|
def create_settings(pkg, repo_dest, db_user, db_name, db_password, db_host, db_port):
    """Creates a local settings file out of the distributed template.

    This also fills in database settings and generates a secret key, etc.

    :param pkg: package directory name containing the settings template
    :param repo_dest: repository checkout to operate inside
    :param db_user/db_name/db_password/db_host/db_port: database settings
        (password/host/port default to '' when falsy)
    """
    # Renamed from 'vars' to avoid shadowing the builtin vars().
    template_vars = {
        'pkg': pkg,
        'db_user': db_user,
        'db_name': db_name,
        'db_password': db_password or '',
        'db_host': db_host or '',
        'db_port': db_port or '',
        'hmac_date': datetime.now().strftime('%Y-%m-%d'),
        'hmac_key': generate_key(32),
        'secret_key': generate_key(32),
    }
    with dir_path(repo_dest):
        shutil.copyfile('%s/settings/local.py-dist' % pkg,
                        '%s/settings/local.py' % pkg)
        patch("""\
--- a/%(pkg)s/settings/local.py
+++ b/%(pkg)s/settings/local.py
@@ -9,11 +9,11 @@ from . import base
 DATABASES = {
     'default': {
         'ENGINE': 'django.db.backends.mysql',
-        'NAME': 'playdoh_app',
-        'USER': 'root',
-        'PASSWORD': '',
-        'HOST': '',
-        'PORT': '',
+        'NAME': '%(db_name)s',
+        'USER': '%(db_user)s',
+        'PASSWORD': '%(db_password)s',
+        'HOST': '%(db_host)s',
+        'PORT': '%(db_port)s',
         'OPTIONS': {
             'init_command': 'SET storage_engine=InnoDB',
             'charset' : 'utf8',
@@ -51,14 +51,14 @@ DEV = True
 # Playdoh ships with Bcrypt+HMAC by default because it's the most secure.
 # To use bcrypt, fill in a secret HMAC key. It cannot be blank.
 HMAC_KEYS = {
-    #'2012-06-06': 'some secret',
+    '%(hmac_date)s': '%(hmac_key)s',
 }
 from django_sha2 import get_password_hashers
 PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
 # Make this unique, and don't share it with anybody. It cannot be blank.
-SECRET_KEY = ''
+SECRET_KEY = '%(secret_key)s'
 # Uncomment these to activate and customize Celery:
 # CELERY_ALWAYS_EAGER = False  # required to activate celeryd
""" % template_vars)
|
def attended_by(self, email):
    """Check if the user with *email* attended (accepted) the event."""
    attendees = self["attendees"] or []
    return any(
        attendee["email"] == email and attendee["responseStatus"] == "accepted"
        for attendee in attendees
    )
|
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq,
                       membship, num_dissim, cat_dissim, gamma, random_state):
    """Single iteration of the k-prototypes algorithm.

    Reassigns each point to its nearest centroid under the combined
    numerical + gamma-weighted categorical dissimilarity, incrementally
    updating cluster statistics, and returns (centroids, moves).
    """
    moves = 0
    for ipoint in range(Xnum.shape[0]):
        # Nearest cluster under the combined numeric + categorical cost.
        clust = np.argmin(
            num_dissim(centroids[0], Xnum[ipoint]) +
            gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship))
        if membship[clust, ipoint]:
            # Point is already in its right place.
            continue
        # Move point, and update old/new cluster frequencies and centroids.
        moves += 1
        old_clust = np.argwhere(membship[:, ipoint])[0][0]
        # Note that membship gets updated by kmodes.move_point_cat.
        # move_point_num only updates things specific to the k-means part.
        cl_attr_sum, cl_memb_sum = move_point_num(
            Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum)
        cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
            Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq, membship, centroids[1])
        # Update old and new centroids for numerical attributes using
        # the means and sums of all values
        for iattr in range(len(Xnum[ipoint])):
            for curc in (clust, old_clust):
                if cl_memb_sum[curc]:
                    centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc]
                else:
                    # Empty cluster: zero out the numerical centroid entry.
                    centroids[0][curc, iattr] = 0.
        # In case of an empty cluster, reinitialize with a random point
        # from largest cluster.
        if not cl_memb_sum[old_clust]:
            from_clust = membship.sum(axis=1).argmax()
            choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]
            rindx = random_state.choice(choices)
            cl_attr_sum, cl_memb_sum = move_point_num(
                Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum)
            cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
                Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids[1])
    return centroids, moves
|
def get_compute_environment(self, identifier):
    """Get compute environment by name or ARN.

    :param identifier: Name or ARN
    :type identifier: str
    :return: Compute Environment or None
    :rtype: ComputeEnvironment or None
    """
    # Try the ARN lookup first; fall back to a name lookup.
    by_arn = self.get_compute_environment_by_arn(identifier)
    if by_arn is not None:
        return by_arn
    return self.get_compute_environment_by_name(identifier)
|
def copy(self, *args, **kwargs):
    """Make a copy of this object.

    Note:
        Copies both field data and field values.

    See Also:
        For arguments and description of behavior see `pandas docs`_.

    .. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
    """
    # Note that the DataFrame type conversion itself does not copy; the
    # explicit .copy() call does.
    copied_data = pd.DataFrame(self).copy(*args, **kwargs)
    copied_values = [field.copy() for field in self.field_values]
    return self.__class__(copied_data, field_values=copied_values)
|
def get_subarray_sbi_ids(sub_array_id):
    """Return the list of scheduling block ids associated with the given
    sub_array_id."""
    # Walk the scheduling block keys in sorted order and collect the ids of
    # the blocks belonging to this sub-array.
    configs = (json.loads(DB.get(key))
               for key in sorted(DB.keys(pattern='scheduling_block/*')))
    return [config['id'] for config in configs
            if config['sub_array_id'] == sub_array_id]
|
def delete_by_hash(self, file_hash):
    """Remove the file/archive identified by `file_hash`.

    Args:
        file_hash (str): Hash, which is used to find the file in storage.

    Raises:
        IOError: If the file for given `file_hash` was not found in storage.
    """
    # Resolve the hash to its storage path, then delegate to the
    # path-based deletion.
    return self.delete_by_path(self.file_path_from_hash(file_hash))
|
def _blobs_page_start ( iterator , page , response ) :
"""Grab prefixes after a : class : ` ~ google . cloud . iterator . Page ` started .
: type iterator : : class : ` ~ google . api _ core . page _ iterator . Iterator `
: param iterator : The iterator that is currently in use .
: type page : : class : ` ~ google . cloud . api . core . page _ iterator . Page `
: param page : The page that was just created .
: type response : dict
: param response : The JSON API response for a page of blobs ."""
|
page . prefixes = tuple ( response . get ( "prefixes" , ( ) ) )
iterator . prefixes . update ( page . prefixes )
|
def verify_jwt(signed_request, expected_aud, secret, validators=(),
               required_keys=('request.pricePoint', 'request.name',
                              'request.description', 'response.transactionID'),
               algorithms=None):
    """Verifies a postback/chargeback JWT.

    Returns the trusted JSON data from the original request.
    When there's an error, an exception derived from
    :class:`mozpay.exc.InvalidJWT` will be raised.

    This is an all-in-one function that does all verification you'd need.
    There are some low-level functions you can use to just verify certain
    parts of a JWT.

    Arguments:

    **signed_request**
        JWT byte string.

    **expected_aud**
        The expected value for the aud (audience) of the JWT.
        See :func:`mozpay.verify.verify_audience`.

    **secret**
        A shared secret to validate the JWT with.
        See :func:`mozpay.verify.verify_sig`.

    **validators**
        A sequence of extra callables. Each one is passed a JSON Python
        dict representing the JWT after it has passed all other checks.
        (The default is an immutable tuple rather than the former mutable
        ``[]`` default, avoiding the shared-mutable-default pitfall.)

    **required_keys**
        A list of JWT keys to validate. See :func:`mozpay.verify.verify_keys`.

    **algorithms**
        A list of valid JWT algorithms to accept. By default this will only
        include HS256 because that's what the Firefox Marketplace uses.
    """
    if not algorithms:
        algorithms = ['HS256']
    issuer = _get_issuer(signed_request=signed_request)
    app_req = verify_sig(signed_request, secret, issuer=issuer,
                         algorithms=algorithms, expected_aud=expected_aud)
    # I think this call can be removed after
    # https://github.com/jpadilla/pyjwt/issues/121
    verify_claims(app_req, issuer=issuer)
    verify_keys(app_req, required_keys, issuer=issuer)
    for validator in validators:
        validator(app_req)
    return app_req
|
def open(path_or_url):
    """Wrapper for opening an IO object to a local file or URL.

    :param path_or_url: filesystem path or http(s)/ftp(s) URL
    :return: a file-like object
    """
    url_pattern = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if url_pattern.match(path_or_url):
        return urllib.request.urlopen(path_or_url)
    # Not a URL: fall back to the builtin open (this function shadows it).
    return builtins.open(path_or_url)
|
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
    """Extract Kaldi pitch features. Assumes 16k mono wav files.

    Writes wavs.scp / pitch_feats.scp into *feat_dir*, runs Kaldi's
    compute-kaldi-pitch-feats binary over them, then converts the
    resulting *.pitch.txt files into .npy arrays in *feat_dir*.
    """
    logger.debug("Make wav.scp and pitch.scp files")
    # Make wav.scp and pitch.scp files
    prefixes = []
    for fn in os.listdir(wav_dir):
        prefix, ext = os.path.splitext(fn)
        if ext == ".wav":
            prefixes.append(prefix)
    wav_scp_path = os.path.join(feat_dir, "wavs.scp")
    with open(wav_scp_path, "w") as wav_scp:
        for prefix in prefixes:
            logger.info("Writing wav file: %s", os.path.join(wav_dir, prefix + ".wav"))
            # Kaldi scp format: "<utterance-id> <path>" per line.
            print(prefix, os.path.join(wav_dir, prefix + ".wav"), file=wav_scp)
    pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
    with open(pitch_scp_path, "w") as pitch_scp:
        for prefix in prefixes:
            logger.info("Writing scp file: %s", os.path.join(feat_dir, prefix + ".pitch.txt"))
            print(prefix, os.path.join(feat_dir, prefix + ".pitch.txt"), file=pitch_scp)
    # Call Kaldi pitch feat extraction
    args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
            "scp:%s" % (wav_scp_path),
            "scp,t:%s" % pitch_scp_path]
    logger.info("Extracting pitch features from wavs listed in {}".format(wav_scp_path))
    subprocess.run(args)
    # Convert the Kaldi pitch *.txt files to numpy arrays.
    for fn in os.listdir(feat_dir):
        if fn.endswith(".pitch.txt"):
            pitch_feats = []
            with open(os.path.join(feat_dir, fn)) as f:
                for line in f:
                    sp = line.split()
                    # Keep only the two-column feature lines.
                    # NOTE(review): assumes non-feature lines (headers,
                    # brackets) have fewer than 2 fields -- confirm against
                    # the Kaldi text output format.
                    if len(sp) > 1:
                        pitch_feats.append([float(sp[0]), float(sp[1])])
            prefix, _ = os.path.splitext(fn)
            out_fn = prefix + ".npy"
            a = np.array(pitch_feats)
            np.save(os.path.join(feat_dir, out_fn), a)
|
def nth(self, n, dropna=None):
    """Take the nth row from each group if n is an int, or a subset of rows
    if n is a list of ints.

    If dropna, will take the nth non-null row; dropna is either Truthy
    (if a Series) or 'any', 'all' (if a DataFrame); this is equivalent to
    calling dropna(how=dropna) before the groupby.

    Parameters
    ----------
    n : int or list of ints
        A single nth value for the row or a list of nth values.
    dropna : None or str, optional
        Apply the specified dropna operation before counting which row is
        the nth row. Needs to be None, 'any' or 'all'.
    %(see_also)s
    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
    ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
    >>> g = df.groupby('A')
    >>> g.nth(0)
         B
    A
    1  NaN
    2  3.0
    >>> g.nth(1)
         B
    A
    1  2.0
    2  5.0
    >>> g.nth(-1)
         B
    A
    1  4.0
    2  5.0
    >>> g.nth([0, 1])
         B
    A
    1  NaN
    1  2.0
    2  3.0
    2  5.0

    Specifying `dropna` allows count ignoring ``NaN``

    >>> g.nth(0, dropna='any')
         B
    A
    1  2.0
    2  3.0

    NaNs denote group exhausted when using dropna

    >>> g.nth(3, dropna='any')
        B
    A
    1 NaN
    2 NaN

    Specifying `as_index=False` in `groupby` keeps the original index.

    >>> df.groupby('A', as_index=False).nth(1)
       A    B
    1  1  2.0
    4  2  5.0
    """
    # Normalize n into an array of nth values; a list of values cannot be
    # combined with dropna.
    if isinstance(n, int):
        nth_values = [n]
    elif isinstance(n, (set, list, tuple)):
        nth_values = list(set(n))
        if dropna is not None:
            raise ValueError("dropna option with a list of nth values is not supported")
    else:
        raise TypeError("n needs to be an int or a list/set/tuple of ints")
    nth_values = np.array(nth_values, dtype=np.intp)
    self._set_group_selection()
    if not dropna:
        # Fast path: select rows whose forward cumulative count matches a
        # non-negative nth value, or whose backward count matches a
        # negative nth value.
        mask_left = np.in1d(self._cumcount_array(), nth_values)
        mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
        mask = mask_left | mask_right
        out = self._selected_obj[mask]
        if not self.as_index:
            return out
        # Rebuild the index from the group labels of the selected rows.
        ids, _, _ = self.grouper.group_info
        out.index = self.grouper.result_index[ids[mask]]
        return out.sort_index() if self.sort else out
    # dropna path: validate/normalize the dropna argument first.
    if dropna not in ['any', 'all']:
        if isinstance(self._selected_obj, Series) and dropna is True:
            warnings.warn("the dropna={dropna} keyword is deprecated,"
                          "use dropna='all' instead. "
                          "For a Series groupby, dropna must be "
                          "either None, 'any' or 'all'.".format(dropna=dropna),
                          FutureWarning, stacklevel=2)
            dropna = 'all'
        else:
            # Note: when agg-ing picker doesn't raise this,
            # just returns NaN
            raise ValueError("For a DataFrame groupby, dropna must be "
                             "either None, 'any' or 'all', "
                             "(was passed {dropna}).".format(dropna=dropna))
    # old behaviour, but with all and any support for DataFrames.
    # modified in GH 7559 to have better perf
    max_len = n if n >= 0 else -1 - n
    dropped = self.obj.dropna(how=dropna, axis=self.axis)
    # get a new grouper for our dropped obj
    if self.keys is None and self.level is None:
        # we don't have the grouper info available
        # (e.g. we have selected out
        # a column that is not in the current object)
        axis = self.grouper.axis
        grouper = axis[axis.isin(dropped.index)]
    else:
        # create a grouper with the original parameters, but on the dropped
        # object
        from pandas.core.groupby.grouper import _get_grouper
        grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
                                     level=self.level, sort=self.sort,
                                     mutated=self.mutated)
    grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
    sizes, result = grb.size(), grb.nth(n)
    # Groups smaller than the requested position are "exhausted".
    mask = (sizes < max_len).values
    # set the results which don't meet the criteria
    if len(result) and mask.any():
        result.loc[mask] = np.nan
    # reset/reindex to the original groups
    if (len(self.obj) == len(dropped) or
            len(result) == len(self.grouper.result_index)):
        result.index = self.grouper.result_index
    else:
        result = result.reindex(self.grouper.result_index)
    return result
|
def read_csv_iter(path, fieldnames=None, sniff=True, mode='rt', encoding='utf-8', *args, **kwargs):
    '''Iterate through CSV rows in a file.

    By default, csv.reader() will be used and output will be a list of lists.
    If fieldnames is provided, DictReader will be used and output will be a
    list of OrderedDict instead. CSV sniffing (dialect detection) is enabled
    by default; set sniff=False to switch it off.
    '''
    with open(path, mode=mode, encoding=encoding) as infile:
        # Delegate row production directly to the stream iterator.
        yield from iter_csv_stream(infile, fieldnames=fieldnames, sniff=sniff,
                                   *args, **kwargs)
|
def transform_sparql_construct(rdf, construct_query):
    """Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.

    :param rdf: source rdflib Graph to query
    :param construct_query: the CONSTRUCT query text, or '@<filename>' to
        read the query from a file
    :return: a new Graph containing the constructed triples
    """
    logging.debug("performing SPARQL CONSTRUCT transformation")
    if construct_query[0] == '@':  # actual query should be read from file
        # Bug fix: this used the Python 2-only builtin file() (NameError on
        # Python 3) and never closed the handle; use open() in a context
        # manager instead.
        with open(construct_query[1:]) as query_file:
            construct_query = query_file.read()
    logging.debug("CONSTRUCT query: %s", construct_query)
    newgraph = Graph()
    for triple in rdf.query(construct_query):
        newgraph.add(triple)
    return newgraph
|
def _common(ret, name, service_name, kwargs):
    '''Returns: tuple whose first element is a bool indicating success or
    failure and the second element is either a ret dict for salt or an object.
    '''
    if 'interface' not in kwargs and 'public_url' not in kwargs:
        kwargs['interface'] = name
    service = __salt__['keystoneng.service_get'](name_or_id=service_name)
    if not service:
        ret['comment'] = 'Cannot find service'
        ret['result'] = False
        return (False, ret)
    # Search for endpoints matching everything except the mutable fields.
    filters = kwargs.copy()
    filters.pop('enabled', None)
    filters.pop('url', None)
    filters['service_id'] = service.id
    kwargs['service_name_or_id'] = service.id
    endpoints = __salt__['keystoneng.endpoint_search'](filters=filters)
    if len(endpoints) > 1:
        ret['comment'] = "Multiple endpoints match criteria"
        ret['result'] = False
        # Bug fix: previously this branch returned the bare ret dict,
        # violating the documented (bool, value) tuple contract.
        return (False, ret)
    endpoint = endpoints[0] if endpoints else None
    return (True, endpoint)
|
def _all_same_area ( self , dataset_ids ) :
"""Return True if all areas for the provided IDs are equal ."""
|
all_areas = [ ]
for ds_id in dataset_ids :
for scn in self . scenes :
ds = scn . get ( ds_id )
if ds is None :
continue
all_areas . append ( ds . attrs . get ( 'area' ) )
all_areas = [ area for area in all_areas if area is not None ]
return all ( all_areas [ 0 ] == area for area in all_areas [ 1 : ] )
|
def coupl_model1(self):
    """In model 1, we want to enforce the following signs on the couplings.
    Model 2 has the same couplings but arbitrary signs."""
    # (row, col, sign) triples: model 1 fixes these entries' signs.
    for row, col, sign in ((0, 0, 1), (0, 1, -1), (1, 1, 1)):
        self.Coupl[row, col] = sign * np.abs(self.Coupl[row, col])
|
def new_request(sender, request=None, notify=True, **kwargs):
    """Signal handler for a new request for inclusion: e-mails the
    community curators when mail notifications are enabled."""
    # Note: the config flag is checked first (same evaluation order as the
    # original short-circuit) so the app context is always consulted.
    if not current_app.config['COMMUNITIES_MAIL_ENABLED']:
        return
    if notify:
        send_community_request_email(request)
|
async def set_control_setpoint(self, setpoint, timeout=OTGW_DEFAULT_TIMEOUT):
    """Manipulate the control setpoint being sent to the boiler. Set to 0
    to pass along the value specified by the thermostat.

    Return the newly accepted value, or None on failure.

    This method is a coroutine.
    """
    raw = await self._wait_for_cmd(OTGW_CMD_CONTROL_SETPOINT, setpoint, timeout)
    if raw is None:
        return None
    accepted = float(raw)
    # Publish the accepted value through the status-update machinery.
    self._update_status({DATA_CONTROL_SETPOINT: accepted})
    return accepted
|
def has_commit(self, client_key=None):
    """Return True if client has new commit.

    :param client_key: The client key
    :type client_key: str
    :return:
    :rtype: boolean
    """
    # With neither an explicit key nor a current client there is nothing
    # to check.
    if client_key is None and self.current_client is None:
        raise ClientNotExist()
    if client_key:
        if not self.clients.has_client(client_key):
            raise ClientNotExist()
        return self.clients.get_client(client_key).has_commit()
    if self.current_client:
        return self.current_client.has_commit()
    return False
|
def _parseAtCharset(self, src):
    """[ CHARSET_SYM S* STRING S* ';' ]?"""
    # Not an @charset rule: return the source unchanged.
    if not isAtRuleIdent(src, 'charset'):
        return src
    src = stripAtRuleIdent(src)
    charset, src = self._getString(src)
    src = src.lstrip()
    if src[:1] != ';':
        raise self.ParseError('@charset expected a terminating \';\'', src, self.ctxsrc)
    src = src[1:].lstrip()
    # Hand the parsed charset to the builder.
    self.cssBuilder.atCharset(charset)
    return src
|
def comm_sep(self, plot_locs, criteria, loc_unit=None):
    '''Calculates commonality (Sorensen and Jaccard) between pairs of plots.

    Parameters
    ----------
    plot_locs : dict
        Dictionary with keys equal to each plot name, which must be
        represented by a column in the data table, and values equal to a
        tuple of the x and y coordinate of each plot.
    criteria : dict
        See docstring for Patch.sad. Note: this dict is mutated in place
        (the 'count' column entry is replaced per plot).
    loc_unit : str
        Unit of plot locations. Special cases include 'decdeg' (decimal
        degrees), returns result in km. Otherwise ignored.

    Returns
    -------
    result : structured array
        Returns a structured array with fields plot-a and plot-b (names of
        two plots), dist (distance between plots), and sorensen and jaccard
        (similarity indices). Has row for each unique pair of plots.
    '''
    # Set up sad_dict with key = plot and val = clean sad for that plot
    sad_dict = {}
    # Loop through all plot cols, updating criteria, and getting spp_list
    for plot in plot_locs.keys():
        # Find current count col and remove it from criteria
        # NOTE(review): pops from criteria while iterating criteria.keys();
        # safe on Python 2 (keys() returns a list) but raises RuntimeError
        # on Python 3 -- confirm the supported interpreter version.
        for crit_key in criteria.keys():
            if criteria[crit_key] == 'count':
                criteria.pop(crit_key, None)
        # Add this plot as col with counts
        criteria[plot] = 'count'
        # Get SAD for existing criteria with this plot as count col
        sad_return = self.sad(criteria, clean=True)
        # Check that sad_return only has one element, or throw error
        if len(sad_return) > 1:
            raise NotImplementedError('Too many criteria for comm_sep')
        # Get unique species list for this plot and store in sad_dict
        sad_dict[plot] = sad_return[0][2]
    # Set up recarray to hold Sorensen index for all pairs of plots
    n_pairs = np.sum(np.arange(len(plot_locs.keys())))
    result = np.recarray((n_pairs,), dtype=[('plot-a', 'S32'),
                                            ('plot-b', 'S32'),
                                            ('spp-a', int),
                                            ('spp-b', int),
                                            ('dist', float),
                                            ('sorensen', float),
                                            ('jaccard', float)])
    # Loop through all combinations of plots and fill in result table
    row = 0
    for pair in itertools.combinations(plot_locs.keys(), 2):
        # Names of plots
        plota = pair[0]
        plotb = pair[1]
        result[row]['plot-a'] = plota
        result[row]['plot-b'] = plotb
        # Calculate inter-plot distance
        if loc_unit == 'decdeg':
            result[row]['dist'] = _decdeg_distance(plot_locs[plota], plot_locs[plotb])
        else:
            result[row]['dist'] = _distance(plot_locs[plota], plot_locs[plotb])
        # Get similarity indices
        spp_a = len(sad_dict[plota])
        spp_b = len(sad_dict[plotb])
        result[row]['spp-a'] = spp_a
        result[row]['spp-b'] = spp_b
        intersect = set(sad_dict[plota]).intersection(sad_dict[plotb])
        union = set(sad_dict[plota]).union(sad_dict[plotb])
        # Fill in zero if denom is zero
        if spp_a + spp_b == 0:
            result[row]['sorensen'] = 0
        else:
            # NOTE(review): these ratios rely on true division; under
            # Python 2 this requires "from __future__ import division" at
            # the top of the file -- confirm.
            result[row]['sorensen'] = (2 * len(intersect)) / (spp_a + spp_b)
        if len(union) == 0:
            result[row]['jaccard'] = 0
        else:
            result[row]['jaccard'] = len(intersect) / len(union)
        # Increment row counter
        row += 1
    return result
|
def delete_load_balancer(access_token, subscription_id, resource_group, lb_name):
    '''Delete a load balancer.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        lb_name (str): Name of the load balancer.

    Returns:
        HTTP response.
    '''
    # Build the ARM resource URL for the load balancer, then issue the DELETE.
    endpoint = (
        '{}/subscriptions/{}/resourceGroups/{}'
        '/providers/Microsoft.Network/loadBalancers/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group, lb_name,
             NETWORK_API)
    return do_delete(endpoint, access_token)
|
def lint_file(file_path):
    """Validate & lint `file_path` and return a LintResult.

    :param file_path: YAML filename
    :type file_path: str
    :return: LintResult object
    """
    with open(file_path, 'r') as handle:
        try:
            return lint(handle)
        except Exception as exc:
            # Any parse failure is reported through a LintResult rather
            # than propagated to the caller.
            result = LintResult()
            result.add_error('could not parse YAML: %s' % exc, exception=exc)
            return result
|
def length(self):
    """Return the number of items currently in the queue.

    :raises ConnectionError: if the queue is not connected, or if the
        underlying redis connection fails while counting.
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')
    try:
        # llen gives the length of the redis list backing this queue.
        return self.rdb.llen(self._name)
    except redis.exceptions.ConnectionError as err:
        # Re-raise as the queue-level ConnectionError for callers.
        raise ConnectionError(str(err))
|
def request(self, type, command_list):
    '''Send NX-API JSON request to the NX-OS device and parse the reply.'''
    prepared = self._build_request(type, command_list)
    if self.nxargs['connect_over_uds']:
        # Unix-domain-socket transport: issue a raw HTTP POST on the
        # already-open connection object.
        self.connection.request('POST', prepared['url'], prepared['payload'],
                                prepared['headers'])
        reply = self.connection.getresponse()
    else:
        # HTTP(S) transport: here the connection object is itself the
        # request callable.
        reply = self.connection(
            prepared['url'], method='POST', opts=prepared['opts'],
            data=prepared['payload'], header_dict=prepared['headers'],
            decode=True, decode_type='json', **self.nxargs)
    return self.parse_response(reply, command_list)
|
def accountSummary(self, reqId, account, tag, value, curency):
    """accountSummary(EWrapper self, int reqId, IBString const & account, IBString const & tag, IBString const & value, IBString const & curency)

    SWIG-generated callback stub: forwards the account-summary event to the
    native EWrapper implementation.
    NOTE(review): 'curency' is a misspelling inherited from the generated
    C++ signature; renaming it here would desynchronize the binding.
    """
    return _swigibpy.EWrapper_accountSummary(self, reqId, account, tag, value, curency)
|
def conv2bin(data):
    """Convert a matrix of probabilities into binary values.

    Each entry is set to 1 with probability equal to its value and to 0
    otherwise. If the matrix has values < 0 or > 1, it is first normalized
    into [0, 1].

    :type data: numpy array
    :param data: input matrix of probabilities
    :return: binary matrix with the same shape and dtype as ``data``
    """
    if data.min() < 0 or data.max() > 1:
        data = normalize(data)
    # Vectorized Bernoulli sampling: one uniform draw per entry, replacing
    # the previous O(rows*cols) Python-level double loop.
    draws = np.random.random(data.shape)
    return (draws <= data).astype(data.dtype)
|
def dump(cls):
    """Output all recorded metrics and close their file handles."""
    with cls.lock:
        if not cls.instances:
            return
        # Everything is being flushed now, so the atexit hook (if any)
        # would be redundant.
        atexit.unregister(cls.dump)
        for instance in cls.instances.values():
            instance.fh.close()
|
def open_grindstone(self):
    """Open the grindstone file and return its parsed JSON contents.

    Falls back to an empty grindstone object (``{'tasks': []}``) when the
    file is missing or does not contain valid JSON.
    """
    try:
        with open(self.grindstone_path, 'r') as stone:
            return json.loads(stone.read())
    except json.decoder.JSONDecodeError:
        # File exists but is empty or malformed.
        return {'tasks': []}
    except FileNotFoundError:
        # No grindstone file yet.
        return {'tasks': []}
|
def draw_visibility_image_internal(gl, v, f):
    """Render each face in a unique color and decode face ids from pixels.

    Assumes camera is set up correctly in gl context.
    """
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # Encode 1-based face indices into the R/G/B channels, 8 bits each.
    face_ids = np.arange(1, len(f) + 1)
    face_ids = np.tile(col(face_ids), (1, 3))
    face_ids[:, 0] = face_ids[:, 0] & 255
    face_ids[:, 1] = (face_ids[:, 1] >> 8) & 255
    face_ids[:, 2] = (face_ids[:, 2] >> 16) & 255
    draw_colored_primitives(gl, v, f, np.asarray(face_ids, dtype=np.uint8))
    # Decode: recombine the channels and shift back to 0-based face indices.
    # Presumably background pixels (color 0) wrap to the uint32 max as a
    # sentinel -- confirm against callers.
    pixels = np.asarray(gl.getImage(), np.uint32)
    return pixels[:, :, 0] + pixels[:, :, 1] * 256 + pixels[:, :, 2] * 256 * 256 - 1
|
def compute_numtab(self):
    """Recompute the static numerical sets from the trigger's string fields.

    This method should only be called by the user if the string_tab
    member is modified.
    """
    self.numerical_tab = []
    for field_str, span in zip(self.string_tab, FIELD_RANGES):
        atoms = field_str.split(',')
        if len(atoms) > 1 and "*" in atoms:
            raise ValueError("\"*\" must be alone in a field.")
        field_values = set()
        for cron_atom in atoms:
            # parse_atom only handles static cases; special atoms are
            # resolved dynamically elsewhere.
            if not is_special_atom(cron_atom, span):
                field_values.update(parse_atom(cron_atom, span))
        self.numerical_tab.append(field_values)
    # Cron semantics: when exactly one of day-of-month (index 2) and
    # day-of-week (index 4) is restricted, the wildcard one must not match.
    if self.string_tab[2] == "*" and self.string_tab[4] != "*":
        self.numerical_tab[2] = set()
    elif self.string_tab[4] == "*" and self.string_tab[2] != "*":
        self.numerical_tab[4] = set()
|
def _connect(self):
    """Connect to the EC2 cloud provider.

    The connection (and, when configured, the VPC connection) is cached on
    the instance, so repeated calls are cheap.

    :return: :py:class:`boto.ec2.connection.EC2Connection`
    :raises: Generic exception on error
    """
    # check for existing connection
    if self._ec2_connection:
        return self._ec2_connection
    try:
        log.debug("Connecting to EC2 endpoint %s", self._ec2host)
        # connect to webservice
        ec2_connection = boto.ec2.connect_to_region(
            self._region_name,
            aws_access_key_id=self._access_key,
            aws_secret_access_key=self._secret_key,
            is_secure=self._secure,
            host=self._ec2host,
            port=self._ec2port,
            path=self._ec2path,
        )
        # With the loose setting `BOTO_USE_ENDPOINT_HEURISTICS`
        # which is necessary to work around issue #592, Boto will
        # now accept *any* string as an AWS region name;
        # furthermore, it *always* returns a connection object --
        # so the only way to check that we are not going to run
        # into trouble is to check that there *is* a valid host
        # name on the other end of the connection.
        if ec2_connection.host:
            log.debug("EC2 connection has been successful.")
        else:
            raise CloudProviderError("Cannot establish connection to EC2 region {0}".format(self._region_name))
        if not self._vpc:
            vpc_connection = None
            self._vpc_id = None
        else:
            vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc)
    except Exception as err:
        log.error("Error connecting to EC2: %s", err)
        raise
    # Cache both connections so subsequent calls return immediately.
    self._ec2_connection, self._vpc_connection = (ec2_connection, vpc_connection)
    return self._ec2_connection
|
def add_circle(
    self,
    x0,
    radius,
    lcar=None,
    R=None,
    compound=False,
    num_sections=3,
    holes=None,
    make_surface=True,
):
    """Add circle in the :math:`x`-:math:`y`-plane.

    :param x0: center point of the circle.
    :param radius: circle radius.
    :param lcar: characteristic length attached to the generated points.
    :param R: optional transformation matrix applied to the circle points;
        must preserve circles (all eigenvalues on the unit circle).
    :param compound: if True, merge the arcs into one compound curve.
    :param num_sections: number of circle arcs approximating the circle.
    :param holes: line loops to subtract from the plane surface.
    :param make_surface: whether to create a plane surface bounded by the
        circle.
    :return: a ``Circle`` helper object bundling the created entities.
    """
    if holes is None:
        holes = []
    else:
        # Holes only make sense when a surface is actually created.
        assert make_surface
    # Define points that make the circle (midpoint and the four cardinal
    # directions).
    X = numpy.zeros((num_sections + 1, len(x0)))
    if num_sections == 4:
        # For accuracy, the points are provided explicitly.
        X[1:, [0, 1]] = numpy.array(
            [[radius, 0.0], [0.0, radius], [-radius, 0.0], [0.0, -radius]]
        )
    else:
        X[1:, [0, 1]] = numpy.array(
            [
                [
                    radius * numpy.cos(2 * numpy.pi * k / num_sections),
                    radius * numpy.sin(2 * numpy.pi * k / num_sections),
                ]
                for k in range(num_sections)
            ]
        )
    if R is not None:
        assert numpy.allclose(
            abs(numpy.linalg.eigvals(R)), numpy.ones(X.shape[1])
        ), "The transformation matrix doesn't preserve circles; at least one eigenvalue lies off the unit circle."
        X = numpy.dot(X, R.T)
    X += x0
    # Add Gmsh Points.
    p = [self.add_point(x, lcar=lcar) for x in X]
    # Define the circle arcs.  p[0] is the midpoint; each arc spans two
    # consecutive boundary points around it, with a final arc closing the loop.
    arcs = [self.add_circle_arc(p[k], p[0], p[k + 1]) for k in range(1, len(p) - 1)]
    arcs.append(self.add_circle_arc(p[-1], p[0], p[1]))
    if compound:
        # Compound syntax differs between Gmsh major versions.
        if self._gmsh_major() == 3:
            arcs = [self.add_compound_line(arcs)]
        elif self._gmsh_major() == 4:
            self.add_raw_code(
                "Compound Curve{{{}}};".format(",".join([arc.id for arc in arcs]))
            )
    line_loop = self.add_line_loop(arcs)
    if make_surface:
        plane_surface = self.add_plane_surface(line_loop, holes)
        if compound and self._gmsh_major() == 4:
            self.add_raw_code("Compound Surface{{{}}};".format(plane_surface.id))
    else:
        plane_surface = None

    class Circle(object):
        # Lightweight record bundling the entities that make up the circle.
        def __init__(
            self,
            x0,
            radius,
            R,
            compound,
            num_sections,
            holes,
            line_loop,
            plane_surface,
            lcar=None,
        ):
            self.x0 = x0
            self.radius = radius
            self.lcar = lcar
            self.R = R
            self.compound = compound
            self.num_sections = num_sections
            self.holes = holes
            self.line_loop = line_loop
            self.plane_surface = plane_surface
            return

    return Circle(
        x0, radius, R, compound, num_sections, holes, line_loop, plane_surface,
        lcar=lcar,
    )
|
def _parse_message_to_mqtt(self, data):
    """Parse a mysensors command string.

    Return a MQTT topic, payload and qos-level as a tuple.
    """
    msg = Message(data, self)
    payload = str(msg.payload)
    # The payload travels separately; blank it out of the encoded topic.
    msg.payload = ''
    # Topic layout: prefix/node/child/type/ack/subtype.
    # The [:-2] trims the trailing characters emitted by encode('/')
    # after the (now empty) payload -- TODO confirm exact trailing chars.
    topic = '{}/{}'.format(self._out_prefix, msg.encode('/'))[:-2]
    return topic, payload, msg.ack
|
def discharge_coefficient_to_K(D, Do, C):
    r'''Convert a discharge coefficient to a standard loss coefficient, for
    use in computation of the actual pressure drop of an orifice or other
    device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the
        fluid just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost be
    matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. MFC-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    # beta is the diameter ratio; beta^2 and beta^4 appear in the formula.
    ratio = Do / D
    ratio2 = ratio * ratio
    ratio4 = ratio2 * ratio2
    return ((1.0 - ratio4 * (1.0 - C * C)) ** 0.5 / (C * ratio2) - 1.0) ** 2
|
def num_events(self):
    """Lazily compute and cache the number of events.

    The count is fetched from the backing collection only once; subsequent
    calls return the cached value.
    """
    # 'is None' rather than truthiness, so a legitimate count of 0 is
    # cached too instead of being re-queried on every access.
    # NOTE(review): assumes the uninitialized sentinel is None -- confirm
    # against __init__.
    if self._num_events is None:
        self._num_events = self.coll_handle.count()
    return self._num_events
|
def _print_if_needed ( self ) :
"""Assumes you hold the lock"""
|
if self . _in_txn or self . print_frequency is None :
return
elif self . last_export is not None and self . last_export + self . print_frequency > time . time ( ) :
return
self . export ( )
|
def sealedbox_encrypt(data, **kwargs):
    '''Encrypt data using a public key generated from `nacl.keygen`.

    The encrypted data can be decrypted using `nacl.sealedbox_decrypt` only
    with the secret key.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.sealedbox_encrypt datatoenc
        salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
        salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
    '''
    # ensure data is in bytes before sealing
    raw = salt.utils.stringutils.to_bytes(data)
    box = libnacl.sealed.SealedBox(_get_pk(**kwargs))
    return base64.b64encode(box.encrypt(raw))
|
def _check_argument_units ( args , dimensionality ) :
"""Yield arguments with improper dimensionality ."""
|
for arg , val in args . items ( ) : # Get the needed dimensionality ( for printing ) as well as cached , parsed version
# for this argument .
try :
need , parsed = dimensionality [ arg ]
except KeyError : # Argument did not have units specified in decorator
continue
# See if the value passed in is appropriate
try :
if val . dimensionality != parsed :
yield arg , val . units , need
# No dimensionality
except AttributeError : # If this argument is dimensionless , don ' t worry
if parsed != '' :
yield arg , 'none' , need
|
def edge_val_dump(self):
    """Yield the entire contents of the edge_val table."""
    unpack = self.unpack  # hoist the bound method out of the loop
    self._flush_edge_val()
    for graph, orig, dest, idx, key, branch, turn, tick, value in \
            self.sql('edge_val_dump'):
        # idx / branch / turn / tick are stored natively; the rest are packed.
        yield (unpack(graph), unpack(orig), unpack(dest), idx,
               unpack(key), branch, turn, tick, unpack(value))
|
def calc_qiga1_v1(self):
    """Perform the runoff concentration calculation for the first
    interflow component.

    The working equation is the analytical solution of the linear storage
    equation under the assumption of constant change in inflow during
    the simulation time step.

    Required derived parameter:
      |KI1|

    Required state sequence:
      |QIGZ1|

    Calculated state sequence:
      |QIGA1|

    Basic equation:
      :math:`QIGA1_{neu} = QIGA1_{alt} +
      (QIGZ1_{alt}-QIGA1_{alt}) \\cdot (1-exp(-KI1^{-1})) +
      (QIGZ1_{neu}-QIGZ1_{alt}) \\cdot (1-KI1\\cdot(1-exp(-KI1^{-1})))`

    Examples:

        A normal test case:

        >>> from hydpy.models.lland import *
        >>> parameterstep()
        >>> derived.ki1(0.1)
        >>> states.qigz1.old = 2.0
        >>> states.qigz1.new = 4.0
        >>> states.qiga1.old = 3.0
        >>> model.calc_qiga1_v1()
        >>> states.qiga1
        qiga1(3.800054)

        First extreme test case (zero division is circumvented):

        >>> derived.ki1(0.0)
        >>> model.calc_qiga1_v1()
        >>> states.qiga1
        qiga1(4.0)

        Second extreme test case (numerical overflow is circumvented):

        >>> derived.ki1(1e500)
        >>> model.calc_qiga1_v1()
        >>> states.qiga1
        qiga1(5.0)
    """
    # Shortcuts to the fast-access containers for parameters and states.
    der = self.parameters.derived.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    if der.ki1 <= 0.:
        # Degenerate storage constant: outflow follows inflow directly,
        # avoiding the division by zero below.
        new.qiga1 = new.qigz1
    elif der.ki1 > 1e200:
        # Extremely large storage constant: use the limit of the working
        # equation to avoid numerical overflow in exp().
        new.qiga1 = old.qiga1 + new.qigz1 - old.qigz1
    else:
        # Analytical solution of the linear storage equation.
        d_temp = (1. - modelutils.exp(-1. / der.ki1))
        new.qiga1 = (old.qiga1 + (old.qigz1 - old.qiga1) * d_temp +
                     (new.qigz1 - old.qigz1) * (1. - der.ki1 * d_temp))
|
def change_columns(self, model, **fields):
    """Change fields.

    :param model: peewee model whose columns are being altered.
    :param fields: mapping of field name -> new peewee field instance.
    :return: the (mutated) model.
    """
    for name, field in fields.items():
        # Fall back to the new field itself when the model has no previous
        # definition for this name.
        old_field = model._meta.fields.get(name, field)
        old_column_name = old_field and old_field.column_name
        model._meta.add_field(name, field)
        if isinstance(old_field, pw.ForeignKeyField):
            # The FK constraint must be dropped before the column can change.
            self.ops.append(self.migrator.drop_foreign_key_constraint(
                model._meta.table_name, old_column_name))
        if old_column_name != field.column_name:
            self.ops.append(self.migrator.rename_column(
                model._meta.table_name, old_column_name, field.column_name))
        if isinstance(field, pw.ForeignKeyField):
            on_delete = field.on_delete if field.on_delete else 'RESTRICT'
            on_update = field.on_update if field.on_update else 'RESTRICT'
            self.ops.append(self.migrator.add_foreign_key_constraint(
                model._meta.table_name, field.column_name,
                field.rel_model._meta.table_name, field.rel_field.name,
                on_delete, on_update))
            # Foreign keys are fully handled above; skip the generic path.
            continue
        self.ops.append(self.migrator.change_column(
            model._meta.table_name, field.column_name, field))
        if field.unique == old_field.unique:
            continue
        # Uniqueness changed: add or drop the single-column unique index.
        if field.unique:
            index = (field.column_name,), field.unique
            self.ops.append(self.migrator.add_index(model._meta.table_name, *index))
            model._meta.indexes.append(index)
        else:
            index = (field.column_name,), old_field.unique
            self.ops.append(self.migrator.drop_index(model._meta.table_name, *index))
            model._meta.indexes.remove(index)
    return model
|
def _bnd(self, xloc, dist, cache):
    """Distribution bounds, mapped back through the log transform."""
    # Evaluate the underlying distribution's bounds at exp(xloc),
    # then take the log of the result.
    bounds = evaluation.evaluate_bound(dist, numpy.e ** xloc, cache=cache)
    return numpy.log(bounds)
|
def _apply_all(self, sat):
    """Apply all of the custom functions to the satellite data object.

    Functions are applied in registration order according to their kind:

    * ``'add'``    -- receives a copy of ``sat`` and returns new data that
      is attached to the original instrument object,
    * ``'modify'`` -- mutates ``sat`` in place and must return None,
    * ``'pass'``   -- receives a copy; must return None.
    """
    if len(self._functions) > 0:
        for func, arg, kwarg, kind in zip(self._functions, self._args,
                                          self._kwargs, self._kind):
            # Only apply custom functions when data is actually loaded.
            if len(sat.data) > 0:
                if kind == 'add':
                    # apply custom functions that add data to the
                    # instrument object; work on a copy so the function
                    # cannot mutate sat directly
                    tempd = sat.copy()
                    newData = func(tempd, *arg, **kwarg)
                    del tempd
                    # process different types of data returned by the
                    # function; if a dict is returned, data is in 'data'
                    if isinstance(newData, dict):
                        # if DataFrame returned, add Frame to existing frame
                        if isinstance(newData['data'], pds.DataFrame):
                            sat[newData['data'].columns] = newData
                        # if a series is returned, add it as a column
                        elif isinstance(newData['data'], pds.Series):
                            # look for name attached to series first
                            if newData['data'].name is not None:
                                sat[newData['data'].name] = newData
                            # look if name is provided as part of dict
                            # returned from function
                            elif 'name' in newData.keys():
                                name = newData.pop('name')
                                sat[name] = newData
                            # couldn't find name information
                            else:
                                raise ValueError('Must assign a name to ' +
                                                 'Series or return a ' +
                                                 '"name" in dictionary.')
                        # some kind of iterable was returned
                        elif hasattr(newData['data'], '__iter__'):
                            # look for name in returned dict
                            if 'name' in newData.keys():
                                name = newData.pop('name')
                                sat[name] = newData
                            else:
                                raise ValueError('Must include "name" in ' +
                                                 'returned dictionary.')
                    # bare DataFrame is returned
                    elif isinstance(newData, pds.DataFrame):
                        sat[newData.columns] = newData
                    # bare Series is returned, name must be attached to
                    # Series
                    elif isinstance(newData, pds.Series):
                        sat[newData.name] = newData
                    # some kind of iterable returned,
                    # presuming (name, data)
                    # or ([name1, ...], [data1, ...])
                    elif hasattr(newData, '__iter__'):
                        # falling back to older behavior
                        # unpack tuple/list that was returned
                        newName = newData[0]
                        newData = newData[1]
                        if len(newData) > 0:
                            # doesn't really check ensure data, there could
                            # be multiple empty arrays returned, [[], []]
                            if isinstance(newName, str):
                                # one item to add
                                sat[newName] = newData
                            else:
                                # multiple items
                                for name, data in zip(newName, newData):
                                    if len(data) > 0:
                                        # fixes up the incomplete check
                                        # from before
                                        sat[name] = data
                    else:
                        raise ValueError("kernel doesn't know what to do " +
                                         "with returned data.")
                # modifying loaded data
                if kind == 'modify':
                    t = func(sat, *arg, **kwarg)
                    if t is not None:
                        raise ValueError('Modify functions should not ' +
                                         'return any information via ' +
                                         'return. Information may only be' +
                                         ' propagated back by modifying ' +
                                         'supplied pysat object.')
                # pass function (function runs, no data allowed back)
                if kind == 'pass':
                    tempd = sat.copy()
                    t = func(tempd, *arg, **kwarg)
                    del tempd
                    if t is not None:
                        raise ValueError('Pass functions should not ' +
                                         'return any information via ' +
                                         'return.')
|
def row(self):
    """Game dataset as an ordered row.

    :return: OrderedDict mapping column name -> attribute value, covering
        the retrosheet game id, game type and description, season/playoff
        flags, local game time, game id, home/away team ids, leagues and
        names, interleague flag, and park id/name/location.
    """
    # Column order matters for downstream consumers, hence the tuple.
    columns = (
        'retro_game_id', 'game_type', 'game_type_des', 'st_fl',
        'regseason_fl', 'playoff_fl', 'local_game_time', 'game_id',
        'home_team_id', 'home_team_lg', 'away_team_id', 'away_team_lg',
        'home_team_name', 'away_team_name', 'home_team_name_full',
        'away_team_name_full', 'interleague_fl', 'park_id', 'park_name',
        'park_loc',
    )
    return OrderedDict((name, getattr(self, name)) for name in columns)
|
def get_s3_buckets(api_client, s3_info, s3_params):
    """List all available buckets and fan out per-bucket collection.

    :param api_client:
    :param s3_info:
    :param s3_params:
    :return:
    """
    manage_dictionary(s3_info, 'buckets', {})
    region = get_s3_list_region(s3_params['selected_regions'])
    all_buckets = api_client[region].list_buckets()['Buckets']
    skipped = s3_params['skipped_buckets']
    checked = s3_params['checked_buckets']
    # Keep only buckets of interest: not explicitly skipped, and (when a
    # check-list was given) explicitly listed in it.
    targets = [bucket for bucket in all_buckets
               if bucket['Name'] not in skipped
               and not (len(checked) and bucket['Name'] not in checked)]
    s3_info['buckets_count'] = len(targets)
    s3_params['api_clients'] = api_client
    s3_params['s3_info'] = s3_info
    thread_work(targets, get_s3_bucket, params=s3_params, num_threads=30)
    show_status(s3_info)
    # Recount after the workers have populated s3_info['buckets'].
    s3_info['buckets_count'] = len(s3_info['buckets'])
    return s3_info
|
def broadcast_impl(self, old_slices, old_shape, new_shape):
    """Implementation of a broadcast operation.

    Args:
        old_slices: LaidOutTensor.
        old_shape: Shape.
        new_shape: Shape.

    Returns:
        LaidOutTensor.
    """
    target_slice_shape = self.slice_shape(new_shape)

    def _broadcast_slice(x):
        # Adding a zero tensor of the target slice shape forces TF's
        # broadcasting rules to expand x into the new shape.
        return tf.zeros(target_slice_shape, dtype=x.dtype) + _expand_dims(
            x, old_shape, new_shape)

    return self.slicewise(_broadcast_slice, old_slices)
|
def handle_new_config(args):
    """usage: cosmic-ray new-config <config-file>

    Create a new config file.
    """
    serialized = serialize_config(cosmic_ray.commands.new_config())
    with open(args['<config-file>'], mode='wt') as handle:
        handle.write(serialized)
    return ExitCode.OK
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.