signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_sqla_coltype_from_dialect_str(coltype: str,
                                      dialect: Dialect) -> TypeEngine:
    """Return an SQLAlchemy column type, given a column type name (a string)
    and an SQLAlchemy dialect. For example, this might convert the string
    ``INTEGER(11)`` to an SQLAlchemy ``Integer(length=11)``.

    Args:
        coltype: a ``str()`` representation, e.g. from ``str(c['type'])``
            where ``c`` is an instance of
            :class:`sqlalchemy.sql.schema.Column`.
        dialect: an SQLAlchemy :class:`Dialect`; its ``ischema_names``
            string-to-class dictionary is used for the lookup.

    Returns:
        an instance of a subclass of :class:`sqlalchemy.types.TypeEngine`.

    Raises:
        ValueError: if the string cannot be converted to an SQLAlchemy
            type for this dialect.

    Notes:
        The textual parameters, e.g. ``DATETIME(6)``, do NOT necessarily
        work directly. SQLAlchemy will happily emit ``'INTEGER(11)'`` but
        its :class:`sqlalchemy.sql.sqltypes.INTEGER` class takes no
        parameters; similarly, MySQL's ``DATETIME(6)`` uses the 6 for
        fractional-second precision, whereas SQLAlchemy's ``DATETIME``
        takes only a boolean ``timezone`` parameter. Hence the special
        cases below. Numeric parameters are parsed with
        ``ast.literal_eval`` — never ``eval``, which is dangerous on
        untrusted input.
    """
    size: Optional[int] = None  # first numeric parameter, e.g. VARCHAR(32)
    dp: Optional[int] = None    # second numeric parameter, e.g. DECIMAL(10, 2)
    args: List[Any] = []
    kwargs: Dict[str, Any] = {}
    basetype = ''
    try:
        # Split e.g. "VARCHAR(32) COLLATE blah" into "VARCHAR(32)"; the
        # collation is irrelevant to the type.
        m = RE_COLTYPE_WITH_COLLATE.match(coltype)
        if m is not None:
            coltype = m.group('maintype')

        found = False

        if not found:
            # Deal with ENUM('a', 'b', 'c', ...): convert to a VARCHAR
            # whose maximum size is that of the largest enum value.
            m = RE_MYSQL_ENUM_COLTYPE.match(coltype)
            if m is not None:
                basetype = 'VARCHAR'
                values = get_list_of_sql_string_literals_from_quoted_csv(
                    m.group('valuelist'))
                kwargs = {'length': max(len(x) for x in values)}
                found = True

        if not found:
            # Split e.g. "DECIMAL(10, 2)" into DECIMAL, 10, 2.
            m = RE_COLTYPE_WITH_TWO_PARAMS.match(coltype)
            if m is not None:
                basetype = m.group('type').upper()
                size = ast.literal_eval(m.group('size'))
                dp = ast.literal_eval(m.group('dp'))
                found = True

        if not found:
            # Split e.g. "VARCHAR(32)" into VARCHAR, 32.
            m = RE_COLTYPE_WITH_ONE_PARAM.match(coltype)
            if m is not None:
                basetype = m.group('type').upper()
                size_text = m.group('size').strip().upper()
                if size_text != 'MAX':  # e.g. SQL Server's VARCHAR(MAX)
                    size = ast.literal_eval(size_text)
                found = True

        if not found:
            # No parameters at all; use the whole string as the type name.
            basetype = coltype.upper()

        # Special cases: pre-processing
        # noinspection PyUnresolvedReferences
        if (dialect.name == SqlaDialectName.MSSQL
                and basetype.lower() == 'integer'):
            basetype = 'int'

        cls = _get_sqla_coltype_class_from_str(basetype, dialect)

        # Special cases: post-processing
        if basetype == 'DATETIME' and size:
            # First argument to DATETIME() is timezone, so the size cannot
            # be passed positionally...
            # noinspection PyUnresolvedReferences
            if dialect.name == SqlaDialectName.MYSQL:
                kwargs = {'fsp': size}  # fractional-second precision
        else:
            args = [x for x in (size, dp) if x is not None]

        try:
            return cls(*args, **kwargs)
        except TypeError:
            # The type class doesn't accept the parameters (see the note in
            # the docstring); fall back to the parameterless form.
            return cls()
    except Exception as e:
        # Chain the original cause so debugging retains the real error;
        # "except Exception" (not bare except) lets KeyboardInterrupt and
        # SystemExit propagate.
        raise ValueError(
            "Failed to convert SQL type {} in dialect {} to an "
            "SQLAlchemy type".format(repr(coltype), repr(dialect.name))
        ) from e
def setCellUser(self, iden):
    '''Switch to another user (admin only).

    This API allows remote admin/service accounts to impersonate a user.
    Used mostly by services that manage their own
    authentication/sessions.
    '''
    # Only admins may impersonate other users.
    if not self.user.admin:
        raise s_exc.AuthDeny(mesg='setCellUser() caller must be admin.')

    target = self.cell.auth.user(iden)
    if target is None:
        raise s_exc.NoSuchUser(iden=iden)

    self.user = target
    return True
def _iter_info(self, niter, level=logging.INFO):
    """Log the iteration number and the maximum mismatch.

    Parameters
    ----------
    niter : int
        1-based iteration number; the mismatch is read from
        ``self.iter_mis[niter - 1]``.
    level : int
        Logging level to emit the message at.

    Returns
    -------
    None
    """
    max_mis = self.iter_mis[niter - 1]
    msg = ' Iter {:<d}. max mismatch = {:8.7f}'.format(niter, max_mis)
    # BUGFIX: honour the ``level`` argument instead of always logging at
    # INFO (the parameter was previously accepted but ignored).
    logger.log(level, msg)
def get_mst(points):
    """Compute the minimum spanning tree of ``points``.

    Parameters
    ----------
    points : list of points (geometry.Point)
        The first element of the list is the center of the bounding box of
        the first stroke, the second one belongs to the second stroke, ...

    Returns
    -------
    mst : square matrix
        0 means the nodes are not connected, > 0 means they are connected.
    """
    graph = Graph()
    for point in points:
        graph.add_node(point)
    graph.generate_euclidean_edges()
    matrix = scipy.sparse.csgraph.minimum_spanning_tree(graph.w)
    mst = matrix.toarray().astype(int)
    # minimum_spanning_tree stores each edge only once (the matrix is not
    # symmetrical); mirror each stored edge across the diagonal. This
    # replaces the former redundant double-write loop (which visited every
    # (i, j) pair twice and re-assigned values already set).
    n_nodes = len(mst)
    for i in range(n_nodes):
        for j in range(i + 1, n_nodes):
            if mst[i][j] > 0:
                mst[j][i] = mst[i][j]
            elif mst[j][i] > 0:
                mst[i][j] = mst[j][i]
    return mst
def is_allowed(self, name_or_class, mask):  # pragma: no cover
    """Return True if a new connection is allowed for this mask."""
    name = name_or_class.type if isinstance(name_or_class, type) else name_or_class
    info = self.connections[name]

    # Global limit for this connection type.
    limit = self.config[name + '_limit']
    if limit and info['total'] >= limit:
        msg = ("Sorry, there is too much DCC %s active. Please try again "
               "later.") % name.upper()
        self.bot.notice(mask, msg)
        return False

    # A mask with no open connections is always within its personal limit.
    if mask not in info['masks']:
        return True

    # Per-user limit for this connection type.
    limit = self.config[name + '_user_limit']
    if limit and info['masks'][mask] >= limit:
        msg = ("Sorry, you have too many DCC %s active. Close the other "
               "connection(s) or wait a few seconds and try again.") % name.upper()
        self.bot.notice(mask, msg)
        return False

    return True
def _find_last_good_run(build):
    """Find the last good release and run for a build.

    Reads ``run_name`` from the current Flask request's form data and
    returns a ``(last_good_release, last_good_run)`` tuple; either element
    may be None.
    """
    run_name = request.form.get('run_name', type=str)
    utils.jsonify_assert(run_name, 'run_name required')

    last_good_release = (
        models.Release.query
        .filter_by(build_id=build.id, status=models.Release.GOOD)
        .order_by(models.Release.created.desc())
        .first())

    last_good_run = None
    if last_good_release:
        logging.debug('Found last good release for: build_id=%r, '
                      'release_name=%r, release_number=%d',
                      build.id, last_good_release.name,
                      last_good_release.number)
        last_good_run = (
            models.Run.query
            .filter_by(release_id=last_good_release.id, name=run_name)
            .first())
        if last_good_run:
            logging.debug('Found last good run for: build_id=%r, '
                          'release_name=%r, release_number=%d, '
                          'run_name=%r',
                          build.id, last_good_release.name,
                          last_good_release.number, last_good_run.name)

    return last_good_release, last_good_run
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int,
                            old_root: bytes, new_root: bytes,
                            proof: Sequence[bytes]) -> bool:
    """Verify the consistency between two Merkle tree root hashes.

    ``old_tree_size`` must be <= ``new_tree_size``.

    Args:
        old_tree_size: size of the older tree.
        new_tree_size: size of the newer tree.
        old_root: the root hash of the older tree.
        new_root: the root hash of the newer tree.
        proof: the consistency proof (a sequence of node hashes).

    Returns:
        True. The return value is enforced by a decorator and need not be
        checked by the caller.

    Raises:
        ConsistencyError: the proof indicates an inconsistency
            (this is usually really serious!).
        ProofError: the proof is invalid.
        ValueError: supplied tree sizes are invalid.
    """
    old_size = old_tree_size
    new_size = new_tree_size
    if old_size < 0 or new_size < 0:
        raise ValueError("Negative tree size")
    if old_size > new_size:
        raise ValueError("Older tree has bigger size (%d vs %d), did "
                         "you supply inputs in the wrong order?" %
                         (old_size, new_size))
    if old_size == new_size:
        # Equal sizes: roots must match byte-for-byte; any proof is moot.
        if old_root == new_root:
            if proof:
                logging.debug("Trees are identical, ignoring proof")
            return True
        else:
            raise error.ConsistencyError("Inconsistency: different root "
                                         "hashes for the same tree size")
    if old_size == 0:
        if proof:
            # A consistency proof with an empty tree is an empty proof.
            # Anything is consistent with an empty tree, so ignore whatever
            # bogus proof was supplied. Note we do not verify here that the
            # root hash is a valid hash for an empty tree.
            logging.debug("Ignoring non-empty consistency proof for "
                          "empty tree.")
        return True
    # Now 0 < old_size < new_size.
    # A consistency proof is essentially an audit proof for the node with
    # index old_size - 1 in the newer tree. The sole difference is that
    # the path is already hashed together into a single hash up until the
    # first audit node that occurs in the newer tree only.
    node = old_size - 1
    last_node = new_size - 1
    # While we are the right child, everything is in both trees,
    # so move one level up.
    while node % 2:
        node //= 2
        last_node //= 2
    p = iter(proof)
    try:
        if node:
            # Compute the two root hashes in parallel.
            new_hash = old_hash = next(p)
        else:
            # The old tree was balanced (2**k nodes), so we already have
            # the first root hash.
            new_hash = old_hash = old_root
        while node:
            if node % 2:
                # node is a right child: left sibling exists in both trees.
                next_node = next(p)
                old_hash = self.hasher.hash_children(next_node, old_hash)
                new_hash = self.hasher.hash_children(next_node, new_hash)
            elif node < last_node:
                # node is a left child: right sibling only exists in the
                # newer tree.
                new_hash = self.hasher.hash_children(new_hash, next(p))
            # else node == last_node: node is a left child with no sibling
            # in either tree.
            node //= 2
            last_node //= 2
        # Now old_hash is the hash of the first subtree. If the two trees
        # have different height, continue the path until the new root.
        while last_node:
            n = next(p)
            new_hash = self.hasher.hash_children(new_hash, n)
            last_node //= 2
        # If the second hash does not match, the proof is invalid for the
        # given pair. If, on the other hand, the newer hash matches but the
        # older one doesn't, then the proof (together with the signatures
        # on the hashes) is proof of inconsistency.
        # Continue to find out.
        if new_hash != new_root:
            raise error.ProofError("Bad Merkle proof: second root hash "
                                   "does not match. Expected hash: %s "
                                   ", computed hash: %s" %
                                   (hexlify(new_root).strip(),
                                    hexlify(new_hash).strip()))
        elif old_hash != old_root:
            raise error.ConsistencyError("Inconsistency: first root hash "
                                         "does not match. Expected hash: "
                                         "%s, computed hash: %s" %
                                         (hexlify(old_root).strip(),
                                          hexlify(old_hash).strip()))
    except StopIteration:
        raise error.ProofError("Merkle proof is too short")
    # We've already verified consistency, so accept the proof even if
    # there's garbage left over (but log a warning).
    try:
        next(p)
    except StopIteration:
        pass
    else:
        logging.debug("Proof has extra nodes")
    return True
def _assignSignature ( self , chip ) :
"""Assign a unique signature for the image based
on the instrument , detector , chip , and size
this will be used to uniquely identify the appropriate
static mask for the image .
This also records the filename for the static mask to the outputNames dictionary .""" | sci_chip = self . _image [ self . scienceExt , chip ]
ny = sci_chip . _naxis1
nx = sci_chip . _naxis2
detnum = sci_chip . detnum
instr = self . _instrument
sig = ( instr + self . _detector , ( nx , ny ) , int ( detnum ) )
# signature is a tuple
sci_chip . signature = sig |
def _validate_namespace ( self , namespace ) :
"""Validates a namespace , raising a ResponseFailed error if invalid .
Args :
state _ root ( str ) : The state _ root to validate
Raises :
ResponseFailed : The state _ root was invalid , and a status of
INVALID _ ROOT will be sent with the response .""" | if self . _namespace_regex . fullmatch ( namespace ) is None :
LOGGER . debug ( 'Invalid namespace: %s' , namespace )
raise _ResponseFailed ( self . _status . INVALID_ADDRESS ) |
def send_vm_info(self, vm_info):
    """Send VM info to the compute host.

    Returns True on success (or when there is nothing to send), False if
    delivery to the agent failed.
    """
    agent_host = vm_info.get('host')
    if not agent_host:
        # Nothing to deliver; treat an unbound vm/port as success.
        LOG.info("vm/port is not bound to host, not sending vm info")
        return True
    try:
        self.neutron_event.send_vm_info(agent_host, str(vm_info))
    except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
        # Failed to send info to the agent. Keep the data in the
        # database as failure to send it later.
        LOG.error('Failed to send VM info to agent %s', agent_host)
        return False
    return True
def _arrays_to_sections(self, arrays):
    '''Convert unprocessed numpy arrays into image "sections".

    input: unprocessed numpy arrays.
    returns: columns of the size that they will appear in the image, not
    scaled for display. That needs to wait until after variance is
    computed.
    '''
    sections = []
    # Maps array index -> target height for sections whose resize is
    # deliberately deferred (see comment below).
    sections_to_resize_later = {}
    show_all = self.config['show_all']
    image_width = self._determine_image_width(arrays, show_all)
    for array_number, array in enumerate(arrays):
        rank = len(array.shape)
        section_height = self._determine_section_height(array, show_all)
        if rank == 1:
            # 1-D arrays become a single-row 2-D section.
            section = np.atleast_2d(array)
        elif rank == 2:
            section = array
        elif rank == 4:
            # Rank 4: presumably convolutional weights — reshaped by helper.
            section = self._reshape_conv_array(array, section_height, image_width)
        else:
            section = self._reshape_irregular_array(array, section_height, image_width)
        # Only calculate variance for what we have to. In some cases (biases),
        # the section is larger than the array, so we don't want to calculate
        # variance for the same value over and over - better to resize later.
        # About a 6-7x speedup for a big network with a big variance window.
        section_size = section_height * image_width
        array_size = np.prod(array.shape)
        if section_size > array_size:
            sections.append(section)
            sections_to_resize_later[array_number] = section_height
        else:
            sections.append(im_util.resize(section, section_height, image_width))
    self.sections_over_time.append(sections)
    if self.config['mode'] == 'variance':
        # Replace raw sections with their variance over the stored history.
        sections = self._sections_to_variance_sections(self.sections_over_time)
    # Now that variance has been computed, do the deferred resizes.
    for array_number, height in sections_to_resize_later.items():
        sections[array_number] = im_util.resize(sections[array_number], height, image_width)
    return sections
def check_streamers(self, blacklist=None):
    """Check if any streamers are ready to produce a report.

    You can limit what streamers are checked by passing a set-like
    object into ``blacklist``.

    This method is the primary way to see when you should poll a given
    streamer for its next report.

    Note, this function is not idempotent. If a streamer is marked as
    manual and it is triggered from a node rule inside the sensor_graph,
    that trigger will only last as long as the next call to
    check_streamers() so you need to explicitly build a report on all
    ready streamers before calling check_streamers() again.

    Args:
        blacklist (set): Optional set of streamer indices that should
            not be checked right now.

    Returns:
        list of DataStreamer: A list of the ready streamers.
    """
    ready = []
    selected = set()

    for i, streamer in enumerate(self.streamers):
        if blacklist is not None and i in blacklist:
            continue
        if i in selected:
            continue

        # Manual triggers are one-shot: consume the mark now.
        marked = i in self._manually_triggered_streamers
        if marked:
            self._manually_triggered_streamers.remove(i)

        if streamer.triggered(marked):
            self._logger.debug("Streamer %d triggered, manual=%s", i, marked)
            ready.append(streamer)
            selected.add(i)

            # Handle streamers triggered along with this one. BUGFIX:
            # enumerate over the slice self.streamers[i:] previously yielded
            # 0-based indices, so ``selected`` was updated with (and checked
            # against) the wrong streamer numbers; start=i makes j the true
            # index of streamer2.
            for j, streamer2 in enumerate(self.streamers[i:], start=i):
                if (streamer2.with_other == i and j not in selected
                        and streamer2.triggered(True)):
                    self._logger.debug(
                        "Streamer %d triggered due to with-other on %d", j, i)
                    ready.append(streamer2)
                    selected.add(j)

    return ready
def PrimaryHDU(model):
    '''Construct the primary HDU file containing basic header info.'''
    # Start from the mission-specific header cards.
    cards = model._mission.HDUCards(model.meta, hdu=0)
    if 'KEPMAG' not in [card[0] for card in cards]:
        cards.append(('KEPMAG', model.mag, 'Kepler magnitude'))

    # Append the EVEREST pipeline information block.
    cards.extend([
        ('COMMENT', '************************'),
        ('COMMENT', '* EVEREST INFO *'),
        ('COMMENT', '************************'),
        ('MISSION', model.mission, 'Mission name'),
        ('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'),
        ('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'),
        ('DATE', strftime('%Y-%m-%d'),
         'EVEREST file creation date (YYYY-MM-DD)'),
    ])

    # Build the HDU from the assembled header cards.
    header = pyfits.Header(cards=cards)
    return pyfits.PrimaryHDU(header=header)
def generate_context(context_file='cookiecutter.json', default_context=None,
                     extra_context=None):
    """Generate the context for a Cookiecutter project template.

    Loads the JSON file as a Python object, with the key being the JSON
    filename stem.

    :param context_file: JSON file containing key/value pairs for populating
        the cookiecutter's variables.
    :param default_context: Dictionary containing config to take into
        account.
    :param extra_context: Dictionary containing configuration overrides.
    :return: An ``OrderedDict`` mapping the file stem to the loaded object.
    :raises ContextDecodingException: if the file is not valid JSON.
    """
    context = OrderedDict()  # was OrderedDict([]) — same, but idiomatic
    try:
        with open(context_file) as file_handle:
            obj = json.load(file_handle, object_pairs_hook=OrderedDict)
    except ValueError as e:
        # JSON decoding error. Raise a friendlier exception that includes
        # the full path of the offending file; chain the original cause.
        full_fpath = os.path.abspath(context_file)
        our_exc_message = (
            'JSON decoding error while loading "{0}". Decoding'
            ' error details: "{1}"'.format(full_fpath, str(e)))
        raise ContextDecodingException(our_exc_message) from e

    # Add the Python object to the context dictionary.
    file_name = os.path.split(context_file)[1]
    file_stem = file_name.split('.')[0]
    context[file_stem] = obj

    # Overwrite context variable defaults with the default context from the
    # user's global config, then with any explicit extra context.
    if default_context:
        apply_overwrites_to_context(obj, default_context)
    if extra_context:
        apply_overwrites_to_context(obj, extra_context)

    # %-style lazy formatting: the repr is only built if DEBUG is enabled.
    logger.debug('Context generated is %s', context)
    return context
def search_mappings(kb, key=None, value=None, match_type=None, sortby=None,
                    page=None, per_page=None):
    """Search tags for knowledge."""
    # Only "written_as" knowledge bases support mapping search.
    if kb.kbtype != models.KnwKB.KNWKB_TYPES['written_as']:
        return []
    query = api.query_kb_mappings(
        kbid=kb.id,
        key=key or '',
        value=value or '',
        match_type=match_type or 's',
        sortby=sortby or 'to',
    )
    paginated = pagination.RestfulSQLAlchemyPagination(
        query, page=page or 1, per_page=per_page or 10)
    return paginated.items
def draw(self, mode="triangles"):
    """Draw the collection.

    Depth-buffer writes are disabled around the base-class draw call —
    presumably so these items do not occlude each other via the depth
    test; TODO(review): confirm intent.

    Parameters
    ----------
    mode : str
        Primitive mode passed through to ``Collection.draw``.
    """
    gl.glDepthMask(0)  # disable depth writes
    Collection.draw(self, mode)
    gl.glDepthMask(1)  # re-enable depth writes
def _pre_compute_secondary ( self , positive_vals , negative_vals ) :
"""Compute secondary y min and max""" | self . _secondary_max = max ( max ( positive_vals ) , max ( negative_vals ) )
self . _secondary_min = - self . _secondary_max |
def _computeUniqueReadCounts ( self ) :
"""Add all pathogen / sample combinations to self . pathogenSampleFiles .
This will make all de - duplicated ( by id ) FASTA / FASTQ files and store
the number of de - duplicated reads into C { self . pathogenNames } .""" | for pathogenName , samples in self . pathogenNames . items ( ) :
for sampleName in samples :
self . pathogenSampleFiles . add ( pathogenName , sampleName ) |
def _is_intrinsic_dict ( self , input ) :
"""Can the input represent an intrinsic function in it ?
: param input : Object to be checked
: return : True , if the input contains a supported intrinsic function . False otherwise""" | # All intrinsic functions are dictionaries with just one key
return isinstance ( input , dict ) and len ( input ) == 1 and list ( input . keys ( ) ) [ 0 ] in self . supported_intrinsics |
def filter_tree(tree):
    """Remove all <urldata> elements whose check failed with a 401 error."""
    def _failed_with_401(elem):
        valid = elem.find('valid')
        return (valid is not None and valid.text == '0'
                and valid.attrib.get('result', '').startswith('401'))

    root = tree.getroot()
    # Collect first, then remove, so we never mutate while iterating.
    for elem in [e for e in tree.findall('urldata') if _failed_with_401(e)]:
        root.remove(elem)
def pipeline_refine(d0, candloc, scaledm=2.1, scalepix=2, scaleuv=1.0,
                    chans=[], returndata=False):
    """Reproduce a candidate and potentially improve sensitivity through
    better DM and imaging parameters.

    The scale* parameters enhance sensitivity by refining the DM grid and
    enlarging the images. Other options include:
    d0['selectpol'] = ['RR']
    d0['flaglist'] = [('blstd', 2.5, 0.05)]

    NOTE(review): Python 2-era code (``dict.iteritems``, numpy aliased as
    ``n``); ``chans=[]`` is a mutable default, safe only because it is
    never mutated here.
    """
    import rtpipe.parseparams as pp
    assert len(candloc) == 6, 'candloc should be (scan, segment, candint, dmind, dtind, beamnum).'
    scan, segment, candint, dmind, dtind, beamnum = candloc
    d1 = d0.copy()
    # dont mess with original (mutable!)
    segmenttimes = d1['segmenttimesdict'][scan]
    # if file not at stated full path, assume it is local
    if not os.path.exists(d1['filename']):
        workdir = os.getcwd()
        filename = os.path.join(workdir, os.path.basename(d1['filename']))
    else:
        filename = d1['filename']
    # clean up d1 of superfluous keys
    params = pp.Params()
    # will be used as input to rt.set_pipeline
    for key in d1.keys():
        if not hasattr(params, key):
            _ = d1.pop(key)
    d1['npix'] = 0; d1['uvres'] = 0
    d1['savecands'] = False
    d1['savenoise'] = False
    d1['logfile'] = False
    # redefine d. many parameters modified after this to keep from messing
    # up time boundaries/cand location
    d = set_pipeline(filename, scan, **d1)
    if chans:
        d['chans'] = chans
    d['segmenttimes'] = segmenttimes
    d['nsegments'] = len(segmenttimes)
    # Shared-memory buffers for the visibility data and uvw coordinates.
    data_mem = mps.Array(mps.ctypes.c_float, datasize(d) * 2)
    u_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
    v_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
    w_mem = mps.Array(mps.ctypes.c_float, d['nbl'])
    data = numpyview(data_mem, 'complex64', datashape(d))
    u = numpyview(u_mem, 'float32', d['nbl'])
    v = numpyview(v_mem, 'float32', d['nbl'])
    w = numpyview(w_mem, 'float32', d['nbl'])
    # fill data, uvw
    data[:] = pipeline_reproduce(d, segment=segment, product='data')
    d['segment'] = segment
    u[:], v[:], w[:] = ps.get_uvw_segment(d, segment)
    # refine parameters
    dmcand = d['dmarr'][dmind]
    if scaledm > 1.:
        # Estimate the local DM grid spacing from the neighbouring grid
        # points, falling back to 10% of the candidate DM at the edges.
        try:
            dmdelta = d['dmarr'][dmind + 1] - d['dmarr'][dmind]
        except IndexError:
            try:
                dmdelta = d['dmarr'][dmind] - d['dmarr'][dmind - 1]
            except IndexError:
                dmdelta = 0.1 * dmcand
        d['dmarr'] = list(n.arange(dmcand - dmdelta, dmcand + dmdelta, dmdelta / scaledm))
    elif scaledm == 1.:
        d['dmarr'] = [dmcand]
    d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'], dm).max() for dm in d['dmarr']] + [d['datadelay'][-1]]
    d['dtarr'] = [d['dtarr'][dtind]]
    d['npixx'] = scalepix * d['npixx']
    d['npixy'] = scalepix * d['npixy']
    d['uvres'] = scaleuv * d['uvres']
    # search
    logger.info('Refining DM grid to %s and expanding images to (%d, %d) pix with uvres %d' % (str(d['dmarr']), d['npixx'], d['npixy'], d['uvres']))
    cands = search(d, data_mem, u_mem, v_mem, w_mem)
    # Re-key candidate locations to include the scan index.
    cands = {tuple([scan] + list(loc)): list(prop) for (loc, prop) in cands.iteritems()}
    d['featureind'].insert(0, 'scan')
    # making cand plot from this
    # need to keep from confusing old and new indices
    # im, data = rt.pipeline_reproduce(d, loc[candnum], product='imdata')
    # scan, segment, candint, dmind, dtind, beamnum = loc
    # loclabel = scan, segment, candint, dmind, dtind, beamnum
    # make_cand_plot(d, im, data, loclabel, outname=outname)
    # return info to reproduce/visualize refined cands
    if returndata:
        return data
    else:
        return d, cands
def parse_color(src=None):
    # type: (Optional[str]) -> Optional[Union[Tuple[int, ...], int]]
    """Parse a string representing a color value.

    A color is either a fixed color index (when coloring something from the
    UI, see the GLYPHS_COLORS constant) or a tuple string of the form
    "(u8, u8, u8, u8)". Glyphs does not support an alpha channel as of 2.5.1
    (confirmed by Georg Seifert) and always writes a 1 to it; this was
    brought up and is probably corrected in later versions.

    https://github.com/googlei18n/glyphsLib/pull/363#issuecomment-390418497
    """
    if src is None:
        return None

    # Tuple form: strip the parentheses and parse each component.
    if src[0] == "(":
        components = tuple(int(part) for part in src[1:-1].split(",") if part)
        if len(components) != 4 or any(not 0 <= c < 256 for c in components):
            raise ValueError(
                "Broken color tuple: {}. Must have four values from 0 to 255."
                .format(src))
        return components

    # Constant form: a plain integer index.
    return int(src)
def toradialvelocity(self, rf, v0):
    """Convert a Doppler type value (e.g. in radio mode) to a true
    radial velocity. The type of velocity (e.g. *LSRK*) should be
    specified.

    :param rf: radialvelocity reference code (see :meth:`radialvelocity`)
    :param v0: a doppler measure

    Example::

        a = dm.doppler('radio', 0.4)
        dm.toradialvelocity('topo', a)
    """
    # Guard clause: only doppler measures can be converted.
    if not (is_measure(v0) and v0['type'] == 'doppler'):
        raise TypeError('Illegal Doppler specified')
    return self.doptorv(rf, v0)
def rename_set(set=None, new_set=None, family='ipv4'):
    '''.. versionadded:: 2014.7.0

    Rename an ipset set.

    CLI Example:

    .. code-block:: bash

        salt '*' ipset.rename_set custom_set new_set=new_set_name

    IPv6:
        salt '*' ipset.rename_set custom_set new_set=new_set_name family=ipv6
    '''
    # Argument validation: salt convention is to return error strings.
    if not set:
        return 'Error: Set needs to be specified'
    if not new_set:
        return 'Error: New name for set needs to be specified'

    # The source set must exist and the target name must be free.
    if not _find_set_type(set):
        return 'Error: Set does not exist'
    if _find_set_type(new_set):
        return 'Error: New Set already exists'

    cmd = '{0} rename {1} {2}'.format(_ipset_cmd(), set, new_set)
    out = __salt__['cmd.run'](cmd, python_shell=False)
    # ipset prints nothing on success.
    return True if not out else out
def get_objective_objective_bank_assignment_session(self):
    """Gets the session for assigning objective to objective bank mappings.

    return: (osid.learning.ObjectiveObjectiveBankAssignmentSession) - an
        ``ObjectiveObjectiveBankAssignmentSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_objective_objective_bank_assignment()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_objective_bank_assignment()`` is ``true``.*
    """
    # Guard: this is an optional OSID capability; providers that do not
    # support it must raise Unimplemented per the spec.
    if not self.supports_objective_objective_bank_assignment():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.ObjectiveObjectiveBankAssignmentSession(runtime=self._runtime)
def get_keyboard_mapping(self, first_keycode, count):
    """Return the current keyboard mapping as a list of keysym tuples,
    starting at first_keycode and covering no more than count keycodes.

    Issues an X11 GetKeyboardMapping request on this display and returns
    the reply's ``keysyms`` field.
    """
    r = request.GetKeyboardMapping(display=self.display, first_keycode=first_keycode, count=count)
    return r.keysyms
def compute_nats_and_bits_per_dim(data_dim, latent_dim,
                                  average_reconstruction, average_prior):
    """Computes negative ELBO, which is an upper bound on the negative
    likelihood.

    Args:
        data_dim: int-like indicating data dimensionality.
        latent_dim: int-like indicating latent dimensionality.
        average_reconstruction: Scalar Tensor indicating the reconstruction
            cost averaged over all data dimensions and any data batches.
        average_prior: Scalar Tensor indicating the negative log-prior
            probability averaged over all latent dimensions and any data
            batches.

    Returns:
        Tuple of scalar Tensors, representing the nats and bits per data
        dimension (e.g., subpixels) respectively.
    """
    with tf.name_scope(None, default_name="compute_nats_per_dim"):
        # Undo the averaging to recover totals, then form the negative ELBO.
        data_dim_f = tf.cast(data_dim, average_reconstruction.dtype)
        latent_dim_f = tf.cast(latent_dim, average_prior.dtype)
        negative_elbo = (data_dim_f * average_reconstruction
                         + latent_dim_f * average_prior)
        nats_per_dim = tf.divide(negative_elbo, data_dim_f,
                                 name="nats_per_dim")
        bits_per_dim = tf.divide(nats_per_dim, tf.log(2.),
                                 name="bits_per_dim")
        return nats_per_dim, bits_per_dim
def surface_measure(self, param):
    """Density function of the surface measure.

    This is the default implementation relying on the `surface_deriv`
    method. For a detector with `ndim` equal to 1, the density is given by
    the arc length; for a surface with `ndim` 2 in a 3D space, it is the
    length of the cross product of the partial derivatives of the
    parametrization, see Wikipedia's `Surface area`_ article.

    Parameters
    ----------
    param : `array-like` or sequence
        Parameter value(s) at which to evaluate. If ``ndim >= 2``, a
        sequence of length `ndim` must be provided.

    Returns
    -------
    measure : float or `numpy.ndarray`
        The density value(s) at the given parameter(s). If a single
        parameter is provided, a float is returned; otherwise an array
        with shape ``param.shape`` if `ndim` is 1, else
        ``broadcast(*param).shape``.

    References
    ----------
    .. _Arc length:
        https://en.wikipedia.org/wiki/Curve#Lengths_of_curves
    .. _Surface area:
        https://en.wikipedia.org/wiki/Surface_area
    """
    # Argument checking is done by `surface_deriv`.
    if self.ndim == 1:
        # Arc-length density: Euclidean norm of the tangent vector.
        want_scalar = np.shape(param) == ()
        density = np.linalg.norm(self.surface_deriv(param), axis=-1)
        return float(density) if want_scalar else density

    if self.ndim == 2 and self.space_ndim == 3:
        # Surface-area density: norm of the normal vector, i.e. the cross
        # product of the two partial derivatives.
        want_scalar = np.shape(param) == (2,)
        deriv = self.surface_deriv(param)
        if deriv.ndim > 2:
            # Vectorized evaluation: reshape (N, 2, 3) to (2, N, 3).
            deriv = moveaxis(deriv, -2, 0)
        normal = np.cross(*deriv, axis=-1)
        density = np.linalg.norm(normal, axis=-1)
        return float(density) if want_scalar else density

    raise NotImplementedError(
        'no default implementation of `surface_measure` available '
        'for `ndim={}` and `space_ndim={}`'
        ''.format(self.ndim, self.space_ndim))
def _check_requirements(self):
    """Validate the configured IOU image.

    Checks that the image path is set, that the file exists and is
    readable, that it carries a little-endian ELF header (IOU images),
    and that it is executable.

    :raises IOUError: if any of these checks fails
    """
    image = self._path
    if not image:
        raise IOUError("IOU image is not configured")
    if not os.path.isfile(image) or not os.path.exists(image):
        if os.path.islink(image):
            raise IOUError("IOU image '{}' linked to '{}' is not accessible".format(image, os.path.realpath(image)))
        raise IOUError("IOU image '{}' is not accessible".format(image))
    try:
        # Read the first 7 bytes: ELF magic, class (32/64-bit),
        # endianness and ELF version.
        with open(image, "rb") as image_file:
            header = image_file.read(7)
    except OSError as e:
        raise IOUError("Cannot read ELF header for IOU image '{}': {}".format(image, e))
    # IOU images are little-endian ELF version 1 (32- or 64-bit);
    # normal IOS images are big endian and are rejected here.
    if header not in (b'\x7fELF\x01\x01\x01', b'\x7fELF\x02\x01\x01'):
        raise IOUError("'{}' is not a valid IOU image".format(image))
    if not os.access(image, os.X_OK):
        raise IOUError("IOU image '{}' is not executable".format(image))
def sync(
    state, host, source, destination,
    user=None, group=None, mode=None,
    delete=False, exclude=None, exclude_dir=None,
    add_deploy_dir=True,
):
    '''
    Syncs a local directory with a remote one, with delete support. Note that
    delete will remove extra files on the remote side, but not extra
    directories.

    + source: local directory to sync
    + destination: remote directory to sync to
    + user: user to own the files and directories
    + group: group to own the files and directories
    + mode: permissions of the files
    + delete: delete remote files not present locally
    + exclude: string or list/tuple of strings to match & exclude files (eg *.pyc)
    + exclude_dir: string or list/tuple of strings to match & exclude
      directories (eg node_modules)
    + add_deploy_dir: prefix the source with the deploy directory
    '''
    # If we don't enforce the source ending with /, remote_dirname below might
    # start with a /, which makes the path.join cut off the destination bit.
    if not source.endswith(path.sep):
        source = '{0}{1}'.format(source, path.sep)
    # Add deploy directory?
    if add_deploy_dir and state.deploy_dir:
        source = path.join(state.deploy_dir, source)
    # Ensure the source directory exists
    if not path.isdir(source):
        raise IOError('No such directory: {0}'.format(source))
    # Ensure exclude is a list/tuple
    if exclude is not None:
        if not isinstance(exclude, (list, tuple)):
            exclude = [exclude]
    # Ensure exclude_dir is a list/tuple
    if exclude_dir is not None:
        if not isinstance(exclude_dir, (list, tuple)):
            exclude_dir = [exclude_dir]
    # Collect (local, remote) file pairs and remote directories to create.
    put_files = []
    ensure_dirnames = []
    for dirname, _, filenames in walk(source):
        # Path of this directory relative to the source root.
        remote_dirname = dirname.replace(source, '')
        # Should we exclude this dir?
        if exclude_dir and any(fnmatch(remote_dirname, match) for match in exclude_dir):
            continue
        if remote_dirname:
            ensure_dirnames.append(remote_dirname)
        for filename in filenames:
            full_filename = path.join(dirname, filename)
            # Should we exclude this file?
            if exclude and any(fnmatch(full_filename, match) for match in exclude):
                continue
            put_files.append((
                # Join local as normal (unix, win)
                full_filename,
                # Join remote as unix like
                '/'.join(item for item in (destination, remote_dirname, filename) if item),
            ))
    # Ensure the destination directory
    yield directory(state, host, destination, user=user, group=group,)
    # Ensure any remote dirnames
    for dirname in ensure_dirnames:
        yield directory(state, host, '/'.join((destination, dirname)), user=user, group=group,)
    # Put each file combination
    for local_filename, remote_filename in put_files:
        yield put(
            state, host, local_filename, remote_filename,
            user=user, group=group, mode=mode, add_deploy_dir=False,
        )
    # Delete any extra files
    if delete:
        remote_filenames = set(host.fact.find_files(destination) or [])
        wanted_filenames = set([remote_filename for _, remote_filename in put_files])
        files_to_delete = remote_filenames - wanted_filenames
        for filename in files_to_delete:
            # Should we exclude this file?
            if exclude and any(fnmatch(filename, match) for match in exclude):
                continue
            yield file(state, host, filename, present=False)
def constant_time_cmp(a, b):
    '''Compare two strings/sequences in constant time.

    Every position is examined instead of short-circuiting at the first
    mismatch, so the comparison time does not reveal where the inputs
    diverge (timing-attack resistance).

    :param a: first sequence
    :param b: second sequence
    :return: True if ``a`` equals ``b``, False otherwise
    '''
    # Bug fix: zip() stops at the shorter input, so the original returned
    # True whenever one argument was a prefix of the other. Unequal lengths
    # must compare unequal (revealing the length is acceptable, matching
    # hmac.compare_digest semantics).
    result = len(a) == len(b)
    for x, y in zip(a, b):
        result &= (x == y)
    return result
def render_path(self) -> str:
    """Render the download path by filling the site's path template with
    video information.

    Unknown template fields are substituted by the field name itself
    instead of raising ``KeyError``.

    :raises NoTemplateFoundError: if no template exists for the video's site
    """
    # TODO: Fix defaults when date is not found (empty string or None)
    # https://stackoverflow.com/questions/23407295/default-kwarg-values-for-pythons-str-format-method
    from string import Formatter

    class _LenientFormatter(Formatter):
        # Missing string keys fall back to the key name instead of raising.
        def get_value(self, key, args, kwargs):
            if not isinstance(key, str):
                return super().get_value(key, args, kwargs)
            try:
                return kwargs[key]
            except KeyError:
                return key

    data = self.video.data
    site_name = data['site']
    try:
        template = self.templates[site_name]
    except KeyError:
        raise NoTemplateFoundError
    rendered = _LenientFormatter().format(template, **data)
    return os.path.join(self.download_dir, clean_filename(rendered))
def for_sponsor(self, sponsor, include_cancelled=False):
    """Return a QueryList of EighthScheduledActivities where the given
    EighthSponsor is sponsoring.

    If a sponsorship is defined in an EighthActivity, it may be overridden
    on a block by block basis in an EighthScheduledActivity. Sponsors from
    the EighthActivity do not carry over. Deleted activities are excluded,
    and cancelled ones are excluded unless ``include_cancelled`` is True.
    """
    # Sponsor either set directly on the scheduled activity, or inherited
    # from the activity when no per-block override exists.
    direct = Q(sponsors=sponsor)
    inherited = Q(sponsors=None) & Q(activity__sponsors=sponsor)
    queryset = (EighthScheduledActivity.objects
                .exclude(activity__deleted=True)
                .filter(direct | inherited)
                .distinct())
    if not include_cancelled:
        queryset = queryset.exclude(cancelled=True)
    return queryset
def get_xyz(self, xyz_axis=0):
    """Return a vector array of the x, y, and z coordinates.

    Parameters
    ----------
    xyz_axis : int, optional
        The axis in the final array along which the x, y, z components
        should be stored (default: 0).

    Returns
    -------
    xs : `~astropy.units.Quantity`
        With dimension 3 along ``xyz_axis``.
    """
    # Add new axis in x, y, z so one can concatenate them around it.
    # NOTE: just use np.stack once our minimum numpy version is 1.10.
    result_ndim = self.ndim + 1
    if not -result_ndim <= xyz_axis < result_ndim:
        raise IndexError('xyz_axis {0} out of bounds [-{1}, {1})'.format(xyz_axis, result_ndim))
    if xyz_axis < 0:
        # Normalize a negative axis index to its positive equivalent.
        xyz_axis += result_ndim
    # Get components to the same units (very fast for identical units)
    # since np.concatenate cannot deal with quantity.
    unit = self._x1.unit
    sh = self.shape
    # Insert a length-1 axis at xyz_axis so the reshaped components can be
    # concatenated along it.
    sh = sh[:xyz_axis] + (1,) + sh[xyz_axis:]
    components = [getattr(self, '_' + name).reshape(sh).to(unit).value
                  for name in self.attr_classes]
    xs_value = np.concatenate(components, axis=xyz_axis)
    return u.Quantity(xs_value, unit=unit, copy=False)
def searchbyno(self, no):
    """Search stock codes matching the given pattern.

    :param no: value (convertible to str) used as a regular expression to
        match against all known stock numbers
    :rtype: dict
    :return: mapping of each matching stock number to its stock data
    """
    pattern = re.compile(str(no))
    result = {}
    for stock_no in self.__allstockno:
        # Bug fix: the original called `query.group()` and discarded the
        # result -- dead code removed. Use the compiled pattern's search()
        # directly instead of re.search(pattern, ...).
        if pattern.search(str(stock_no)):
            result[stock_no] = self.__allstockno[stock_no]
    return result
def addDataFrameColumn(self, columnName, dtype=str, defaultValue=None):
    """Add a column to the dataframe, provided the model's editable
    property is True and the dtype is supported.

    :param columnName: str name of the column.
    :param dtype: qtpandas.models.SupportedDtypes option
    :param defaultValue: (object) value the column defaults to; should
        match the dtype or be None
    :return: (bool) True on success, False otherwise.
    """
    if not self.editable or dtype not in SupportedDtypes.allTypes():
        return False
    elements = self.rowCount()
    columnPosition = self.columnCount()
    newColumn = pandas.Series([defaultValue] * elements, index=self._dataFrame.index, dtype=dtype)
    # NOTE(review): first/last of columnPosition-1 looks off by one for an
    # append (the new column lands at index columnPosition) -- confirm
    # against Qt's beginInsertColumns contract.
    self.beginInsertColumns(QtCore.QModelIndex(), columnPosition - 1, columnPosition - 1)
    try:
        self._dataFrame.insert(columnPosition, columnName, newColumn, allow_duplicates=False)
    except ValueError:
        # columnName already exists.
        return False
    finally:
        # Bug fix: endInsertColumns() must always balance
        # beginInsertColumns(); the original returned early on ValueError
        # and left attached views in an inconsistent state.
        self.endInsertColumns()
    self.propagateDtypeChanges(columnPosition, newColumn.dtype)
    return True
def thumbnail_exists(self, thumbnail_name):
    """Calculate whether the thumbnail already exists and that the source
    is not newer than the thumbnail.

    If the source and thumbnail file storages are local, their file
    modification times are used. Otherwise the database cached modification
    times are used.

    Returns a falsy value when the thumbnail is missing or stale. For
    database-cached thumbnails the (truthy) cached thumbnail object is
    returned rather than ``True``.
    """
    if self.remote_source:
        # A remote source can't be compared for freshness.
        return False
    if utils.is_storage_local(self.source_storage):
        source_modtime = utils.get_modified_time(self.source_storage, self.name)
    else:
        # Non-local source storage: fall back to the cached record.
        source = self.get_source_cache()
        if not source:
            return False
        source_modtime = source.modified
    if not source_modtime:
        return False
    local_thumbnails = utils.is_storage_local(self.thumbnail_storage)
    if local_thumbnails:
        thumbnail_modtime = utils.get_modified_time(self.thumbnail_storage, thumbnail_name)
        if not thumbnail_modtime:
            return False
        # Fresh if the source has not been modified after the thumbnail.
        return source_modtime <= thumbnail_modtime
    thumbnail = self.get_thumbnail_cache(thumbnail_name)
    if not thumbnail:
        return False
    thumbnail_modtime = thumbnail.modified
    if thumbnail.modified and source_modtime <= thumbnail.modified:
        # NOTE: returns the cached thumbnail object (truthy), not True.
        return thumbnail
    return False
def get_tree(self, list_of_keys):
    """Walk a nested dictionary and return the value at the given key path.

    Traversal stops early as soon as a lookup yields a falsy result, so a
    missing intermediate key returns ``None`` instead of raising.

    :param list_of_keys: a list of keys, e.g. ``['key1', 'key2']``

    USAGE
    >>> # Access the value for key2 within the nested dictionary
    >>> adv_dict({'key1': {'key2': 'value'}}).gettree(['key1', 'key2'])
    'value'
    """
    node = self
    for key in list_of_keys:
        node = node.get(key)
        if not node:
            return node
    return node
def assign_extension_to_users(self, body):
    """AssignExtensionToUsers.

    [Preview API] Assigns access to the given extension for a given list
    of users.

    :param :class:`<ExtensionAssignment> <azure.devops.v5_0.licensing.models.ExtensionAssignment>` body: The extension assignment details.
    :rtype: [ExtensionOperationResult]
    """
    serialized_body = self._serialize.body(body, 'ExtensionAssignment')
    response = self._send(
        http_method='PUT',
        location_id='8cec75ea-044f-4245-ab0d-a82dafcc85ea',
        version='5.0-preview.1',
        content=serialized_body,
    )
    return self._deserialize('[ExtensionOperationResult]', self._unwrap_collection(response))
def disable_all_breakpoints(cls):
    """Disable all breakpoints and update `active_breakpoint_flag`."""
    # Slot #0 always exists and is None, hence the truthiness check.
    for breakpoint_ in cls.breakpoints_by_number:
        if breakpoint_:
            breakpoint_.enabled = False
    cls.update_active_breakpoint_flag()
def _finalize_block_blob(self, sd, metadata, digest):
    # type: (SyncCopy, blobxfer.models.synccopy.Descriptor, dict, str) -> None
    """Finalize a block blob by committing its block list.

    :param SyncCopy self: this
    :param blobxfer.models.synccopy.Descriptor sd: synccopy descriptor
    :param dict metadata: metadata dict
    :param str digest: md5 digest
    """
    # Commit the primary destination first, then every replica target.
    put_block_list = blobxfer.operations.azure.blob.block.put_block_list
    put_block_list(sd.dst_entity, sd.last_block_num, digest, metadata)
    if blobxfer.util.is_not_empty(sd.dst_entity.replica_targets):
        for replica in sd.dst_entity.replica_targets:
            put_block_list(replica, sd.last_block_num, digest, metadata)
def mse(vref, vcmp):
    """Compute the Mean Squared Error (MSE) between two images.

    Parameters
    ----------
    vref : array_like
        Reference image
    vcmp : array_like
        Comparison image

    Returns
    -------
    x : float
        MSE between `vref` and `vcmp`
    """
    ref = np.ravel(np.asarray(vref, dtype=np.float64))
    cmp_ = np.ravel(np.asarray(vcmp, dtype=np.float64))
    # abs() keeps the computation valid for complex-valued inputs.
    diff = np.abs(ref - cmp_)
    return np.mean(diff ** 2)
def get_product(self, standard, key):
    """Query product information (WeChat product lookup).

    Details:
    http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html

    :param standard: product code standard
    :param key: product code content
    :return: decoded JSON response
    """
    payload = {
        'keystandard': standard,
        'keystr': key,
    }
    return self._post('product/get', data=payload)
def wrap_function_cols(self, name, package_name=None, object_name=None, java_class_instance=None, doc=""):
    """Utility method for wrapping a scala/java function that returns a
    spark sql Column.

    Assumes the wrapped function takes a list of spark sql Column objects
    as its arguments.
    """
    def wrapped(*cols):
        container = self.get_java_container(
            package_name=package_name,
            object_name=object_name,
            java_class_instance=java_class_instance,
        )
        # Coerce every argument to its underlying Java column.
        java_cols = [
            col._jc if isinstance(col, Column) else _make_col(col)._jc
            for col in cols
        ]
        java_col = getattr(container, name)(*java_cols)
        return Column(java_col)

    wrapped.__name__ = name
    wrapped.__doc__ = doc
    return wrapped
def answer(self):
    """Return the answer for the question from the validator.

    If multiple validators have been added, only the first one is
    consulted.
    """
    validator = self.validator
    if isinstance(validator, list):
        validator = validator[0]
    return validator.choice()
def reject_entry(request, entry_id):
    """Admins can reject an entry that has been verified or approved but
    not invoiced, setting its status back to 'unverified' for the user to
    fix.
    """
    return_url = request.GET.get('next', reverse('dashboard'))
    try:
        entry = Entry.no_join.get(pk=entry_id)
    except Entry.DoesNotExist:
        # Bug fix: was a bare `except:`, which reported *any* error
        # (including programming errors) as a missing entry.
        message = 'No such log entry.'
        messages.error(request, message)
        return redirect(return_url)
    if entry.status == Entry.UNVERIFIED or entry.status == Entry.INVOICED:
        msg_text = 'This entry is unverified or is already invoiced.'
        messages.error(request, msg_text)
        return redirect(return_url)
    if request.POST.get('Yes'):
        entry.status = Entry.UNVERIFIED
        entry.save()
        msg_text = 'The entry\'s status was set to unverified.'
        messages.info(request, msg_text)
        return redirect(return_url)
    return render(request, 'timepiece/entry/reject.html', {
        'entry': entry,
        'next': request.GET.get('next'),
    })
def score_task(self, X, Y, t=0, metric="accuracy", verbose=True, **kwargs):
    """Scores the predictive performance of the Classifier on task t.

    Args:
        X: The input for the predict_task method
        Y: A [n] or [n, 1] np.ndarray or torch.Tensor of gold labels in
            {1, ..., K_t}
        t: The task index to score
        metric: The metric with which to score performance on this task
        verbose: If True, print the score
        **kwargs: passed through to predict_task and metric_score

    Returns:
        The (float) score of the Classifier for the specified task and
        metric
    """
    Y = self._to_numpy(Y)
    Y_tp = self.predict_task(X, t=t, **kwargs)
    probs = self.predict_proba(X)[t]
    # NOTE(review): Y is indexed by task here (Y[t]) even though the
    # docstring describes Y as the labels for a single task -- confirm
    # that callers pass a per-task label container. Gold label 0 is
    # treated as "abstain/ignore".
    score = metric_score(Y[t], Y_tp, metric, ignore_in_gold=[0], probs=probs, **kwargs)
    if verbose:
        print(f"[t={t}] {metric.capitalize()}: {score:.3f}")
    return score
def user_active_directory_enabled(user, attributes, created, updated):
    """Activate/deactivate a user account based on Active Directory's
    userAccountControl flags.

    Requires 'userAccountControl' to be included in
    LDAP_SYNC_USER_EXTRA_ATTRIBUTES. Accounts with the ACCOUNTDISABLE bit
    (0x2) set are deactivated.
    """
    try:
        flags = int(attributes['userAccountControl'][0])
    except KeyError:
        # Attribute not synced; leave the account state untouched.
        return
    # Bit 0x2 is ACCOUNTDISABLE.
    user.is_active = not (flags & 2)
def helper_import ( module_name , class_name = None ) :
"""Return class or module object .
if the argument is only a module name and return a module object .
if the argument is a module and class name , and return a class object .""" | try :
module = __import__ ( module_name , globals ( ) , locals ( ) , [ class_name ] )
except ( BlackbirdError , ImportError ) as error :
raise BlackbirdError ( 'can not load {0} module [{1}]' '' . format ( module_name , str ( error ) ) )
if not class_name :
return module
else :
try :
return getattr ( module , class_name )
except :
return False |
def pairs(iterable):
    """Return an iterator yielding overlapping pairs from ``iterable``.

    :Example:
    >>> list(pairs([1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]
    """
    trailing, leading = itertools.tee(iterable)
    # Advance the second stream by one so the two overlap.
    next(leading, None)
    return zip(trailing, leading)
def build_digest_challenge(timestamp, secret, realm, opaque, stale):
    '''Build a Digest challenge that may be sent as the value of the
    'WWW-Authenticate' header in a 401 or 403 response.

    'opaque' may be any value - it will be returned by the client.
    'timestamp' will be incorporated and signed in the nonce - it may be
    retrieved from the client's authentication request using
    get_nonce_timestamp().
    '''
    challenge_parts = format_parts(
        realm=realm,
        qop='auth',
        nonce=calculate_nonce(timestamp, secret),
        opaque=opaque,
        algorithm='MD5',
        stale=stale and 'true' or 'false',
    )
    return 'Digest %s' % challenge_parts
def get_payment_transaction_by_id(cls, payment_transaction_id, **kwargs):
    """Find PaymentTransaction.

    Return a single instance of PaymentTransaction by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_payment_transaction_by_id(payment_transaction_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str payment_transaction_id: ID of paymentTransaction to return (required)
    :return: PaymentTransaction
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous call: the helper returns the request thread.
        return cls._get_payment_transaction_by_id_with_http_info(payment_transaction_id, **kwargs)
    response = cls._get_payment_transaction_by_id_with_http_info(payment_transaction_id, **kwargs)
    return response
def _unmarshal(self, obj):
    """Walk an object and unmarshal any MORs into psphere objects.

    Recursively copies ``obj``; any (possibly nested) suds object carrying
    a ``_type`` key is treated as a managed object reference and replaced
    by the corresponding psphere class instance.
    """
    if isinstance(obj, suds.sudsobject.Object) is False:
        logger.debug("%s is not a suds instance, skipping", obj)
        return obj
    logger.debug("Processing:")
    logger.debug(obj)
    logger.debug("...with keylist:")
    logger.debug(obj.__keylist__)
    # If the obj that we're looking at has a _type key
    # then create a class of that type and return it immediately
    if "_type" in obj.__keylist__:
        logger.debug("obj is a MOR, converting to psphere class")
        return self._mor_to_pobject(obj)
    # Rebuild the object field by field, unmarshalling as we go.
    new_object = obj.__class__()
    for sub_obj in obj:
        # Iterating a suds object yields (name, value) pairs.
        logger.debug("Looking at %s of type %s", sub_obj, type(sub_obj))
        if isinstance(sub_obj[1], list):
            # List-valued field: unmarshal every element.
            new_embedded_objs = []
            for emb_obj in sub_obj[1]:
                new_emb_obj = self._unmarshal(emb_obj)
                new_embedded_objs.append(new_emb_obj)
            setattr(new_object, sub_obj[0], new_embedded_objs)
            continue
        if not issubclass(sub_obj[1].__class__, suds.sudsobject.Object):
            # Plain value: copy it across unchanged.
            logger.debug("%s is not a sudsobject subclass, skipping", sub_obj[1].__class__)
            setattr(new_object, sub_obj[0], sub_obj[1])
            continue
        logger.debug("Obj keylist: %s", sub_obj[1].__keylist__)
        if "_type" in sub_obj[1].__keylist__:
            # Nested MOR: instantiate the mapped psphere class directly.
            logger.debug("Converting nested MOR to psphere class:")
            logger.debug(sub_obj[1])
            kls = classmapper(sub_obj[1]._type)
            logger.debug("Setting %s.%s to %s", new_object.__class__.__name__, sub_obj[0], sub_obj[1])
            setattr(new_object, sub_obj[0], kls(sub_obj[1], self))
        else:
            # Nested plain suds object: recurse.
            logger.debug("Didn't find _type in:")
            logger.debug(sub_obj[1])
            setattr(new_object, sub_obj[0], self._unmarshal(sub_obj[1]))
    return new_object
def update_model(raw_model, app_model, forbidden_keys=None, inverse=False):
    """Update the `raw_model` according to the values in the `app_model`.

    :param raw_model: Raw model which gets updated.
    :param app_model: App model (object or dict) holding the data.
    :param forbidden_keys: Data/attributes which will not be updated.
    :type forbidden_keys: list
    :param inverse: If the value is `True` all `app_model` attributes which
        are contained in the `raw_model` are updated. If the value is
        `False` all `raw_model` properties which are in the `app_model`
        will be updated.
    """
    if forbidden_keys is None:
        forbidden_keys = []
    # Bug fix: `type(app_model) != dict` treated dict subclasses (e.g.
    # OrderedDict) as objects and read their (usually empty) __dict__;
    # isinstance handles subclasses correctly.
    if not isinstance(app_model, dict):
        app_model = app_model.__dict__
    # Snapshot the keys first: setattr() below mutates raw_model.__dict__,
    # which must not be iterated while changing.
    if inverse:
        keys = [k for k in app_model if hasattr(raw_model, k)]
    else:
        keys = [k for k in raw_model.__dict__ if k in app_model]
    for k in keys:
        if k in forbidden_keys:
            continue
        logging.debug("Setting property {0} to value '{1}'.".format(k, app_model[k]))
        setattr(raw_model, k, app_model[k])
def _get_variant_region(self):
    """Categorize the variant by its location in the transcript
    (5' UTR, exon, intron, 3' UTR, or whole gene).

    :return: "exon", "intron", "five_utr", "three_utr", "whole_gene"
    :rtype: str
    """
    start = self._var_c.posedit.pos.start
    end = self._var_c.posedit.pos.end
    if start.datum == Datum.CDS_END and end.datum == Datum.CDS_END:
        # Entirely past the CDS end.
        return self.T_UTR
    if start.base < 0 and end.base < 0:
        # Entirely before the CDS start.
        return self.F_UTR
    if start.base < 0 and end.datum == Datum.CDS_END:
        # Spans from before the CDS start to past its end.
        return self.WHOLE_GENE
    if start.offset != 0 or end.offset != 0:
        # Leave out anything intronic for now.
        return self.INTRON
    # Anything else contains an exon.
    return self.EXON
def bytes_to_str(self, b):
    """Convert a bytes array to a raw string.

    On Python 3 the bytes are decoded using the connection charset
    (translated through `charset_map`); on Python 2 they are returned
    unchanged.
    """
    if PYTHON_MAJOR_VER != 3:
        return b
    codec = charset_map.get(self.charset, self.charset)
    return b.decode(codec)
def _set_bank_view(self, session):
    """Set the underlying bank view on ``session`` to match the current
    view.

    Sessions that do not support the view selector are silently skipped.
    """
    method_name = ('use_comparative_bank_view'
                   if self._bank_view == COMPARATIVE
                   else 'use_plenary_bank_view')
    try:
        getattr(session, method_name)()
    except AttributeError:
        pass
def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, lens_light_model_bool_list=None,
                        aniso_param=1, r_eff=None, R_slit=0.81, dR_slit=0.1, psf_fwhm=0.7,
                        num_evaluate=1000):
    """Computes the LOS velocity dispersion of the lens within a slit of
    size R_slit x dR_slit and seeing psf_fwhm.

    The assumptions are a Hernquist light profile and the spherical
    power-law lens model at the first position. Further information can be
    found in the AnalyticKinematics() class.

    :param kwargs_lens: lens model parameters
    :param kwargs_lens_light: deflector light parameters
    :param lens_light_model_bool_list: bool list selecting lens light model components
    :param aniso_param: scaled r_ani with respect to the half light radius
    :param r_eff: half light radius, if not provided, will be computed from
        the lens light model
    :param R_slit: width of the slit
    :param dR_slit: length of the slit
    :param psf_fwhm: full width at half maximum of the seeing condition
    :param num_evaluate: number of spectral rendering of the light
        distribution that end up on the slit
    :return: velocity dispersion in units [km/s]
    """
    # Power-law slope of the first (main deflector) lens model.
    gamma = kwargs_lens[0]['gamma']
    # Light centroid; defaults to the origin when not specified.
    if 'center_x' in kwargs_lens_light[0]:
        center_x, center_y = kwargs_lens_light[0]['center_x'], kwargs_lens_light[0]['center_y']
    else:
        center_x, center_y = 0, 0
    if r_eff is None:
        # Derive the half-light radius from the lens light model.
        r_eff = self.lens_analysis.half_light_radius_lens(
            kwargs_lens_light, center_x=center_x, center_y=center_y,
            model_bool_list=lens_light_model_bool_list)
    theta_E = kwargs_lens[0]['theta_E']
    # Anisotropy radius scaled by the half-light radius.
    r_ani = aniso_param * r_eff
    sigma2 = self.analytic_kinematics.vel_disp(
        gamma, theta_E, r_eff, r_ani, R_slit, dR_slit,
        FWHM=psf_fwhm, rendering_number=num_evaluate)
    return sigma2
def load_yamlf(fpath, encoding):
    """Load a YAML file and return its parsed content.

    :param unicode fpath: path of the YAML file
    :param unicode encoding: text encoding of the file
    :rtype: dict | list
    """
    with codecs.open(fpath, encoding=encoding) as yaml_file:
        content = yaml_file.read()
    return yaml.safe_load(content)
def save_model(self, request, obj, form, change):
    """Send the email instead of saving it."""
    # Comma-separated address lists are split and stripped.
    recipients = [addr.strip() for addr in obj.to_emails.split(',')]
    bcc_list = [addr.strip() for addr in obj.bcc_emails.split(',')]
    cc_list = [addr.strip() for addr in obj.cc_emails.split(',')]
    email = message.EmailMessage(
        subject=obj.subject,
        body=obj.body,
        from_email=obj.from_email,
        to=recipients,
        bcc=bcc_list,
        cc=cc_list,
    )
    email.send()
def delete_multi(self, keys, time=0, key_prefix=''):
    '''Delete multiple keys in the memcache doing just one query.

    >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
    >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
    >>> mc.delete_multi(['key1', 'key2'])
    >>> mc.get_multi(['key1', 'key2']) == {}

    This method is recommended over iterated regular L{delete}s as it
    reduces total latency, since your app doesn't have to wait for each
    round-trip of L{delete} before sending the next one.

    @param keys: An iterable of keys to clear
    @param time: number of seconds any subsequent set / update commands
        should fail. Defaults to 0 for no delay.
    @param key_prefix: Optional string to prepend to each key when sending
        to memcache. See docs for L{get_multi} and L{set_multi}.
    @return: 1 if no failure in communication with any memcacheds.
    @rtype: int
    '''
    self._statlog('delete_multi')
    # Group the (prefixed) keys by the server that owns them.
    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
    # send out all requests on each server before reading anything
    dead_servers = []
    rc = 1
    for server in server_keys.iterkeys():
        # Batch all delete commands for this server into one payload.
        bigcmd = []
        write = bigcmd.append
        if time != None:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s %d\r\n" % (key, time))
        else:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s\r\n" % key)
        try:
            server.send_cmds(''.join(bigcmd))
        except socket.error, msg:
            # Communication failure: flag overall failure and mark the
            # server dead so we skip reading its replies below.
            rc = 0
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)
    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]
    # Expect one "DELETED" confirmation per key from surviving servers.
    for server, keys in server_keys.iteritems():
        try:
            for key in keys:
                server.expect("DELETED")
        except socket.error, msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            rc = 0
    return rc
def access_token_response(self, access_token):
    """Return a successful response after creating the access token,
    as defined in :rfc:`5.1`.
    """
    payload = {
        'access_token': access_token.token,
        'token_type': constants.TOKEN_TYPE,
        'expires_in': access_token.get_expire_delta(),
        'scope': ' '.join(scope.names(access_token.scope)),
    }
    # Not all access_tokens are given a refresh_token
    # (for example, public clients doing password auth)
    try:
        payload['refresh_token'] = access_token.refresh_token.token
    except ObjectDoesNotExist:
        pass
    # NOTE(review): `mimetype` is the pre-Django-1.7 keyword; newer Django
    # versions require `content_type` -- confirm the supported Django
    # version before changing.
    return HttpResponse(json.dumps(payload), mimetype='application/json')
def _create_function(name, doc=""):
    """Create a PySpark function by its name."""
    def wrapper(col):
        sc = SparkContext._active_spark_context
        # Pass the underlying Java column when given a Column wrapper.
        java_arg = col._jc if isinstance(col, Column) else col
        jc = getattr(sc._jvm.functions, name)(java_arg)
        return Column(jc)
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def get_historical_prices(self, **kwargs):
    """Historical Prices.

    Reference: https://iexcloud.io/docs/api/#chart
    Data Weighting: See IEX Cloud Docs

    Parameters
    ----------
    range: str, default '1m', optional
        Chart range to return. See docs.
        Choose from [`5y`, `2y`, `1y`, `ytd`, `6m`, `3m`, `1m`, `1d`,
        `date`, `dynamic`].
        Choosing `date` will return IEX-only data by minute for a
        specified date in the format YYYYMMDD if available. Currently
        supporting trailing 30 calendar days.
        Choosing `dynamic` will return `1d` or `1m` data depending on
        the day or week and time of day. Intraday per minute data is
        only returned during market hours.
    chartReset: boolean, default True, optional
        If true, 1d chart will reset at midnight instead of the default
        behavior of 9:30am EST.
    chartSimplify: boolean, default True, optional
        If true, runs polyline simplification using Douglas-Peucker
        algorithm. Useful for plotting spotline charts.
    chartInterval: int, default None, optional
        Chart data will return every nth element (where n is chartInterval).
    changeFromClose: bool, default False, optional
        If true, changeOverTime and marketChangeOverTime will be relative
        to previous day close instead of the first value.
    chartLast: int, optional
        Return the last N elements.
    chartCloseOnly: boolean, default False, optional
        Specify to return adjusted data only with keys ``date``,
        ``close``, and ``volume``.
    chartIEXOnly: boolean, default False, optional
        Only for ``1d``. Limits the return of intraday prices to IEX
        only data.

    Returns
    -------
    list or pandas DataFrame
        Stocks Historical Prices endpoint data
    """
    def fmt_p(out):
        # Convert the raw per-symbol chart payload into OHLCV DataFrames
        # indexed by date.
        result = {}
        for symbol in self.symbols:
            d = out.pop(symbol)
            df = pd.DataFrame(d)
            df.set_index(pd.DatetimeIndex(df["date"]), inplace=True)
            values = ["open", "high", "low", "close", "volume"]
            df = df[values]
            result.update({symbol: df})
        if len(result) == 1:
            # Single symbol: return its DataFrame directly.
            return result[self.symbols[0]]
        else:
            # Multiple symbols: column-concatenate keyed by symbol.
            return pd.concat(result.values(), keys=result.keys(), axis=1)

    return self._get_endpoint("chart", fmt_p=fmt_p, params=kwargs)
def matrix_multiply(m1, m2):
    """Matrix multiplication (iterative algorithm).

    The running time of the iterative matrix multiplication algorithm is
    :math:`O(n^{3})`.

    :param m1: 1st matrix with dimensions :math:`(n \\times p)`
    :type m1: list, tuple
    :param m2: 2nd matrix with dimensions :math:`(p \\times m)`
    :type m2: list, tuple
    :return: resultant matrix with dimensions :math:`(n \\times m)`
    :rtype: list
    """
    # Transpose m2 once so each output entry is a row/column dot product.
    columns = list(zip(*m2))
    return [
        [sum((float(a * b) for a, b in zip(row, col)), 0.0) for col in columns]
        for row in m1
    ]
def allocate(n, dtype=numpy.float32):
    """Allocate context-portable pinned (page-locked) host memory."""
    flags = drv.host_alloc_flags.PORTABLE
    return drv.pagelocked_empty(int(n), dtype, order='C', mem_flags=flags)
def wrap(cls, meth):
    '''Wrap a connection-opening coroutine so its resulting socket is
    wrapped in this class.'''
    async def wrapper(*args, **kwargs):
        raw_sock = await meth(*args, **kwargs)
        return cls(raw_sock)
    return wrapper
def process_ticket(self):
    """Validate a ticket extracted from a SAML XML body.

    :raises SamlValidateError: if the ticket is not found or not valid, or if
        we fail to parse the posted XML.
    :return: a ticket object
    :rtype: :class:`models.Ticket <cas_server.models.Ticket>`
    """
    try:
        # Fixed SAML layout: the second child of the root holds the
        # AuthnRequest, whose first child carries the ticket text.
        auth_req = self.root.getchildren()[1].getchildren()[0]
        ticket = auth_req.getchildren()[0].text
        ticket = models.Ticket.get(ticket)
        # A ticket is only valid for the exact service it was emitted for.
        if ticket.service != self.target:
            raise SamlValidateError(
                u'AuthnFailed',
                u'TARGET %s does not match ticket service' % self.target
            )
        return ticket
    except (IndexError, KeyError):
        # Malformed XML: the expected element tree is not there.
        raise SamlValidateError(u'VersionMismatch')
    except Ticket.DoesNotExist:
        # The ticket string matched no known ticket-class prefix.
        raise SamlValidateError(
            u'AuthnFailed',
            u'ticket %s should begin with PT- or ST-' % ticket
        )
    except (ServiceTicket.DoesNotExist, ProxyTicket.DoesNotExist):
        # Well-formed prefix but no matching ticket in the database.
        raise SamlValidateError(
            u'AuthnFailed',
            u'ticket %s not found' % ticket
        )
def getContacts(self, only_active=True):
    """Return a list containing the contacts from this Client.

    :param only_active: when True (default), keep only active contacts.
    :return: the contacts; filtering now builds a real list instead of the
        lazy ``filter`` object previously returned on Python 3, which could
        only be iterated once and had no ``len()``.
    """
    contacts = self.objectValues("Contact")
    if only_active:
        contacts = [contact for contact in contacts if api.is_active(contact)]
    return contacts
def _map_to_cfg(self):
    """Map our current slice to the CFG.

    Based on self._statements_per_run and self._exit_statements_per_run, this
    method will traverse the CFG and check if there is any missing block on
    the path. If there is, the default exit of that missing block will be
    included in the slice. This is because Slicecutor cannot skip individual
    basic blocks along a path.
    """
    # Fixpoint loop: start from the exits already chosen, and in each pass
    # discover blocks that are jumped to but not yet part of the slice.
    exit_statements_per_run = self.chosen_exits
    new_exit_statements_per_run = defaultdict(list)
    while len(exit_statements_per_run):
        for block_address, exits in exit_statements_per_run.items():
            for stmt_idx, exit_target in exits:
                if exit_target not in self.chosen_exits:  # Oh we found one!
                    # The default exit should be taken no matter where it leads to
                    # Add it to the new set
                    tpl = (DEFAULT_STATEMENT, None)
                    if tpl not in new_exit_statements_per_run[exit_target]:
                        new_exit_statements_per_run[exit_target].append(tpl)
        # Add the new ones to our global dict
        # NOTE(review): this indexing assumes self.chosen_exits maps every
        # newly discovered target to a list (e.g. it is a defaultdict(list));
        # confirm, otherwise a plain dict would raise KeyError here.
        for block_address, exits in new_exit_statements_per_run.items():
            for ex in exits:
                if ex not in self.chosen_exits[block_address]:
                    self.chosen_exits[block_address].append(ex)
        # Switch them so we can process the new set; the loop terminates when
        # a pass discovers no new targets.
        exit_statements_per_run = new_exit_statements_per_run
        new_exit_statements_per_run = defaultdict(list)
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
    """Add an adapter NIO binding.

    :param adapter_number: adapter number
    :param port_number: port number
    :param nio: NIO instance to add to the adapter/port
    :raises IOUError: if the adapter or the port does not exist
    """
    try:
        adapter = self._adapters[adapter_number]
    except IndexError:
        raise IOUError('Adapter {adapter_number} does not exist for IOU "{name}"'.format(name=self._name, adapter_number=adapter_number))
    if not adapter.port_exists(port_number):
        raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number))
    adapter.add_nio(port_number, nio)
    log.info('IOU "{name}" [{id}]: {nio} added to {adapter_number}/{port_number}'.format(name=self._name, id=self._id, nio=nio, adapter_number=adapter_number, port_number=port_number))
    if self.ubridge:
        # Wire the NIO into the running uBridge process as a UDP endpoint.
        # NOTE(review): the bridge name offsets application_id by 512 --
        # presumably to avoid clashing with other bridge id ranges; confirm.
        bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
        yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, iol_id=self.application_id, bay=adapter_number, unit=port_number, lport=nio.lport, rhost=nio.rhost, rport=nio.rport))
        yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters)
def verify_fft_options(opt, parser):
    """Parse the FFT options and verify that they are reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance; its ``error`` method is invoked on any
        invalid option combination.
    """
    if opt.fftw_measure_level not in (0, 1, 2, 3):
        parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))

    # Importing system wisdom is mutually exclusive with explicit wisdom files.
    wisdom_file_given = (opt.fftw_input_float_wisdom_file is not None) or \
                        (opt.fftw_input_double_wisdom_file is not None)
    if opt.fftw_import_system_wisdom and wisdom_file_given:
        parser.error("If --fftw-import-system-wisdom is given, then you cannot give" " either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")

    backend = opt.fftw_threads_backend
    if backend is not None and backend not in ('openmp', 'pthreads', 'unthreaded'):
        parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'")
def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
    """Retrieve the subnets of a tenant's networks.

    Get the subnets inside a network after applying the exclusion list.

    :param tenant_id: tenant whose networks are inspected
    :param excl_list: iterable of subnet addresses (without the mask) to skip
    :param excl_part: when True, skip networks whose name carries a
        '::'-delimited partition suffix
    :return: list of CIDR strings (address/mask) that were not excluded
    """
    kept_subnets = []
    for network in self.get_network_by_tenant(tenant_id):
        # A non-empty partition suffix after '::' marks the network as
        # belonging to a partition; skip it when excl_part is requested.
        if excl_part and network.get('name').partition('::')[2]:
            continue
        for subnet_info in self.get_subnets_for_net(network.get('id')):
            cidr = subnet_info.get('cidr')
            address = cidr.split('/')[0]
            if address not in excl_list:
                kept_subnets.append(cidr)
    return kept_subnets
def create_from_intermediate(cls, crypto, intermediate_point, seed, compressed=True, include_cfrm=True):
    """Given an intermediate point, given to us by "owner", generate an address
    and encrypted private key that can be decoded by the passphrase used to
    generate the intermediate point.

    :param crypto: currency identifier handed through to the constructor
    :param intermediate_point: base58check-encoded intermediate code
    :param seed: seed material (str) used to derive ``seedb``
    :param compressed: when True, flag the key as using a compressed pubkey
    :param include_cfrm: when True, also return a confirmation code
    :return: ``(generatedaddress, encrypted_pk)`` when include_cfrm is False,
        otherwise ``(generatedaddress, cls(...), confirmation_code)``.
        NOTE(review): the two branches return the encrypted key in different
        forms (raw base58 string vs wrapped instance) -- confirm intended.
    """
    # Flag byte: 0x20 marks a compressed public key, 0x00 uncompressed.
    flagbyte = b'\x20' if compressed else b'\x00'
    payload = b58decode_check(str(intermediate_point))
    # Decoded intermediate layout: 8-byte magic, 8-byte ownerentropy,
    # then the passpoint (everything up to the trailing 4 bytes).
    ownerentropy = payload[8:16]
    passpoint = payload[16:-4]
    x, y = uncompress(passpoint)
    if not is_py2:
        seed = bytes(seed, 'ascii')
    # seedb: first 24 hex chars of SHA256(seed).
    seedb = hexlify(sha256(seed).digest())[:24]
    factorb = int(hexlify(sha256(sha256(seedb).digest()).digest()), 16)
    # Generated address comes from EC-multiplying the passpoint by factorb.
    generatedaddress = pubtoaddr(fast_multiply((x, y), factorb))
    # On py3, hashing needs bytes; on py2 the str is already byte-like.
    wrap = lambda x: x
    if not is_py2:
        wrap = lambda x: bytes(x, 'ascii')
    addresshash = sha256(sha256(wrap(generatedaddress)).digest()).digest()[:4]
    # Derive 64 bytes of AES key material via scrypt keyed on the passpoint,
    # salted with addresshash||ownerentropy.
    encrypted_seedb = scrypt.hash(passpoint, addresshash + ownerentropy, 1024, 1, 1, 64)
    derivedhalf1, derivedhalf2 = encrypted_seedb[:32], encrypted_seedb[32:]
    aes = AES.new(derivedhalf2)
    # Encrypt seedb in two XOR-masked 16-byte blocks; the second block chains
    # the tail of the first ciphertext with the remaining seedb hex digits.
    block1 = long(seedb[0:16], 16) ^ long(hexlify(derivedhalf1[0:16]), 16)
    encryptedpart1 = aes.encrypt(unhexlify('%0.32x' % block1))
    block2 = long(hexlify(encryptedpart1[8:16]) + seedb[16:24], 16) ^ long(hexlify(derivedhalf1[16:32]), 16)
    encryptedpart2 = aes.encrypt(unhexlify('%0.32x' % block2))
    # 39 bytes 2 1 4 8 8 16
    payload = b"\x01\x43" + flagbyte + addresshash + ownerentropy + encryptedpart1[:8] + encryptedpart2
    encrypted_pk = b58encode_check(payload)
    if not include_cfrm:
        return generatedaddress, encrypted_pk
    confirmation_code = Bip38ConfirmationCode.create(flagbyte, ownerentropy, factorb, derivedhalf1, derivedhalf2, addresshash)
    return generatedaddress, cls(crypto, encrypted_pk), confirmation_code
def get_es_label(obj, def_obj):
    """Return ``obj`` updated with a label for the elasticsearch 'label' field.

    args:
        obj: data object to update
        def_obj: the class instance that has definition values
    """
    # Fields to probe for a label: class-declared 'kds_esLabel' fields first
    # (when present), then the module-wide defaults.
    label_flds = LABEL_FIELDS
    if def_obj.es_defs.get('kds_esLabel'):
        label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS
    try:
        # First class-definition field that has a value wins.
        for label in label_flds:
            if def_obj.cls_defs.get(label):
                obj['label'] = def_obj.cls_defs[label][0]
                break
        if not obj.get('label'):
            # Fall back to the class name (portion after the last '_').
            obj['label'] = def_obj.__class__.__name__.split("_")[-1]
    except AttributeError:  # an attribute error is caused when the class is only
        # an instance of the BaseRdfClass. We will search the rdf_type
        # property and construct a label from rdf_type value
        if def_obj.get('rdf_type'):
            obj['label'] = def_obj['rdf_type'][-1].value[-1]
        else:
            obj['label'] = "no_label"
    return obj
def get_events(self, from_=None, to=None):
    """Query a slice of the events.

    Events are always returned in the order they were added.

    Parameters:
        from_ -- if not None, return only events added after the event with
            id `from_`. If None, return from the start of history.
        to -- if not None, return only events added before, and including,
            the event with event id `to`. If None, return up to, and
            including, the last added event.

    returns -- an iterable of (event id, eventdata) tuples.
    """
    assert from_ is None or isinstance(from_, str)
    assert to is None or isinstance(to, str)
    if from_ and not self.key_exists(from_):
        msg = 'from_={0}'.format(from_)
        raise EventStore.EventKeyDoesNotExistError(msg)
    if to and not self.key_exists(to):
        msg = 'to={0}'.format(to)
        raise EventStore.EventKeyDoesNotExistError(msg)

    # + 1 below because we have already seen the event
    fromindex = self._get_eventid(from_) + 1 if from_ else 0
    toindex = self._get_eventid(to) if to else None
    if from_ and to and fromindex > toindex:
        raise EventOrderError("'to' happened cronologically before" " 'from_'.")

    # BUG FIX: compare against None explicitly -- a 'to' event whose internal
    # eventid is 0 is falsy and previously fell through to the unbounded
    # query, returning the whole tail instead of just that event.
    if toindex is not None:
        sql = ('SELECT uuid, event FROM events ' 'WHERE eventid BETWEEN ? AND ?')
        params = (fromindex, toindex)
    else:
        sql = 'SELECT uuid, event FROM events WHERE eventid >= ?'
        params = (fromindex,)
    sql = sql + " ORDER BY eventid"
    return [(row[0], row[1].encode('utf-8')) for row in self.conn.execute(sql, params)]
def error(self, line_number, offset, text, check):
    """Run the checks and collect the errors.

    Delegates to the base report; when the base class reports a code for
    this check, record the full error tuple (with a 1-based column).
    """
    reported_code = super(_Report, self).error(line_number, offset, text, check)
    if reported_code:
        entry = (line_number, offset + 1, reported_code, text, check)
        self.errors.append(entry)
def get_function_id(sig):
    """Return the function id (4-byte selector) of the given signature.

    Args:
        sig (str)

    Return:
        (int)
    """
    digest = sha3.keccak_256()
    digest.update(sig.encode('utf-8'))
    # The selector is the first 4 bytes (8 hex digits) of the keccak hash.
    return int(digest.hexdigest()[:8], 16)
def _map_update ( self , prior_mean , prior_cov , global_cov_scaled , new_observation ) :
"""Maximum A Posterior ( MAP ) update of a parameter
Parameters
prior _ mean : float or 1D array
Prior mean of parameters .
prior _ cov : float or 1D array
Prior variance of scalar parameter , or
prior covariance of multivariate parameter
global _ cov _ scaled : float or 1D array
Global prior variance of scalar parameter , or
global prior covariance of multivariate parameter
new _ observation : 1D or 2D array , with shape [ n _ dim , n _ subj ]
New observations on parameters .
Returns
posterior _ mean : float or 1D array
Posterior mean of parameters .
posterior _ cov : float or 1D array
Posterior variance of scalar parameter , or
posterior covariance of multivariate parameter""" | common = np . linalg . inv ( prior_cov + global_cov_scaled )
observation_mean = np . mean ( new_observation , axis = 1 )
posterior_mean = prior_cov . dot ( common . dot ( observation_mean ) ) + global_cov_scaled . dot ( common . dot ( prior_mean ) )
posterior_cov = prior_cov . dot ( common . dot ( global_cov_scaled ) )
return posterior_mean , posterior_cov |
def merge_lists(*args):
    """Merge an arbitrary number of lists into a single deduped list.

    Args:
        *args: Two or more lists; ``None``/empty entries are skipped.
            Items are deduplicated by their ``value`` attribute -- the last
            item seen for a value wins, keeping first-seen ordering.

    Returns:
        A deduped merged list of all the provided lists as a single list.
    """
    by_value = {}
    for contact_list in args:
        if not contact_list:
            continue
        for contact in contact_list:
            by_value[contact.value] = contact
    return list(by_value.values())
def add_children(self, root):
    """Add child objects using the factory.

    Iterates the WSDL root's children, builds a wrapper via ``Factory``,
    appends each to the flat ``children`` list and additionally files it
    into the matching typed registry (imports, types, messages, port types,
    bindings or services).
    """
    for c in root.getChildren(ns=wsdlns):
        child = Factory.create(c, self)
        # Element kinds the factory does not model are skipped entirely.
        if child is None:
            continue
        self.children.append(child)
        if isinstance(child, Import):
            self.imports.append(child)
            continue
        if isinstance(child, Types):
            self.types.append(child)
            continue
        # Messages, port types and bindings are keyed by qualified name.
        if isinstance(child, Message):
            self.messages[child.qname] = child
            continue
        if isinstance(child, PortType):
            self.port_types[child.qname] = child
            continue
        if isinstance(child, Binding):
            self.bindings[child.qname] = child
            continue
        if isinstance(child, Service):
            self.services.append(child)
            continue
def has_provider_support(provider, media_type):
    """Verify whether an API provider supports the requested media type.

    :param provider: provider name to look up
    :param media_type: media type, used to locate the ``API_<TYPE>`` constant
    :return: True when the provider appears in that media type's API set
    """
    if provider.lower() not in API_ALL:
        return False
    # NOTE(review): the check above lowercases ``provider`` but the lookup
    # below does not -- confirm the API_* constants share a single casing.
    media_const = "API_" + media_type.upper()
    return provider in globals().get(media_const, {})
def main_build_index(args=[], prog_name=sys.argv[0]):
    """Main entry point for the index script.

    :param args: the arguments for this script, as a list of string. Should
                 already have had the script name stripped. That is, if
                 there are no args provided, this should be an empty list.
    :param prog_name: the name of the script; taken from command line args by
                      default, but you can change it if you want.
    """
    # get options and arguments
    ui = getUI_build_index(prog_name, args)

    # just run unit tests
    if ui.optionIsSet("test"):
        unittest.main(argv=[sys.argv[0]])
        sys.exit()

    # just show help
    if ui.optionIsSet("help"):
        ui.usage()
        sys.exit()
    verbose = (ui.optionIsSet("verbose") is True) or DEFAULT_VERBOSITY

    # get input file-handle(s); fall back to stdin if none found.
    in_fhs = [open(x) for x in ui.getAllArguments()]
    if in_fhs == []:
        if sys.stdin.isatty():
            sys.stderr.write("[NO INPUT FILE FOUND; WAITING FOR INPUT FROM STDIN]\n")
        in_fhs = [sys.stdin]

    # Get output handle(s). If only one input file, and no output file, output
    # to standard out.
    out_fhs = [sys.stdout]
    if ui.optionIsSet("output"):
        out_fhs = [open(x, "w") for x in ui.getValue("output").split()]
    if len(in_fhs) != len(out_fhs):
        # BUG FIX: terminate the message with a newline and exit with a
        # non-zero status -- this is an error, not a success (was exit(0)).
        sys.stderr.write("mismatch between number of input files and output files\n")
        sys.exit(1)

    # figure out which indexer to use and then index each file
    op_val = ui.getValue("type") if ui.optionIsSet("type") else None
    indexer = __get_indexer(ui.getAllArguments(), op_val)
    for in_fh, out_fh in zip(in_fhs, out_fhs):
        indexer(in_fh, out_fh, verbose)
def dashfn(handle, lenout=_default_len_out):
    """Return the name of the DAS file associated with a handle.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dashfn_c.html

    :param handle: Handle of a DAS file.
    :type handle: int
    :param lenout: Length of output file name string.
    :type lenout: int
    :return: Corresponding file name.
    :rtype: str
    """
    c_handle = ctypes.c_int(handle)
    c_namlen = ctypes.c_int(lenout)
    # Output buffer the C routine fills with the file name.
    name_buffer = stypes.stringToCharP(lenout)
    libspice.dashfn_c(c_handle, c_namlen, name_buffer)
    return stypes.toPythonString(name_buffer)
def forget(self):
    """Reset observed events and deregister this observer.

    Clears the ``_observed_events`` mapping and removes ``self`` from the
    ``_observers`` collection when present.

    :return: Nothing
    """
    self._observed_events = {}
    try:
        self._observers.remove(self)
    except ValueError:
        # Not registered -- nothing to remove.
        pass
def get_bucket_file_list(self):
    """Return every object in the configured bucket, handling pagination.

    Uses the S3 ``list_objects`` paginator, optionally filtered by
    ``aws_bucket_prefix``, and merges each result page into one dict.
    """
    logger.debug("Retrieving bucket object list")
    pagination_args = {'Bucket': self.aws_bucket_name}
    if self.aws_bucket_prefix:
        logger.debug("Adding prefix {} to bucket list as a filter".format(self.aws_bucket_prefix))
        pagination_args['Prefix'] = self.aws_bucket_prefix
    paginator = self.s3_client.get_paginator('list_objects')
    objects = {}
    for page in paginator.paginate(**pagination_args):
        objects.update(get_bucket_page(page))
    return objects
def get_tables(self):
    """Get all table names in the database.

    Returns
    -------
    `set` of `str`
    """
    self.cursor.execute('SELECT name FROM sqlite_master WHERE type="table"')
    return {row[0] for row in self.cursor.fetchall()}
def clicks(times, fs, click=None, length=None):
    """Return a signal with the signal 'click' placed at each specified time.

    Parameters
    ----------
    times : np.ndarray
        times to place clicks, in seconds
    fs : int
        desired sampling rate of the output signal
    click : np.ndarray
        click signal, defaults to a 1 kHz blip
    length : int
        desired number of samples in the output signal,
        defaults to ``times.max() * fs + click.shape[0] + 1``

    Returns
    -------
    click_signal : np.ndarray
        Synthesized click signal
    """
    if click is None:
        # Default click: 100 ms of a 1 kHz tone with an exponential decay.
        sample_idx = np.arange(fs * .1)
        click = np.sin(2 * np.pi * sample_idx * 1000 / (1. * fs))
        click *= np.exp(-sample_idx / (fs * .01))
    if length is None:
        length = int(times.max() * fs + click.shape[0] + 1)
    click_signal = np.zeros(length)
    for time in times:
        start = int(time * fs)
        # Never write past the end of the output signal.
        if start >= length:
            break
        end = start + click.shape[0]
        if end >= length:
            # Truncated final click.
            click_signal[start:] = click[:length - start]
            break
        click_signal[start:end] = click
    return click_signal
def addDerivedMSCal(msname):
    """Add the derived columns like HA to an MS or CalTable.

    It adds the columns HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
    AZEL2, and UVW_J2000. They are all bound to the DerivedMSCal virtual
    data manager.

    It fails if one of the columns already exists.

    :param msname: name/path of the MeasurementSet or CalTable to modify
    :raises ValueError: if a column required by DerivedMSCal is missing
    """
    # Open the MS
    t = table(msname, readonly=False, ack=False)
    colnames = t.colnames()
    # Check that the columns needed by DerivedMSCal are present.
    # Note that ANTENNA2 and FEED2 are not required.
    for col in ["TIME", "ANTENNA1", "FIELD_ID", "FEED1"]:
        if col not in colnames:
            # BUG FIX: the original concatenated the *list* colnames into the
            # message ("Columns " + colnames + ...), which raised TypeError
            # instead of the intended ValueError. Report the missing column.
            raise ValueError("Column " + col + " should be present in table " + msname)
    scols1 = ['HA', 'HA1', 'HA2', 'PA1', 'PA2']
    scols2 = ['LAST', 'LAST1', 'LAST2']
    acols1 = ['AZEL1', 'AZEL2']
    acols2 = ['UVW_J2000']
    descs = []
    # Define the columns and their units.
    for col in scols1:
        descs.append(makescacoldesc(col, 0., keywords={"QuantumUnits": ["rad"]}))
    for col in scols2:
        descs.append(makescacoldesc(col, 0., keywords={"QuantumUnits": ["d"]}))
    for col in acols1:
        descs.append(makearrcoldesc(col, 0., keywords={"QuantumUnits": ["rad", "rad"]}))
    for col in acols2:
        descs.append(makearrcoldesc(col, 0., keywords={"QuantumUnits": ["m", "m", "m"], "MEASINFO": {"Ref": "J2000", "type": "uvw"}}))
    # Add all columns using DerivedMSCal as data manager.
    dminfo = {"TYPE": "DerivedMSCal", "NAME": "", "SPEC": {}}
    t.addcols(maketabdesc(descs), dminfo)
    # Flush the table to make sure it is written.
    t.flush()
def toTheOrdinal(n, inTitleCase=True):
    """Return the definite article with the ordinal name of a number,
    e.g. 'the second'.

    Becomes important for languages with multiple definite articles
    (e.g. French).

    :param n: position; -1 and -2 select "last"/"penultimate"
    :param inTitleCase: when True, title-case the result
    """
    named = {
        -1: _("the last"),
        -2: _("the penultimate"),
        1: _("the first"),
        2: _("the second"),
        3: _("the third"),
        4: _("the fourth"),
        5: _("the fifth"),
    }
    if n in named:
        retval = named[n]
        if inTitleCase:
            retval = retval.title()
        return retval
    # TODO: support other definite articles depending on gender, etc.
    retval = _("the")
    if inTitleCase:
        retval = retval.title()
    return retval + " " + _n2w(n, to="ordinal_num")
def sort(self, sortlist, name='', limit=0, offset=0, style='Python'):
    """Sort the table and return the result as a reference table.

    This method sorts the table by forming a `TaQL <../../doc/199.html>`_
    command from the given arguments and executing it via :func:`taql`.
    The result is a so-called reference table which references the columns
    and rows in the original table. Usually a reference table is temporary,
    but it can be made persistent by giving it a name. A reference table is
    handled as any table, thus can be queried again.

    `sortlist`
      The ORDERBY part of a TaQL command: a single string of comma-separated
      sort keys. A sort key can be a column name or an expression.
    `name`
      The name of the reference table if it is to be made persistent.
    `limit`
      If > 0, maximum number of rows to be selected after the sort step
      (e.g. to select the N highest values).
    `offset`
      If > 0, ignore the first `offset` matches after the sort step.
    `style`
      The TaQL syntax style to be used (defaults to Python).
    """
    clauses = ['select from $1 orderby ' + sortlist]
    if limit > 0:
        clauses.append(' limit %d' % limit)
    if offset > 0:
        clauses.append(' offset %d' % offset)
    if name:
        clauses.append(' giving ' + name)
    return tablecommand(''.join(clauses), style, [self])
def scale(config=None, name=None, replicas=None):
    """Scale the specified K8sReplicationController to the desired replica count.

    :param config: an instance of K8sConfig
    :param name: the name of the ReplicationController we want to scale.
    :param replicas: the desired number of replicas.
    :return: An instance of K8sReplicationController
    """
    controller = K8sReplicationController(config=config, name=name).get()
    controller.desired_replicas = replicas
    controller.update()
    # Block until the cluster actually reaches the requested replica count.
    controller._wait_for_desired_replicas()
    return controller
def interp(var, indexes_coords, method, **kwargs):
    """Make an interpolation of Variable.

    Parameters
    ----------
    var : Variable
    indexes_coords :
        Mapping from dimension name to a pair of original and new coordinates.
        Original coordinates should be sorted in strictly ascending order.
        Note that all the coordinates should be Variable objects.
    method : string
        One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}.
        For multidimensional interpolation, only {'linear', 'nearest'} can
        be used.
    **kwargs :
        keyword arguments to be passed to scipy.interpolate

    Returns
    -------
    Interpolated Variable

    See Also
    --------
    DataArray.interp
    Dataset.interp
    """
    # Nothing to interpolate over: return an unchanged copy.
    if not indexes_coords:
        return var.copy()
    # simple speed up for the local interpolation
    if method in ['linear', 'nearest']:
        var, indexes_coords = _localize(var, indexes_coords)
    # default behavior
    kwargs['bounds_error'] = kwargs.get('bounds_error', False)
    # target dimensions
    dims = list(indexes_coords)
    x, new_x = zip(*[indexes_coords[d] for d in dims])
    destination = broadcast_variables(*new_x)
    # transpose to make the interpolated axis to the last position
    broadcast_dims = [d for d in var.dims if d not in dims]
    original_dims = broadcast_dims + dims
    new_dims = broadcast_dims + list(destination[0].dims)
    interped = interp_func(var.transpose(*original_dims).data, x, destination, method, kwargs)
    result = Variable(new_dims, interped, attrs=var.attrs)
    # dimension of the output array: preserve the original dimension order,
    # substituting each interpolated dim with its new coordinate's dims.
    out_dims = OrderedSet()
    for d in var.dims:
        if d in dims:
            out_dims.update(indexes_coords[d][1].dims)
        else:
            out_dims.add(d)
    return result.transpose(*tuple(out_dims))
def release(self, xcoord, ycoord):
    """Release a previously issued 'tap and hold' command at the given point.

    :Args:
     - xcoord: X Coordinate to release.
     - ycoord: Y Coordinate to release.
    """
    coords = {'x': int(xcoord), 'y': int(ycoord)}
    self._actions.append(
        lambda: self._driver.execute(Command.TOUCH_UP, coords))
    return self
def get_python_args(fname, python_args, interact, debug, end_args):
    """Construct Python interpreter arguments.

    :param fname: script filename to run, or None
    :param python_args: extra interpreter arguments as one string, or None
    :param interact: when True, add '-i' to stay interactive after the script
    :param debug: when True, run the script under pdb ('-m pdb')
    :param end_args: trailing arguments string, split shell-style
    :return: list of interpreter argument strings
    """
    args = []
    if python_args is not None:
        args.extend(python_args.split())
    if interact:
        args.append('-i')
    if debug:
        args.extend(['-m', 'pdb'])
    if fname is not None:
        if os.name == 'nt' and debug:
            # When calling pdb on Windows, one has to replace backslashes by
            # slashes to avoid confusion with escape characters (otherwise,
            # for example, '\t' will be interpreted as a tabulation):
            args.append(osp.normpath(fname).replace(os.sep, '/'))
        else:
            args.append(fname)
    if end_args:
        args.extend(shell_split(end_args))
    return args
def _jog(self, axis, direction, step):
    """Move the pipette on `axis` in `direction` by `step` and refresh the
    cached position tracker.

    :return: a human-readable description of the jog performed
    """
    description = [axis, str(direction), str(step)]
    jog(axis, direction, step, self.hardware, self._current_mount)
    self.current_position = self._position()
    return 'Jog: {}'.format(description)
def as_plural(result_key):
    """Given a result key, return it in the plural form.

    Heuristic only -- not at all guaranteed to work in all cases.
    """
    if result_key.endswith('y'):
        return result_key[:-1] + 'ies'
    if result_key.endswith('address'):
        return result_key + 'es'
    if result_key.endswith('us'):
        return result_key[:-2] + 'uses'
    if result_key.endswith('s'):
        # Already plural-looking; leave unchanged.
        return result_key
    return result_key + 's'
def char_code(columns, name=None):
    """Character set code field.

    :param columns: total width in columns of the field
    :param name: name for the field
    :return: an instance of the Character set code field rules
    """
    if name is None:
        name = 'Char Code Field (' + str(columns) + ' columns)'
    if columns <= 0:
        # NOTE(review): raising bare BaseException escapes ordinary
        # ``except Exception`` handlers -- a ValueError would be more
        # appropriate, but changing it alters the caller-visible contract.
        raise BaseException()
    # Build an alternation of every known character set, each right-padded
    # into a 15-column slot with leading spaces.
    char_sets = None
    for char_set in _tables.get_data('character_set'):
        regex = '[ ]{' + str(15 - len(char_set)) + '}' + char_set
        if char_sets is None:
            char_sets = regex
        else:
            char_sets += '|' + regex
    # Accepted sets
    _character_sets = pp.Regex(char_sets)
    # Unicode escapes: U+hhhh (6 chars) and U+hhhhh (7 chars), space-padded
    # to fill the full field width.
    _unicode_1_16b = pp.Regex('U\+0[0-8,A-F]{3}[ ]{' + str(columns - 6) + '}')
    _unicode_2_21b = pp.Regex('U\+0[0-8,A-F]{4}[ ]{' + str(columns - 7) + '}')
    # Basic field
    char_code_field = (_character_sets | _unicode_1_16b | _unicode_2_21b)
    # Parse action
    char_code_field = char_code_field.setParseAction(lambda s: s[0].strip())
    # Name
    char_code_field.setName(name)
    return char_code_field
def airport_codes():
    """Return the set of airport codes that is available to be requested."""
    page_html = requests.get(URL).text
    # The codes live inside a data block embedded in the page markup.
    block = _find_data_block(page_html)
    return _airport_codes_from_data_block(block)
def debug_ratelimit(g):
    """Log debug of github ratelimit information from last API call.

    Parameters
    ----------
    g : github.MainClass.Github
        github object

    Raises
    ------
    TypeError
        if ``g`` is not a ``github.MainClass.Github`` instance.
    """
    # Explicit check instead of ``assert`` so the validation survives
    # running under ``python -O`` (asserts are stripped).
    if not isinstance(g, github.MainClass.Github):
        raise TypeError(type(g))
    debug("github ratelimit: {rl}".format(rl=g.rate_limiting))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.