signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_power_factor(self, output='eigs', doping_levels=True, relaxation_time=1e-14):
    """Return the power factor (Seebeck^2 * conductivity) in microW/(m*K^2).

    Args:
        output (str): 'tensor' for the full 3x3 tensor, 'eigs' for its three
            eigenvalues, or 'average' for the trace / 3.0.
        doping_levels (bool): if True, results are reported at the p- and
            n-type doping levels given by self.doping; otherwise at each
            electron chemical potential value.
        relaxation_time (float): constant relaxation time in seconds.

    Returns:
        If doping_levels is True, a dictionary {temp: {'p': [], 'n': []}}
        where 'p'/'n' hold the power factor at p-/n-type doping.  Otherwise
        a {temp: []} dictionary.  Each entry is formatted according to
        `output` and includes the constant relaxation time factor.
    """
    result = None
    result_doping = None
    if doping_levels:
        result_doping = {
            doping: {t: [] for t in self._seebeck_doping[doping]}
            for doping in self._seebeck_doping
        }
        for doping, per_temp in result_doping.items():
            for t, values in per_temp.items():
                for i in range(len(self.doping[doping])):
                    # sigma . (S . S) for this doping level / temperature.
                    sbk = self._seebeck_doping[doping][t][i]
                    values.append(np.dot(self._cond_doping[doping][t][i],
                                         np.dot(sbk, sbk)))
    else:
        result = {t: [] for t in self._seebeck}
        for t, values in result.items():
            for i in range(len(self.mu_steps)):
                sbk = self._seebeck[t][i]
                values.append(np.dot(self._cond[t][i], np.dot(sbk, sbk)))
    # 1e6 converts W to microW; relaxation time restores absolute units.
    return BoltztrapAnalyzer._format_to_output(
        result, result_doping, output, doping_levels,
        multi=1e6 * relaxation_time)
|
def dict_setdiff(dict_, negative_keys):
    r"""Return a copy of ``dict_`` without the keys in ``negative_keys``.

    Args:
        dict_ (dict): source mapping; never modified.
        negative_keys (list): keys to exclude from the copy.

    Returns:
        dict: a new dict containing every item of ``dict_`` whose key is
        not in ``negative_keys``.
    """
    # Build the exclusion set once -- the original re-evaluated
    # ``set(negative_keys)`` for every key -- and copy in one
    # comprehension instead of going through six.iterkeys/dict_subset.
    excluded = set(negative_keys)
    return {key: value for key, value in dict_.items() if key not in excluded}
|
def post(self, url, headers=None, **kwargs):
    """Send a POST request to a URL.

    :param url: The URL.
    :type url: ``string``
    :param headers: A list of pairs specifying the headers for the HTTP
        response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token':
        'boris')]``).  A form-urlencoded Content-Type is always appended.
    :type headers: ``list``
    :param kwargs: Additional keyword arguments (optional).  If the argument
        is ``body``, the value is used as the body for the request, and the
        remaining keywords and their arguments will be URL encoded.  If there
        is no ``body`` keyword argument, all the keyword arguments are encoded
        into the body of the request in the format
        ``x-www-form-urlencoded``.
    :type kwargs: ``dict``
    :returns: A dictionary describing the response (see :class:`HttpLib` for
        its structure).
    :rtype: ``dict``
    """
    if headers is None:
        headers = []
    # The original line ended with a stray trailing comma, turning the
    # statement into a throwaway one-element tuple; removed.
    headers.append(("Content-Type", "application/x-www-form-urlencoded"))
    # We handle GET-style arguments and an unstructured body.  This is here
    # to support the receivers/stream endpoint.
    if 'body' in kwargs:
        body = kwargs.pop('body')
        if len(kwargs) > 0:
            url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
    else:
        body = _encode(**kwargs)
    message = {'method': "POST", 'headers': headers, 'body': body}
    return self.request(url, message)
|
def query_put_bounders(query, partition_column, start, end):
    """Wrap *query* in a subselect restricted to a partition-column range.

    Args:
        query: SQL query string.
        partition_column: name of the column used for partitioning.
        start: inclusive lower bound.
        end: inclusive upper bound.

    Returns:
        The SQL query string with the bounding WHERE clause applied.
    """
    bounds_clause = (
        " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}"
        .format(partition_column, start, end)
    )
    return "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, bounds_clause)
|
def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None, profile=None):
    '''Helper that adds or removes API stages on the usage plan ``plan_id``.

    apis
        a list of dictionaries, where each dictionary contains the following:

        apiId
            a string, which is the id of the created API in AWS ApiGateway
        stage
            a string, which is the stage that the created API is deployed to.
    op
        'add' or 'remove'
    '''
    try:
        # One patch operation per (api, stage) pair.
        patch_ops = [
            {'op': op,
             'path': '/apiStages',
             'value': '{0}:{1}'.format(api['apiId'], api['stage'])}
            for api in apis
        ]
        res = None
        if patch_ops:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            res = conn.update_usage_plan(usagePlanId=plan_id,
                                         patchOperations=patch_ops)
        return {'success': True, 'result': res}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    except Exception as e:
        # Non-boto failures are reported as-is in the error slot.
        return {'error': e}
|
def get_all_zones(self, zones=None, filters=None):
    """Fetch all Availability Zones associated with the current region.

    :type zones: list
    :param zones: Optional list of zone names; when present, only the Zones
        with these names are returned.
    :type filters: dict
    :param filters: Optional filter-name -> filter-value mapping used to
        limit the results.  The set of allowable filter names/values
        depends on the request being performed; check the EC2 API guide
        for details.
    :rtype: list of :class:`boto.ec2.zone.Zone`
    :return: The requested Zone objects
    """
    params = {}
    if zones:
        self.build_list_params(params, zones, 'ZoneName')
    if filters:
        self.build_filter_params(params, filters)
    markers = [('item', Zone)]
    return self.get_list('DescribeAvailabilityZones', params, markers,
                         verb='POST')
|
def check_release_files(package_info, *args):
    """Does the package have release files?

    :param package_info: package_info dictionary
    :return: tuple (is the condition True or False?, fixed reason string,
        HAS_RELEASE_FILES marker)
    """
    release_urls = args[0]
    has_files = len(release_urls) > 0
    # The reason string is returned unconditionally; callers consult the
    # boolean to decide whether it applies.
    return has_files, "No release files uploaded", HAS_RELEASE_FILES
|
def logger(self):
    """Lazily instantiate and return this object's ServiceLogger instance."""
    # Create the logger on first use, or when a previous value is falsy.
    if not getattr(self, '_logger', None):
        self._logger = ServiceLogger()
    return self._logger
|
def find_or_create_by_name(self, item_name, items_list, item_type):
    """Return the item named *item_name* from *items_list*, creating one
    via the *item_type* factory when no such item exists.

    Either way, an item of type item_type is returned.  The factory is the
    third slot of ``self.data_lists[item_type]``.
    """
    existing = self.find_by_name(item_name, items_list)
    if existing:
        return existing
    # Not found: build a fresh item with the registered constructor.
    return self.data_lists[item_type][2](item_name, None)
|
def convert_args(self, command, args):
    """Convert each argument according to the command's declared arg types.

    Yields, per argument: for a ``const`` type, the int conversion of the
    argument when possible, else its value in ``self.processor.constants``,
    else the argument unchanged; for a ``register`` type, the argument's
    index from ``self.register_indices``.
    """
    for wanted, arg in zip(command.argtypes(), args):
        wanted = wanted.type_
        if wanted == "const":
            try:
                # Convert first, yield after: the original yielded inside
                # the try with a bare ``except:``, which could also swallow
                # exceptions raised *through* the yield (e.g. GeneratorExit)
                # and masked KeyboardInterrupt/SystemExit.
                value = to_int(arg)
            except Exception:
                if arg in self.processor.constants:
                    yield self.processor.constants[arg]
                else:
                    yield arg
            else:
                yield value
        elif wanted == "register":
            yield self.register_indices[arg]
|
def transformer_moe_layer_v1(inputs, output_dim, hparams, train,
                             master_dtype=tf.bfloat16, slice_dtype=tf.float32):
    """Local mixture of experts that works well on TPU.

    Adapted from the paper https://arxiv.org/abs/1701.06538

    Note: until the algorithm and interface solidify, we pass in a
    hyperparameters dictionary in order not to complicate the interface in
    mtf_transformer.py.  Once this code moves out of "research", we should
    pass the hyperparameters separately.

    Hyperparameters used:
        hparams.moe_num_experts: number of experts
        hparams.moe_hidden_size: size of hidden layer in each expert
        hparams.moe_group_size: size of each "group" for gating purposes
        hparams.moe_capacity_factor_train: a float
        hparams.moe_capacity_factor_eval: a float
        hparams.moe_gating: a string
        + all hyperparameters used by _top_2_gating()

    The number of parameters in the gating network is:
        (input_dim.size * hparams.num_experts)
    The number of parameters in the experts themselves is:
        (hparams.num_experts * (input_dim.size + output_dim.size)
         * hparams.moe_hidden_size)

    The input is n-dimensional: [<batch_and_length_dims>, input_dim],
    consisting of the representations of all positions in a batch of
    sequences.  Each position of each sequence is sent to 0-2 experts.  The
    expert choices and the combination weights are determined by a learned
    gating function.

    This function returns a small auxiliary loss that should be added to the
    training loss of the model.  This loss helps to balance expert usage:
    without it, it is very likely that a few experts will be trained and the
    rest will starve.

    Several hacks are necessary to get around current TPU limitations:
    - To ensure static shapes, we enforce (by truncation/padding) that each
      sequence send the same number of elements to each expert.  It would
      make more sense to enforce this equality over the entire batch, but
      due to our hacked-up gather-by-matmul implementation, we need to
      divide the batch into "groups"; for each group, the same number of
      elements are sent to each expert.

    TODO(noam): Factor this code better.  We want to be able to substitute
    different code for the experts themselves.

    Args:
        inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]
        output_dim: a mtf.Dimension (for Transformer, this is input_dim)
        hparams: model hyperparameters
        train: a boolean
        master_dtype: a tf.dtype
        slice_dtype: a tf.dtype

    Returns:
        outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]
        loss: a mtf scalar

    Raises:
        ValueError: on unrecognized hparams.moe_gating
    """
    orig_inputs = inputs
    input_dim = inputs.shape.dims[-1]
    hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
    experts_dim = mtf.Dimension("experts", hparams.moe_num_experts)
    group_size_dim = mtf.Dimension("group", hparams.moe_group_size)
    # Fold everything except (group, input) into a single synthetic batch
    # dimension so the gating operates on fixed-size groups.
    batch_dim = mtf.Dimension(
        orig_inputs.shape[0].name,
        orig_inputs.shape.size // (group_size_dim.size * input_dim.size))
    inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])
    # Each sequence sends expert_capacity positions to each expert.
    capacity_factor = (hparams.moe_capacity_factor_train if train
                       else hparams.moe_capacity_factor_eval)
    expert_capacity = min(
        group_size_dim.size,
        int((group_size_dim.size * capacity_factor) / experts_dim.size))
    expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity)
    # "Unsplit" twins of the expert/batch dims are needed so the all-to-all
    # reshape below can move the split between mesh dimensions.
    experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size)
    batch_dim_unsplit = mtf.Dimension("batch_unsplit", batch_dim.size)
    if hparams.moe_gating == "top_2":
        dispatch_tensor, combine_tensor, loss = _top_2_gating(
            inputs=inputs,
            outer_expert_dims=None,
            experts_dim=experts_dim_unsplit,
            expert_capacity_dim=expert_capacity_dim,
            hparams=hparams,
            train=train)
    else:
        raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
    # Put the num_experts dimension first to make the split easier in alltoall.
    expert_inputs = mtf.einsum(
        [inputs, dispatch_tensor],
        mtf.Shape([experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
    expert_inputs = mtf.reshape(
        expert_inputs,
        mtf.Shape([experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))
    # Now feed the expert inputs through the experts (two dense layers per
    # expert, no bias, relu in between).
    h = mtf.layers.dense(
        expert_inputs, hidden_dim, expert_dims=[experts_dim],
        activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
        slice_dtype=slice_dtype, name="x0")
    expert_output = mtf.layers.dense(
        h, output_dim, expert_dims=[experts_dim], use_bias=False,
        master_dtype=master_dtype, slice_dtype=slice_dtype, name="x1")
    # NOTE(review): this reshape names the last dimension input_dim even
    # though expert_output's last dimension is output_dim -- only valid when
    # the two have equal size (true for Transformer, where output_dim is
    # input_dim).  Confirm before using with output_dim != input_dim.
    expert_output = mtf.reshape(
        expert_output,
        mtf.Shape([experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
    # Combine the expert outputs back into per-position representations.
    output = mtf.einsum(
        [expert_output, combine_tensor],
        mtf.Shape([batch_dim, group_size_dim, output_dim]))
    # Restore the caller's original batch/length dimensions.
    output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])
    return output, loss * hparams.moe_loss_coef
|
def add_nodes(self, nodes, nesting=1):
    """Add edges indicating the call-tree for the procedures listed in
    the nodes.

    Each source node gets a rainbow colour; its calls become solid edges
    and its interfaces dashed ones.
    """
    hopNodes = set()  # nodes reached in this hop
    hopEdges = []     # edges added in this hop
    # Get nodes and edges for this hop; enumerate replaces the original
    # zip(range(len(nodes)), nodes).
    for i, n in enumerate(nodes):
        r, g, b = rainbowcolour(i, len(nodes))
        colour = '#%02X%02X%02X' % (r, g, b)
        for p in n.calls:
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'solid', colour))
        for p in getattr(n, 'interfaces', []):
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'dashed', colour))
    # Add nodes, edges and attributes to the graph if the maximum number of
    # nodes is not exceeded.
    if self.add_to_graph(hopNodes, hopEdges, nesting):
        self.dot.attr('graph', size='11.875,1000.0')
        self.dot.attr('graph', concentrate='false')
|
def validate(self, instance, value):
    """Check the class of the container and validate each element.

    Returns a copy of the container, so pointers are never shared with
    the caller.
    """
    if not self.coerce and not isinstance(value, self._class_container):
        self.error(instance, value)
    if self.coerce and not isinstance(value, CONTAINERS):
        value = [value]
    # Preserve the incoming container class when it already matches the
    # declared one; otherwise fall back to the declared container class.
    out_class = (value.__class__
                 if isinstance(value, self._class_container)
                 else self._class_container)
    validated = []
    for item in value:
        try:
            validated.append(self.prop.validate(instance, item))
        except ValueError:
            self.error(instance, item, extra='This item is invalid.')
    return out_class(validated)
|
def LightcurveHDU(model):
    '''Construct the data HDU file containing the arrays and the observing info.'''
    # Get mission cards.
    cards = model._mission.HDUCards(model.meta, hdu=1)
    # Add EVEREST info.
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '*     EVEREST INFO     *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))
    cards.append(('MODEL', model.name, 'Name of EVEREST model used'))
    cards.append(('APNAME', model.aperture_name, 'Name of aperture used'))
    cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences'))
    # One card per light-curve breakpoint (1-based numbering).
    for c in range(len(model.breakpoints)):
        cards.append(('BRKPT%02d' % (c + 1), model.breakpoints[c],
                      'Light curve breakpoint'))
    cards.append(('CBVNUM', model.cbv_num, 'Number of CBV signals to recover'))
    cards.append(('CBVNITER', model.cbv_niter,
                  'Number of CBV SysRem iterations'))
    cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs'))
    cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs'))
    cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions'))
    cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP'))
    cards.append(('CDPPR', model.cdppr, 'Raw CDPP'))
    cards.append(('CDPPV', model.cdppv, 'Average validation CDPP'))
    cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP'))
    # Per-chunk CDPP values; NaN is stored as 0 since FITS cards cannot
    # hold NaN.  The bare except relies on IndexError to stop at the end
    # of cdpp_arr (up to 99 chunks).
    for i in range(99):
        try:
            cards.append(('CDPP%02d' % (i + 1),
                          model.cdpp_arr[i]
                          if not np.isnan(model.cdpp_arr[i]) else 0,
                          'Chunk de-trended CDPP'))
            cards.append(('CDPPR%02d' % (i + 1),
                          model.cdppr_arr[i]
                          if not np.isnan(model.cdppr_arr[i]) else 0,
                          'Chunk raw CDPP'))
            cards.append(('CDPPV%02d' % (i + 1),
                          model.cdppv_arr[i]
                          if not np.isnan(model.cdppv_arr[i]) else 0,
                          'Chunk validation CDPP'))
        except:
            break
    cards.append(('CVMIN', model.cv_min, 'Cross-validation objective function'))
    cards.append(('GITER', model.giter, 'Number of GP optimiziation iterations'))
    # NOTE(review): GMAXF also reads model.giter -- looks like a copy-paste
    # slip (presumably a gmaxf attribute was intended); confirm upstream
    # before changing.
    cards.append(('GMAXF', model.giter, 'Max number of GP function evaluations'))
    cards.append(('GPFACTOR', model.gp_factor,
                  'GP amplitude initialization factor'))
    cards.append(('KERNEL', model.kernel, 'GP kernel name'))
    # Kernel hyperparameters; layout depends on the kernel type.
    if model.kernel == 'Basic':
        cards.append(('GPWHITE', model.kernel_params[0],
                      'GP white noise amplitude (e-/s)'))
        cards.append(('GPRED', model.kernel_params[1],
                      'GP red noise amplitude (e-/s)'))
        cards.append(('GPTAU', model.kernel_params[2],
                      'GP red noise timescale (days)'))
    elif model.kernel == 'QuasiPeriodic':
        cards.append(('GPWHITE', model.kernel_params[0],
                      'GP white noise amplitude (e-/s)'))
        cards.append(('GPRED', model.kernel_params[1],
                      'GP red noise amplitude (e-/s)'))
        cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor'))
        cards.append(('GPPER', model.kernel_params[3], 'GP period (days)'))
    # Cross-validation parameters per (chunk, PLD order).
    for c in range(len(model.breakpoints)):
        for o in range(model.pld_order):
            cards.append(('LAMB%02d%02d' % (c + 1, o + 1), model.lam[c][o],
                          'Cross-validation parameter'))
            if model.name == 'iPLD':
                cards.append(('RECL%02d%02d' % (c + 1, o + 1),
                              model.reclam[c][o], 'Cross-validation parameter'))
    cards.append(('LEPS', model.leps, 'Cross-validation tolerance'))
    cards.append(('MAXPIX', model.max_pixels, 'Maximum size of TPF aperture'))
    # At most 99 nearby sources fit the NRBY%02d card-name scheme.
    for i, source in enumerate(model.nearby[:99]):
        cards.append(('NRBY%02dID' % (i + 1), source['ID'], 'Nearby source ID'))
        cards.append(('NRBY%02dX' % (i + 1), source['x'],
                      'Nearby source X position'))
        cards.append(('NRBY%02dY' % (i + 1), source['y'],
                      'Nearby source Y position'))
        cards.append(('NRBY%02dM' % (i + 1), source['mag'],
                      'Nearby source magnitude'))
        cards.append(('NRBY%02dX0' % (i + 1), source['x0'],
                      'Nearby source reference X'))
        cards.append(('NRBY%02dY0' % (i + 1), source['y0'],
                      'Nearby source reference Y'))
    # NOTE(review): unlike the cards above, NEIGH%02d is 0-based (uses i,
    # not i + 1) -- confirm whether that is intentional.
    for i, n in enumerate(model.neighbors):
        cards.append(('NEIGH%02d' % i, model.neighbors[i],
                      'Neighboring star used to de-trend'))
    cards.append(('OITER', model.oiter, 'Number of outlier search iterations'))
    cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?'))
    cards.append(('OSIGMA', model.osigma,
                  'Outlier tolerance (standard deviations)'))
    for i, planet in enumerate(model.planets):
        cards.append(('P%02dT0' % (i + 1), planet[0],
                      'Planet transit time (days)'))
        cards.append(('P%02dPER' % (i + 1), planet[1],
                      'Planet transit period (days)'))
        cards.append(('P%02dDUR' % (i + 1), planet[2],
                      'Planet transit duration (days)'))
    cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order'))
    cards.append(('SATUR', model.saturated, 'Is target saturated?'))
    cards.append(('SATTOL', model.saturation_tolerance,
                  'Fractional saturation tolerance'))
    # Add the EVEREST quality flags to the QUALITY array (one bit per mask).
    quality = np.array(model.quality)
    quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1)
    quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1)
    quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1)
    quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1)
    quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1)
    # When de-trending, we interpolated to fill in NaN fluxes.  Here we
    # insert the NaNs back in, since there's no actual physical
    # information at those cadences.
    flux = np.array(model.flux)
    flux[model.nanmask] = np.nan
    # Create the arrays list.
    arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn),
              pyfits.Column(name='FLUX', format='D', array=flux, unit='e-/s'),
              pyfits.Column(name='FRAW', format='D', array=model.fraw,
                            unit='e-/s'),
              pyfits.Column(name='FRAW_ERR', format='D', array=model.fraw_err,
                            unit='e-/s'),
              pyfits.Column(name='QUALITY', format='J', array=quality),
              pyfits.Column(name='TIME', format='D', array=model.time,
                            unit='BJD - 2454833')]
    # Add the CBVs.
    if model.fcor is not None:
        arrays += [pyfits.Column(name='FCOR', format='D', array=model.fcor,
                                 unit='e-/s')]
        for n in range(model.XCBV.shape[1]):
            arrays += [pyfits.Column(name='CBV%02d' % (n + 1), format='D',
                                     array=model.XCBV[:, n])]
    # Did we subtract a background term?
    if hasattr(model.bkg, '__len__'):
        arrays.append(pyfits.Column(name='BKG', format='D', array=model.bkg,
                                    unit='e-/s'))
    # Create the HDU.
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS')
    return hdu
|
def default(self, request, tag):
    """Render the initial value of the wrapped L{Parameter} instance."""
    initial = self.parameter.default
    if initial is not None:
        # Stan tags add children via __getitem__; the expression's value
        # is discarded on purpose.
        tag[initial]
    return tag
|
def _new_empty_handle():
    """Return a new empty NDArray handle.

    An empty handle can be used to hold a result from a subsequent
    C-API call.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    hdl = NDArrayHandle()
    # Ask the MXNet C library to initialise the handle in place;
    # check_call raises if the call returns a non-zero status.
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
    return hdl
|
def _spin_up ( self , images , duration ) :
"""Simulate the motors getting warmed up ."""
|
total = 0
# pylint : disable = no - member
for image in images :
self . microbit . display . show ( image )
time . sleep ( 0.05 )
total += 0.05
if total >= duration :
return
remaining = duration - total
self . _full_speed_rumble ( images [ - 2 : ] , remaining )
self . set_display ( )
|
def config_args(self):
    """Register all command-line options on self.arg_parser and parse them
    into self.args.

    Covers the module-list options plus the filter and output argument
    groups contributed by self.filter_args / self.output_args.
    """
    # Module list options:
    self.arg_parser.add_argument('--version', action='version',
                                 version='%(prog)s ' + str(__version__))
    self.arg_parser.add_argument('--verbose', action='store_true',
                                 dest='verbosemode',
                                 help=_('set verbose terminal output'))
    self.arg_parser.add_argument('-s', action='store_true', dest='silentmode',
                                 help=_('silence terminal output'))
    self.arg_parser.add_argument('--list-parsers', action='store_true',
                                 dest='list_parsers',
                                 help=_('return a list of available parsers'))
    self.arg_parser.add_argument('-p', action='store', dest='parser',
                                 default='syslog',
                                 help=_('select a parser (default: syslog)'))
    self.arg_parser.add_argument('-z', '--unzip', action='store_true',
                                 dest='unzip',
                                 help=_('include files compressed with gzip'))
    self.arg_parser.add_argument('-t', action='store', dest='tzone',
                                 help=_('specify timezone offset to UTC '
                                        '(e.g. \'+0500\')'))
    # nargs needs to be * not + so --list-filters/etc will work without
    # a file argument.
    self.arg_parser.add_argument('files',
                                 metavar='file', nargs='*',
                                 help=_('specify input files'))
    # self.arg_parser.add_argument_group(self.parse_args)
    self.arg_parser.add_argument_group(self.filter_args)
    self.arg_parser.add_argument_group(self.output_args)
    self.args = self.arg_parser.parse_args()
|
def get_subscription_from_cli(name=None):
    '''Get the default, or named, subscription id from the Azure CLI's local cache.

    Args:
        name (str): Optional subscription name.  If this is set, the
            subscription id of the named subscription is returned from the
            CLI cache if present.  If not set, the subscription id of the
            default subscription is returned.

    Returns:
        Azure subscription ID string, or None when the profile file does not
        exist or no matching subscription is found.

    Requirements:
        User has run 'az login' once, or is in Azure Cloud Shell.
    '''
    # Build the path portably instead of concatenating os.sep by hand.
    azure_profile_path = os.path.join(os.path.expanduser('~'), '.azure',
                                      'azureProfile.json')
    if not os.path.isfile(azure_profile_path):
        print('Error from get_subscription_from_cli(): Cannot find ' +
              azure_profile_path)
        return None
    # 'utf-8-sig' tolerates the BOM the CLI writes on some platforms.
    with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd:
        azure_profile = json.load(azure_profile_fd)
    for subscription_info in azure_profile['subscriptions']:
        if (name is None and subscription_info['isDefault'] is True) \
                or subscription_info['name'] == name:
            return subscription_info['id']
    return None
|
def setPosition(self, poiID, x, y):
    """setPosition(string, (double, double)) -> None

    Sets the position coordinates of the poi.
    """
    # Payload size: 1 byte for the position-type tag + two 8-byte doubles.
    self._connection._beginMessage(tc.CMD_SET_POI_VARIABLE, tc.VAR_POSITION,
                                   poiID, 1 + 8 + 8)
    # "!Bdd": network byte order, unsigned char (POSITION_2D tag), two doubles.
    self._connection._string += struct.pack("!Bdd", tc.POSITION_2D, x, y)
    self._connection._sendExact()
|
def zipWithUniqueId(self):
    """Zip this RDD with generated unique Long ids.

    Items in the kth partition get ids k, n+k, 2*n+k, ..., where n is the
    number of partitions, so there may exist gaps -- but unlike
    L{zipWithIndex} this method won't trigger a spark job.

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_partitions = self.getNumPartitions()

    def assign_ids(split_index, iterator):
        # id = position * n + partition index: unique across partitions.
        for position, item in enumerate(iterator):
            yield item, position * num_partitions + split_index

    return self.mapPartitionsWithIndex(assign_ids)
|
def rsdl(self):
    """Compute the fixed point residual: the 2-norm of X - Yprv, flattened."""
    difference = self.X - self.Yprv
    return np.linalg.norm(difference.ravel())
|
def TokenClient(domain, token, user_agent=None,
                request_encoder=default_request_encoder,
                response_decoder=default_response_decoder, ):
    """Create a Freshbooks client for a freshbooks domain, using token-based
    auth.

    The optional request_encoder and response_decoder parameters can be
    passed the logging_request_encoder and logging_response_decoder objects
    from this module, or custom encoders, to aid debugging or change the
    behaviour of refreshbooks' request-to-XML-to-response mapping.

    The optional user_agent keyword parameter can be used to specify the
    user agent string passed to FreshBooks.  If unset, a default user agent
    string is used.
    """
    authorization = transport.TokenAuthorization(token)
    return AuthorizingClient(
        domain,
        authorization,
        request_encoder,
        response_decoder,
        user_agent=user_agent,
    )
|
def init(req, model):  # pylint: disable=unused-argument
    """Determine the pagination preference by query parameter.

    Numbers only, >= 0, and each query param may only be specified once.

    :return: Paginator object
    """
    limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT
    offset = req.get_param('page[offset]') or 0
    try:
        return Paginator(limit, offset)
    except ValueError:
        # Paginator rejects non-integer or negative inputs.
        raise InvalidQueryParams(
            detail='The page[\'limit\'] & page[\'offset\'] query '
                   'params may only be specified once each & must '
                   'both be an integer >= 0.',
            links='jsonapi.org/format/#fetching-pagination',
            parameter='page',
        )
|
def get_pokemon_by_name(self, name):
    """Return an array of Pokemon objects containing all the forms of the
    Pokemon with the given name."""
    endpoint = '/pokemon/{0}'.format(name)
    return self.make_request(self.BASE_URL + endpoint)
|
def get_upregulated_genes_network(self) -> Graph:
    """Get the graph of up-regulated genes.

    :return Graph: Graph of up-regulated genes.
    """
    logger.info("In get_upregulated_genes_network()")
    # Deep-copy so the source graph is left untouched.
    deg_graph = self.graph.copy()
    # Drop genes that are not differentially expressed, then any vertices
    # left with no connections to others.
    not_upregulated = self.graph.vs(up_regulated_eq=False)
    deg_graph.delete_vertices(not_upregulated.indices)
    deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))
    return deg_graph
|
def watch_docs(c):
    """Watch both doc trees & rebuild them if files change.

    This includes e.g. rebuilding the API docs if the source code changes;
    rebuilding the WWW docs if the README changes; etc.

    Reuses the configuration values ``packaging.package`` or ``tests.package``
    (the former winning over the latter if both defined) when determining
    which source directory to scan for API doc updates.
    """
    # TODO: break back down into generic single-site version, then create
    # split tasks as with docs/www above.  Probably wants invoke #63.
    # NOTE: 'www'/'docs' refer to the module level sub-collections.  meh.
    # Readme & WWW triggers WWW.
    www_c = Context(config=c.config.clone())
    www_c.update(**www.configuration())
    www_handler = make_handler(
        ctx=www_c,
        task_=www["build"],
        regexes=[r"\./README.rst", r"\./sites/www"],
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
    )
    # Code and docs trigger API.
    docs_c = Context(config=c.config.clone())
    docs_c.update(**docs.configuration())
    regexes = [r"\./sites/docs"]
    package = c.get("packaging", {}).get("package", None)
    if package is None:
        package = c.get("tests", {}).get("package", None)
    if package:
        regexes.append(r"\./{}/".format(package))
    api_handler = make_handler(
        ctx=docs_c,
        task_=docs["build"],
        regexes=regexes,
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
    )
    observe(www_handler, api_handler)
|
def dicom_read(directory, pixeltype='float'):
    """Read a set of dicom files in a directory into a single ANTsImage.

    The origin of the resulting 3D image will be the origin of the
    first dicom image read.

    Arguments
    ---------
    directory : string
        folder in which all the dicom images exist

    Returns
    -------
    ANTsImage

    Raises
    ------
    ValueError
        when the directory contains no ``.dcm`` files.

    Example
    -------
    >>> import ants
    >>> img = ants.dicom_read('~/desktop/dicom-subject/')
    """
    # Sort the listing: os.listdir order is arbitrary, and file order
    # determines the stacking order of the volume's slices.
    dicom_files = sorted(f for f in os.listdir(directory) if f.endswith('.dcm'))
    if not dicom_files:
        # Previously this fell through to an obscure UnboundLocalError.
        raise ValueError('no .dcm files found in %s' % directory)
    slices = []
    origin = spacing = direction = None
    for imgidx, imgpath in enumerate(dicom_files):
        if imgidx == 0:
            # Read the first slice as a 3D image to capture the physical
            # metadata (origin/spacing/direction) for the whole volume.
            tmp = image_read(os.path.join(directory, imgpath),
                             dimension=3, pixeltype=pixeltype)
            origin = tmp.origin
            spacing = tmp.spacing
            direction = tmp.direction
            tmp = tmp.numpy()[:, :, 0]
        else:
            tmp = image_read(os.path.join(directory, imgpath),
                             dimension=2, pixeltype=pixeltype).numpy()
        slices.append(tmp)
    slices = np.stack(slices, axis=-1)
    return from_numpy(slices, origin=origin, spacing=spacing,
                      direction=direction)
|
def beta_geometric_nbd_model_transactional_data(T, r, alpha, a, b, observation_period_end="2019-1-1", freq="D", size=1):
    """Generate artificial transactional data according to the BG/NBD model.

    See [1]_ for model details.

    Parameters
    ----------
    T : int, float or array_like
        The length of time observing new customers.
    r, alpha, a, b : float
        Parameters in the model. See [1]_
    observation_period_end : date_like
        The date observation ends
    freq : string, optional
        Default 'D' for days, 'W' for weeks, 'h' for hours
    size : int, optional
        The number of customers to generate

    Returns
    -------
    DataFrame
        With columns 'customer_id', 'date'.

    References
    ----------
    .. [1]: '"Counting Your Customers" the Easy Way: An Alternative to the Pareto/NBD Model'
       (http://brucehardie.com/papers/bgnbd_2004-04-20.pdf)
    """
    observation_period_end = pd.to_datetime(observation_period_end)
    if type(T) in [float, int]:
        start_date = [observation_period_end - pd.Timedelta(T - 1, unit=freq)] * size
        T = T * np.ones(size)
    else:
        start_date = [observation_period_end - pd.Timedelta(T[i] - 1, unit=freq) for i in range(size)]
        T = np.asarray(T)
    # Per-customer heterogeneity: dropout probability ~ Beta(a, b),
    # purchase rate ~ Gamma(r, 1/alpha).
    probability_of_post_purchase_death = random.beta(a, b, size=size)
    lambda_ = random.gamma(r, scale=1.0 / alpha, size=size)
    columns = ["customer_id", "date"]
    frames = []
    for i in range(size):
        s = start_date[i]
        p = probability_of_post_purchase_death[i]
        l = lambda_[i]
        age = T[i]
        # The initial purchase happens just before the observation window opens.
        purchases = [[i, s - pd.Timedelta(1, unit=freq)]]
        next_purchase_in = random.exponential(scale=1.0 / l)
        alive = True
        while next_purchase_in < age and alive:
            purchases.append([i, s + pd.Timedelta(next_purchase_in, unit=freq)])
            next_purchase_in += random.exponential(scale=1.0 / l)
            # After each purchase the customer churns with probability p.
            alive = random.random() > p
        frames.append(pd.DataFrame(purchases, columns=columns))
    if not frames:
        return pd.DataFrame(columns=columns)
    # DataFrame.append was deprecated and removed in pandas 2.0 (and was
    # quadratic when called in a loop); build the frame once with concat.
    df = pd.concat(frames, ignore_index=True)
    return df.reset_index(drop=True)
|
def check_presence_of_mandatory_args(args, mandatory_args):
    '''Verify that every mandatory argument was passed.

    Aimed at methods with many arguments passed as kwargs, so the order
    in which they are supplied does not matter.

    :args: The dictionary passed as args.
    :mandatory_args: A list of keys that have to be present in the dictionary.
    :raise: :exc:`~ValueError` listing every absent key.
    :returns: True, if all mandatory args are passed.
    '''
    absent = [key for key in mandatory_args if key not in args]
    if absent:
        raise ValueError('Missing mandatory arguments: ' + ', '.join(absent))
    return True
|
def _addsub_int_array(self, other, op):
    """Add or subtract an array-like of integers, equivalent to applying
    ``_time_shift`` pointwise.

    Parameters
    ----------
    other : Index, ExtensionArray, np.ndarray
        integer-dtype
    op : {operator.add, operator.sub}

    Returns
    -------
    result : same class as self

    Raises
    ------
    NullFrequencyError
        If ``self.freq`` is None a pointwise shift is undefined.
    """
    # _addsub_int_array is overriden by PeriodArray
    assert not is_period_dtype(self)
    assert op in [operator.add, operator.sub]
    if self.freq is None:
        # GH#19123
        raise NullFrequencyError("Cannot shift with no freq")
    elif isinstance(self.freq, Tick):
        # easy case where we can convert to timedelta64 operation
        td = Timedelta(self.freq)
        return op(self, td * other)
    # We should only get here with DatetimeIndex; dispatch
    # to _addsub_offset_array
    assert not is_timedelta64_dtype(self)
    return op(self, np.array(other) * self.freq)
|
def angular_separation(lonp1, latp1, lonp2, latp2):
    """Compute the angles between lon/lat points p1 and p2 given in radians.

    On the unit sphere, this also corresponds to the great circle distance.
    p1 and p2 can be numpy arrays of the same length.
    """
    xp1, yp1, zp1 = lonlat2xyz(lonp1, latp1)
    xp2, yp2, zp2 = lonlat2xyz(lonp2, latp2)
    # The dot product of the two unit vectors gives cos(angle). Clip to the
    # valid arccos domain: floating-point rounding can push the product
    # slightly outside [-1, 1] for (nearly) identical or antipodal points,
    # which would otherwise produce NaN.
    dots = np.clip(xp1 * xp2 + yp1 * yp2 + zp1 * zp2, -1.0, 1.0)
    # As this is a unit sphere, angle == arc length.
    return np.arccos(dots)
|
def undo(self, x_prec):
    """Transform the unknowns back to the original coordinates.

    This method also transforms the gradient to preconditioned coordinates.
    """
    if self.scales is None:
        # No preconditioner configured: identity transform.
        return x_prec
    rescaled = x_prec / self.scales
    return np.dot(self.rotation, rescaled)
|
def _get_optional_attrs(kws):
    """Given keyword args, return optional_attributes to be loaded into the GODag."""
    attrs = OboOptionalAttrs.attributes.intersection(kws)
    # 'sections' implies relationships are needed; 'norel' explicitly
    # disables them (and wins if both are present).
    if 'sections' in kws:
        attrs.add('relationship')
    if 'norel' in kws:
        attrs.discard('relationship')
    return attrs
|
def bads_report ( bads , path_prefix = None ) :
"""Return a nice report of bad architectures in ` bads `
Parameters
bads : set
set of length 2 or 3 tuples . A length 2 tuple is of form
` ` ( depending _ lib , missing _ archs ) ` ` meaning that an arch in
` require _ archs ` was missing from ` ` depending _ lib ` ` . A length 3 tuple
is of form ` ` ( depended _ lib , depending _ lib , missing _ archs ) ` ` where
` ` depended _ lib ` ` is the filename of the library depended on ,
` ` depending _ lib ` ` is the library depending on ` ` depending _ lib ` ` and
` ` missing _ archs ` ` is a set of missing architecture strings giving
architectures present in ` ` depending _ lib ` ` and missing in
` ` depended _ lib ` ` . An empty set means all architectures were present as
required .
path _ prefix : None or str , optional
Path prefix to strip from ` ` depended _ lib ` ` and ` ` depending _ lib ` ` . None
means do not strip anything .
Returns
report : str
A nice report for printing"""
|
path_processor = ( ( lambda x : x ) if path_prefix is None else get_rp_stripper ( path_prefix ) )
reports = [ ]
for result in bads :
if len ( result ) == 3 :
depended_lib , depending_lib , missing_archs = result
reports . append ( "{0} needs {1} {2} missing from {3}" . format ( path_processor ( depending_lib ) , 'archs' if len ( missing_archs ) > 1 else 'arch' , ', ' . join ( sorted ( missing_archs ) ) , path_processor ( depended_lib ) ) )
elif len ( result ) == 2 :
depending_lib , missing_archs = result
reports . append ( "Required {0} {1} missing from {2}" . format ( 'archs' if len ( missing_archs ) > 1 else 'arch' , ', ' . join ( sorted ( missing_archs ) ) , path_processor ( depending_lib ) ) )
else :
raise ValueError ( 'Report tuple should be length 2 or 3' )
return '\n' . join ( sorted ( reports ) )
|
def inlink_file(self, filepath):
    """Create a symbolic link to the specified file in the
    directory containing the input files of the task.

    :param filepath: Path of the file to link to. The link name is built
        from the file's Abinit extension with an ``in_`` prefix, inside
        ``self.indir``.
    :raises self.Error: if a link with the expected name already exists
        but resolves to a different file.
    """
    if not os.path.exists(filepath):
        # NOTE(review): this only logs; the symlink below is still created,
        # producing a dangling link until the target appears.
        logger.debug("Creating symbolic link to not existent file %s" % filepath)
    # Extract the Abinit extension and add the prefix for input files.
    root, abiext = abi_splitext(filepath)
    infile = "in_" + abiext
    infile = self.indir.path_in(infile)
    # Link path to dest if dest link does not exist.
    # else check that it points to the expected file.
    self.history.info("Linking path %s --> %s" % (filepath, infile))
    if not os.path.exists(infile):
        os.symlink(filepath, infile)
    else:
        if os.path.realpath(infile) != filepath:
            raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
|
def swap_axis_to_0(x, axis):
    """Insert a new singleton axis at position 0 and swap it with the
    specified axis.

    The resulting array has an additional dimension: ``axis`` + 1 (which
    was ``axis`` before the insertion of the new axis) of ``x`` ends up at
    position 0, and a singleton axis sits at position ``axis`` + 1.

    Parameters
    ----------
    x : ndarray
        Input array
    axis : int
        Index of axis in ``x`` to swap to axis index 0.

    Returns
    -------
    arr : ndarray
        Output array
    """
    expanded = x[np.newaxis, ...]
    # After the insertion, the original ``axis`` lives at index ``axis + 1``.
    swapped = np.swapaxes(expanded, 0, axis + 1)
    return np.ascontiguousarray(swapped)
|
def print_in_box(text):
    """Prints `text` surrounded by a box made of *s"""
    border = '*' * (len(text) + 6)
    print('')
    print(border)
    print('** ' + text + ' **')
    print(border)
    print('')
|
def error(self, msg):
    '''error(msg : string)

    Print a usage message incorporating 'msg' to stderr and exit.
    This keeps option parsing exit status uniform for all parsing errors.
    '''
    self.print_usage(sys.stderr)
    message = '{0}: error: {1}\n'.format(self.get_prog_name(), msg)
    self.exit(salt.defaults.exitcodes.EX_USAGE, message)
|
def _merge_flags(new_flags, old_flags=None, conf='any'):
    '''Merges multiple lists of flags removing duplicates and resolving
    conflicts, giving priority to the last lists.'''
    old_flags = old_flags or []
    if conf == 'accept_keywords':
        # Keep only the old keywords still compatible with the new ones.
        combined = new_flags + [kw for kw in old_flags
                                if _check_accept_keywords(new_flags, kw)]
    else:
        combined = portage.flatten([old_flags, new_flags])
    # Later entries win: a leading '-' disables a flag, otherwise it enables.
    state = {}
    for flag in combined:
        if flag[0] == '-':
            state[flag[1:]] = False
        else:
            state[flag] = True
    merged = []
    for name, enabled in six.iteritems(state):
        merged.append(name if enabled else '-' + name)
    # Next sort is just aesthetic, can be commented for a small performance
    # boost
    merged.sort(key=lambda f: f.lstrip('-'))
    return merged
|
def _register_external_service ( self , plugin_name , plugin_instance ) :
"""Register an external service .
: param plugin _ name : Service name
: param plugin _ instance : PluginBase
: return :"""
|
for attr in plugin_instance . get_external_services ( ) . keys ( ) :
if attr in self . _external_services :
raise PluginException ( "External service with name {} already exists! Unable to add " "services from plugin {}." . format ( attr , plugin_name ) )
self . _external_services [ attr ] = plugin_instance . get_external_services ( ) . get ( attr )
|
def load_from_s3(self, bucket, prefix=None):
    """Load messages previously saved to S3.

    :param bucket: boto S3 bucket object whose keys will be listed.
    :param prefix: Optional key prefix; when omitted, the queue's own id
        (minus its leading character) is used instead.
    :return: The number of messages loaded.
    """
    n = 0
    if prefix:
        prefix = '%s/' % prefix
    else:
        # self.id starts with a separator character; drop it for the prefix.
        prefix = '%s/' % self.id[1:]
    rs = bucket.list(prefix=prefix)
    for key in rs:
        n += 1
        # Rebuild each message from the stored object body and re-enqueue it.
        m = self.new_message(key.get_contents_as_string())
        self.write(m)
    return n
|
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None, immediateOrCancel=None):
    """Cancels an order and places a new one of the same type in a single
    atomic transaction, meaning either both operations will succeed or both
    will fail. Required POST parameters are "orderNumber" and "rate"; you
    may optionally specify "amount" if you wish to change the amount of
    the new order. "postOnly" or "immediateOrCancel" may be specified for
    exchange orders, but will have no effect on margin orders.
    """
    params = dict(
        orderNumber=orderNumber,
        rate=rate,
        amount=amount,
        postOnly=postOnly,
        immediateOrCancel=immediateOrCancel,
    )
    return self._private('moveOrder', **params)
|
def shift_down_left(self, times=1):
    """Finds Location shifted down left by ``times``.

    :rtype: Location
    :raises IndexError: if the shifted location is off the board
    """
    # Location itself raises IndexError for out-of-range coordinates.
    # The old ``except IndexError: raise IndexError(e)`` re-wrap added
    # nothing and obscured the original traceback, so it was removed --
    # callers still see an IndexError.
    return Location(self._rank - times, self._file - times)
|
def _lookup_parent ( self , cls ) :
"""Lookup a transitive parent object that is an instance
of a given class ."""
|
codeobj = self . parent
while codeobj is not None and not isinstance ( codeobj , cls ) :
codeobj = codeobj . parent
return codeobj
|
def list_prediction(self, species, to_this_composition=True):
    """Predict substitutions for a list of species.

    Args:
        species:
            list of species
        to_this_composition:
            If true, substitutions with this as a final composition
            will be found. If false, substitutions with this as a
            starting composition will be found (these are slightly
            different)

    Returns:
        List of predictions in the form of dictionaries.
        If to_this_composition is true, the values of the dictionary
        will be from the list species. If false, the keys will be
        from that list.

    Raises:
        ValueError: if a requested species is unknown to the probability
            model.
    """
    for sp in species:
        if get_el_sp(sp) not in self.p.species:
            # Fixed: the two concatenated literals previously produced
            # "...for theprobability..." (missing space between them).
            raise ValueError("the species {} is not allowed for the "
                             "probability model you are using".format(sp))
    # Per-slot upper bound on the conditional probability; used below to
    # prune the branch-and-bound search early.
    max_probabilities = []
    for s1 in species:
        if to_this_composition:
            max_p = max([self.p.cond_prob(s2, s1) for s2 in self.p.species])
        else:
            max_p = max([self.p.cond_prob(s1, s2) for s2 in self.p.species])
        max_probabilities.append(max_p)
    output = []

    def _recurse(output_prob, output_species):
        # Best achievable product given choices so far: remaining slots
        # assume their per-slot maximum. Prune when even that misses the
        # threshold.
        best_case_prob = list(max_probabilities)
        best_case_prob[:len(output_prob)] = output_prob
        if functools.reduce(mul, best_case_prob) > self.threshold:
            if len(output_species) == len(species):
                odict = {'probability': functools.reduce(mul, best_case_prob)}
                if to_this_composition:
                    odict['substitutions'] = dict(zip(output_species, species))
                else:
                    odict['substitutions'] = dict(zip(species, output_species))
                # Only keep assignments in which every substituted species
                # is distinct.
                if len(output_species) == len(set(output_species)):
                    output.append(odict)
                return
            for sp in self.p.species:
                i = len(output_prob)
                if to_this_composition:
                    prob = self.p.cond_prob(sp, species[i])
                else:
                    prob = self.p.cond_prob(species[i], sp)
                _recurse(output_prob + [prob], output_species + [sp])
    _recurse([], [])
    logging.info('{} substitutions found'.format(len(output)))
    return output
|
def inspect(self):
    """Fetches image information from the client."""
    policy = self.policy
    tag = format_image_tag((self.config_id.config_name, self.config_id.instance_name))
    image_id = policy.images[self.client_name].get(tag)
    if not image_id:
        self.detail = NOT_FOUND
    else:
        # Currently there is no need for actually inspecting the image;
        # recording its id is sufficient.
        self.detail = {'Id': image_id}
|
def get_available_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None):
    '''Get info about an available postgresql extension

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.get_available_extension plpgsql
    '''
    extensions = available_extensions(user=user, host=host, port=port,
                                      maintenance_db=maintenance_db,
                                      password=password, runas=runas)
    return extensions.get(name, None)
|
def get_submission(submission_uuid, read_replica=False):
    """Retrieves a single submission by uuid.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica database.
            If no read replica is available, use the default database.

    Raises:
        SubmissionNotFoundError: Raised if the submission does not exist.
        SubmissionRequestError: Raised if the search parameter is not a string.
        SubmissionInternalError: Raised for unknown errors.

    Examples:
        >>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    if not isinstance(submission_uuid, six.string_types):
        # Raw UUID objects are accepted by coercing them to text;
        # anything else is rejected.
        if isinstance(submission_uuid, UUID):
            submission_uuid = six.text_type(submission_uuid)
        else:
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )
    cache_key = Submission.get_cache_key(submission_uuid)
    try:
        cached_submission_data = cache.get(cache_key)
    except Exception:
        # The cache backend could raise an exception
        # (for example, memcache keys that contain spaces)
        logger.exception("Error occurred while retrieving submission from the cache")
        cached_submission_data = None
    if cached_submission_data:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached_submission_data
    try:
        # Cache miss: hit the database and repopulate the cache.
        submission = _get_submission_model(submission_uuid, read_replica)
        submission_data = SubmissionSerializer(submission).data
        cache.set(cache_key, submission_data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        # Something very unexpected has just happened (like DB misconfig)
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)
    logger.info("Get submission {}".format(submission_uuid))
    return submission_data
|
def flood_fill_aplx(self, *args, **kwargs):
    """Unreliably flood-fill APLX to a set of application cores.

    .. note::
        Most users should use the :py:meth:`.load_application` wrapper
        around this method which guarantees successful loading.

    This method can be called in either of the following ways::

        flood_fill_aplx("/path/to/app.aplx", {(x, y): {core, ...}, ...})
        flood_fill_aplx({"/path/to/app.aplx": {(x, y): {core, ...}, ...}})

    Note that the latter format is the same format produced by
    :py:func:`~rig.place_and_route.util.build_application_map`.

    .. warning::
        The loading process is likely, but not guaranteed, to succeed.
        This is because the flood-fill packets used during loading are not
        guaranteed to arrive. The effect is that some chips may not receive
        the complete application binary and will silently ignore the
        application loading request.

        As a result, the user is responsible for checking that each core
        was successfully loaded with the correct binary. At present, the
        two recommended approaches to this are:

        * If the ``wait`` argument is given then the user should check that
          the correct number of application binaries reach the initial
          barrier (i.e., the ``wait`` state). If the number does not match
          the expected number of loaded cores the next approach must be
          used:
        * The user can check the process list of each chip to ensure the
          application was loaded into the correct set of cores. See
          :py:meth:`.read_vcpu_struct_field`.

    Parameters
    ----------
    app_id : int
    wait : bool (Default: True)
        Should the application await the AppSignal.start signal after it
        has been loaded?
    """
    # Coerce the arguments into a single form. If there are two arguments
    # then assume that we have filename and a map of chips and cores;
    # otherwise there should be ONE argument which is of the form of the
    # return value of `build_application_map`.
    application_map = {}
    if len(args) == 1:
        application_map = args[0]
    elif len(args) == 2:
        application_map = {args[0]: args[1]}
    else:
        raise TypeError(
            "flood_fill_aplx: accepts either 1 or 2 positional arguments: "
            "a map of filenames to targets OR a single filename and its"
            "targets")
    # Get the application ID, the context system will guarantee that this
    # is available
    app_id = kwargs.pop("app_id")
    flags = 0x0000
    if kwargs.pop("wait"):
        flags |= AppFlags.wait
    # The forward and retry parameters
    fr = NNConstants.forward << 8 | NNConstants.retry
    # Load each APLX in turn
    for (aplx, targets) in iteritems(application_map):
        # Determine the minimum number of flood-fills that are necessary to
        # load the APLX. The regions and cores should be sorted into
        # ascending order, `compress_flood_fill_regions` ensures this is
        # done.
        fills = regions.compress_flood_fill_regions(targets)
        # Load the APLX data
        with open(aplx, "rb") as f:
            aplx_data = f.read()
        # Number of SCP data blocks needed to carry the whole binary
        # (ceiling division by the per-packet payload size).
        n_blocks = ((len(aplx_data) + self.scp_data_length - 1) //
                    self.scp_data_length)
        # Start the flood fill for this application
        # Get an index for the nearest neighbour operation
        pid = self._get_next_nn_id()
        # Send the flood-fill start packet
        self._send_ffs(pid, n_blocks, fr)
        # Send the core select packets
        for (region, cores) in fills:
            self._send_ffcs(region, cores, fr)
        # Send the data
        # NOTE(review): (255, 255) presumably addresses the local/system
        # view of the "sv" struct for the SDRAM base -- confirm against
        # the rig documentation.
        base_address = self.read_struct_field("sv", "sdram_sys", 255, 255)
        self._send_ffd(pid, aplx_data, base_address)
        # Send the flood-fill END packet
        self._send_ffe(pid, app_id, flags, fr)
|
def validate(self, result, spec):  # noqa Yes, it's too complex.
    """Validate that the result has the correct structure.

    ``spec`` is an example value: dict/list/tuple specs are matched
    structurally (a single representative key/value/element spec is
    applied to every member of ``result``), while string/int/bool specs
    only require ``result`` to be of the matching type. ``None`` matches
    anything.

    :raises ValueError: when ``result`` does not match ``spec``.
    """
    if spec is None:
        # None matches anything.
        return
    # The type checks below are deliberately sequential ``if`` statements,
    # not ``elif``: bool is a subclass of int in Python, so a bool spec
    # intentionally runs through both the int and the bool branches.
    if isinstance(spec, dict):
        if not isinstance(result, dict):
            raise ValueError('Dictionary expected, but %r found.' % result)
        if spec:
            # One arbitrary entry of the spec dict serves as the template
            # for every key and value in the result.
            spec_value = next(iter(spec.values()))
            # Yay Python 3!
            for value in result.values():
                self.validate(value, spec_value)
            spec_key = next(iter(spec.keys()))
            for key in result:
                self.validate(key, spec_key)
    if isinstance(spec, list):
        if not isinstance(result, list):
            raise ValueError('List expected, but %r found.' % result)
        if spec:
            # The first spec element is the template for all list items.
            for value in result:
                self.validate(value, spec[0])
    if isinstance(spec, tuple):
        if not isinstance(result, tuple):
            raise ValueError('Tuple expected, but %r found.' % result)
        # Tuples are matched positionally and must have the same length.
        if len(result) != len(spec):
            raise ValueError('Expected %d elements in tuple %r.' % (len(spec), result))
        for s, value in zip(spec, result):
            self.validate(value, s)
    if isinstance(spec, six.string_types):
        if not isinstance(result, six.string_types):
            raise ValueError('String expected, but %r found.' % result)
    if isinstance(spec, int):
        # NOTE(review): isinstance(True, int) is True, so a bool result
        # also satisfies an int spec here.
        if not isinstance(result, int):
            raise ValueError('Integer expected, but %r found.' % result)
    if isinstance(spec, bool):
        if not isinstance(result, bool):
            raise ValueError('Boolean expected, but %r found.' % result)
|
def list_annotations(self) -> List[Namespace]:
    """List all annotations."""
    annotation_query = self.session.query(Namespace).filter(Namespace.is_annotation)
    return annotation_query.all()
|
def cosinebell(n, fraction):
    """Return a cosine bell spanning n pixels, masking a fraction of pixels.

    Parameters
    ----------
    n : int
        Number of pixels.
    fraction : float
        Length fraction over which the data will be masked.
    """
    mask = np.ones(n)
    nmasked = int(fraction * n)
    for idx in range(nmasked):
        # Half-cosine ramp rising from 0 toward 1 across the masked region,
        # applied symmetrically at both ends of the mask.
        ramp = 0.5 * (1 - np.cos(np.pi * float(idx) / float(nmasked)))
        mask[idx] = ramp
        mask[n - 1 - idx] = ramp
    return mask
|
def ustack(arrs, axis=0):
    """Join a sequence of arrays along a new axis while preserving units.

    The axis parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    This is a wrapper around np.stack that preserves units. See the
    documentation for np.stack for full details.

    Examples
    --------
    >>> from unyt import km
    >>> a = [1, 2, 3]*km
    >>> b = [2, 3, 4]*km
    >>> print(ustack([a, b]))
    [[1 2 3]
     [2 3 4]] km
    """
    stacked = np.stack(arrs, axis=axis)
    # Re-attach the units carried by the input arrays.
    return _validate_numpy_wrapper_units(stacked, arrs)
|
def _folder_item_report_visibility(self, analysis_brain, item):
    """Set if the hidden field can be edited (enabled/disabled).

    :analysis_brain: Brain that represents an analysis
    :item: analysis' dictionary counterpart to be represented as a row
    """
    # Users that can Add Analyses to an Analysis Request must be able to
    # set the visibility of the analysis in results report, also if the
    # current state of the Analysis Request (e.g. verified) does not allow
    # the edition of other fields. Note that an analyst has no privileges
    # by default to edit this value, cause this "visibility" field is
    # related with results reporting and/or visibility from the client
    # side. This behavior only applies to routine analyses, the visibility
    # of QC analyses is managed in publish and are not visible to clients.
    if 'Hidden' not in self.columns:
        # Nothing to render: the listing has no Hidden column.
        return
    # Wake up the full object: the Hidden flag is not on the brain.
    full_obj = self.get_object(analysis_brain)
    item['Hidden'] = full_obj.getHidden()
    if self.has_permission(FieldEditAnalysisHidden, obj=full_obj):
        item['allow_edit'].append('Hidden')
|
def tokenizer(self):
    """Datasets can provide support for segmentation (aka tokenization) in two ways:
    - by providing an orthography profile at etc/orthography.tsv or
    - by overwriting this method to return a custom tokenizer callable.

    :return: A callable to do segmentation (or None when no profile file
        exists and the method is not overridden).

    The expected signature of the callable is

        def t(item, string, **kw)

    where
    - `item` is a `dict` representing the complete CLDF FormTable row
    - `string` is the string to be segmented
    - `kw` may be used to pass any context info to the tokenizer, when called
      explicitly.
    """
    profile = self.dir / 'etc' / 'orthography.tsv'
    if profile.exists():
        profile = Profile.from_file(str(profile), form='NFC')
        # All graphemes share the same column layout; the first entry
        # serves as the template for newly added graphemes.
        default_spec = list(next(iter(profile.graphemes.values())).keys())
        # Ensure the word-boundary markers exist in the profile.
        for grapheme in ['^', '$']:
            if grapheme not in profile.graphemes:
                profile.graphemes[grapheme] = {k: None for k in default_spec}
        # Rebuild the lookup tree after (possibly) adding graphemes.
        profile.tree = Tree(list(profile.graphemes.keys()))
        tokenizer = Tokenizer(profile=profile, errors_replace=lambda c: '<{0}>'.format(c))

        def _tokenizer(item, string, **kw):
            kw.setdefault("column", "IPA")
            kw.setdefault("separator", " + ")
            # Wrap the string in word-boundary markers before tokenizing.
            return tokenizer(unicodedata.normalize('NFC', '^' + string + '$'), **kw).split()
        return _tokenizer
|
def get_points(self, measurement=None, tags=None):
    """Return a generator for all the points that match the given filters.

    :param measurement: The measurement name
    :type measurement: str
    :param tags: Tags to look for
    :type tags: dict
    :return: Points generator
    """
    # Raise error if measurement is not str or bytes
    # (type(b''.decode()) is a py2/py3-portable spelling of the native
    # text type).
    if not isinstance(measurement, (bytes, type(b''.decode()), type(None))):
        raise TypeError('measurement must be an str or None')
    for series in self._get_series():
        series_name = series.get('measurement', series.get('name', 'results'))
        if series_name is None:
            # this is a "system" query or a query which
            # doesn't return a name attribute.
            # like 'show retention policies'..
            # Unnamed series cannot be tag-filtered; yield only when no
            # tag filter was requested.
            if tags is None:
                for item in self._get_points_for_series(series):
                    yield item
        elif measurement in (None, series_name):
            # by default if no tags was provided then
            # we will matches every returned series
            series_tags = series.get('tags', {})
            for item in self._get_points_for_series(series):
                # A point matches when no tag filter was given, or the
                # filter matches either the point itself or the series tags.
                if tags is None or self._tag_matches(item, tags) or self._tag_matches(series_tags, tags):
                    yield item
|
def convert_strtime_datetime(dt_str):
    """Converts datetime isoformat string to datetime (dt) object.

    Args:
        :dt_str (str): input string in '2017-12-30T18:48:00.353Z' form
            or similar; the fractional seconds and trailing 'Z' are
            both optional

    Returns:
        TYPE: datetime object
    """
    base_str, _, frac = dt_str.partition(".")
    # The trailing UTC designator sits on the base part when no fractional
    # seconds are present (the old code crashed on such strings).
    base = datetime.datetime.strptime(base_str.rstrip("Z"), "%Y-%m-%dT%H:%M:%S")
    frac = frac.rstrip("Z")
    if not frac:
        return base
    # Interpret the digits as a fraction of a second regardless of their
    # count: '.353' means 353000 microseconds. The old code read it as
    # 353 microseconds, which was wrong for anything but 6-digit input.
    microseconds = int(frac.ljust(6, "0")[:6], 10)
    return base + datetime.timedelta(microseconds=microseconds)
|
def CreateGroup(r, name, alloc_policy=None, dry_run=False):
    """Creates a new node group.

    @type name: str
    @param name: the name of node group to create
    @type alloc_policy: str
    @param alloc_policy: the desired allocation policy for the group, if any
    @type dry_run: bool
    @param dry_run: whether to peform a dry run
    @rtype: int
    @return: job id
    """
    body = {
        "name": name,
        "alloc_policy": alloc_policy,
    }
    query = {"dry-run": dry_run}
    return r.request("post", "/2/groups", query=query, content=body)
|
def sum_transactions(transactions):
    """Sums transactions into a total of remaining vacation days.

    Each transaction parses into (date, action, value):
      - 'rate': sets the accrual rate, given in days per year.
      - 'off':  a vacation day taken (the day is also not worked).
      - 'days': resets the running balance to a fixed value as of that entry.
    Between consecutive entries, vacation accrues at ``rate`` per elapsed
    workday (weekends and statutory holidays excluded).
    """
    # Accrual assumes a 250-workday year when converting the yearly rate.
    workdays_per_year = 250
    previous_date = None
    rate = 0
    day_sum = 0
    for transaction in transactions:
        date, action, value = _parse_transaction_entry(transaction)
        if previous_date is None:
            # First entry: no elapsed period to accrue over yet.
            previous_date = date
        # Workdays elapsed since the previous entry, excluding the entry
        # day itself (hence the -1).
        elapsed = workdays.networkdays(previous_date, date, stat_holidays()) - 1
        if action == 'rate':
            rate = float(value) / workdays_per_year
        elif action == 'off':
            elapsed -= 1
            # Didn't work that day
            day_sum -= 1
            # And we used a day
        day_sum += rate * elapsed
        if action == 'days':
            day_sum = value
            # Fixed value as of this entry
        previous_date = date
    return day_sum
|
def get_column_names(engine: Engine, tablename: str) -> List[str]:
    """Get all the database column names for the specified table."""
    columns_info = gen_columns_info(engine, tablename)
    return [col.name for col in columns_info]
|
def to_valid_density_matrix(density_matrix_rep: Union[int, np.ndarray], num_qubits: int, dtype: Type[np.number] = np.complex64) -> np.ndarray:
    """Verifies the density_matrix_rep is valid and converts it to ndarray form.

    This method is used to support passing a matrix, a vector (wave function),
    or a computational basis state as a representation of a state.

    Args:
        density_matrix_rep: If a numpy array of rank 2 (a matrix), this is
            the density matrix. If a numpy array of rank 1 (a vector), this
            is a wave function. If an int, this is the computational basis
            state.
        num_qubits: The number of qubits for the density matrix. The
            density_matrix_rep must be valid for this number of qubits.
        dtype: The numpy dtype of the density matrix, used when creating
            the state for a computational basis state (int), or validated
            against if density_matrix_rep is a numpy array.

    Returns:
        A numpy matrix corresponding to the density matrix on the given
        number of qubits.

    Raises:
        ValueError if the density_matrix_rep is not valid.
    """
    is_matrix = isinstance(density_matrix_rep, np.ndarray) and density_matrix_rep.ndim == 2
    if is_matrix:
        dim = 2 ** num_qubits
        # Validate shape, hermiticity, unit trace, dtype and positivity,
        # in that order; return the matrix untouched when everything holds.
        if density_matrix_rep.shape != (dim, dim):
            raise ValueError('Density matrix was not square and of size 2 ** num_qubit, '
                             'instead was {}'.format(density_matrix_rep.shape))
        if not np.allclose(density_matrix_rep, np.transpose(np.conj(density_matrix_rep))):
            raise ValueError('The density matrix is not hermitian.')
        if not np.isclose(np.trace(density_matrix_rep), 1.0):
            raise ValueError('Density matrix did not have trace 1 but instead {}'.format(np.trace(density_matrix_rep)))
        if density_matrix_rep.dtype != dtype:
            raise ValueError('Density matrix had dtype {} but expected {}'.format(density_matrix_rep.dtype, dtype))
        if not np.all(np.linalg.eigvalsh(density_matrix_rep) > -1e-8):
            raise ValueError('The density matrix is not positive semidefinite.')
        return density_matrix_rep
    # Not a matrix: treat as a wave function / basis state and build the
    # corresponding pure-state density matrix |psi><psi|.
    state_vector = wave_function.to_valid_state_vector(density_matrix_rep, num_qubits, dtype)
    return np.outer(state_vector, np.conj(state_vector))
|
def get_learning_objectives(self):
    """This method also mirrors that in the Item."""
    # Pretty much identical to the method in assessment.Item!
    mgr = self._get_provider_manager('LEARNING')
    session = mgr.get_objective_lookup_session(proxy=getattr(self, "_proxy", None))
    session.use_federated_objective_bank_view()
    objective_ids = self.get_learning_objective_ids()
    return session.get_objectives_by_ids(objective_ids)
|
def get_waveset(model):
    """Get optimal wavelengths for sampling a given model.

    Parameters
    ----------
    model : `~astropy.modeling.Model`
        Model.

    Returns
    -------
    waveset : array-like or `None`
        Optimal wavelengths. `None` if undefined.

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid model.
    """
    if not isinstance(model, Model):
        raise SynphotError('{0} is not a model.'.format(model))
    if isinstance(model, _CompoundModel):
        # Compound models combine the wavesets of their components.
        return model._tree.evaluate(WAVESET_OPERATORS, getter=None)
    return _get_sampleset(model)
|
def make_sloppy_codec(encoding):
    """Take a codec name, and return a 'sloppy' version of that codec that can
    encode and decode the unassigned bytes in that encoding.

    Single-byte encodings in the standard library are defined using some
    boilerplate classes surrounding the functions that do the actual work,
    `codecs.charmap_decode` and `charmap_encode`. This function, given an
    encoding name, *defines* those boilerplate classes.

    :param encoding: name of a single-byte (charmap) encoding, e.g. 'cp1252'.
    :return: a `codecs.CodecInfo` named ``'sloppy-' + encoding``.
    """
    # The Unicode replacement character U+FFFD (ftfy's REPLACEMENT_CHAR).
    replacement = '\ufffd'
    # A bytestring of all 256 possible bytes.
    all_bytes = bytes(range(256))
    # What each byte decodes to in Latin-1 (the fallback for unassigned bytes).
    sloppy_chars = list(all_bytes.decode('latin-1'))
    # What each byte decodes to in the given encoding; the replacement
    # character marks bytes that encoding leaves unassigned.
    # NOTE: the positional errors argument works on every Python version,
    # so the old PY26-specific branch was unnecessary.
    decoded_chars = all_bytes.decode(encoding, 'replace')
    # Each successfully decoded byte keeps its decoded value; unassigned
    # bytes keep their Latin-1 interpretation.
    for i, char in enumerate(decoded_chars):
        if char != replacement:
            sloppy_chars[i] = char
    # For ftfy's own purposes, allow byte 1A (the "Substitute" control code)
    # to encode the Unicode replacement character U+FFFD.
    sloppy_chars[0x1a] = replacement
    # Data structures that tell the charmap methods how to encode and decode.
    decoding_table = ''.join(sloppy_chars)
    encoding_table = codecs.charmap_build(decoding_table)

    # Class boilerplate, following the pattern of `encodings.cp1252`.
    class Codec(codecs.Codec):
        def encode(self, input, errors='strict'):
            return codecs.charmap_encode(input, errors, encoding_table)

        def decode(self, input, errors='strict'):
            return codecs.charmap_decode(input, errors, decoding_table)

    class IncrementalEncoder(codecs.IncrementalEncoder):
        def encode(self, input, final=False):
            return codecs.charmap_encode(input, self.errors, encoding_table)[0]

    class IncrementalDecoder(codecs.IncrementalDecoder):
        def decode(self, input, final=False):
            return codecs.charmap_decode(input, self.errors, decoding_table)[0]

    class StreamWriter(Codec, codecs.StreamWriter):
        pass

    class StreamReader(Codec, codecs.StreamReader):
        pass

    return codecs.CodecInfo(
        name='sloppy-' + encoding,
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
def parse(readDataInstance, sectionHeadersInstance):
    """Returns a new L{Sections} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object with data to be parsed as a L{Sections} object.
    @type sectionHeadersInstance: instance
    @param sectionHeadersInstance: The L{SectionHeaders} instance with the necessary to parse every section data.
    @rtype: L{Sections}
    @return: A new L{Sections} object.
    """
    sData = Sections()
    for sectionHdr in sectionHeadersInstance:
        # Sanity warnings only -- parsing continues regardless.
        # (print() calls instead of Python-2 print statements keep this
        # source importable on Python 3 as well.)
        if sectionHdr.sizeOfRawData.value > len(readDataInstance.data):
            print("Warning: SizeOfRawData is larger than file.")
        if sectionHdr.pointerToRawData.value > len(readDataInstance.data):
            print("Warning: PointerToRawData points beyond the end of the file.")
        if sectionHdr.misc.value > 0x10000000:
            print("Warning: VirtualSize is extremely large > 256MiB.")
        if sectionHdr.virtualAddress.value > 0x10000000:
            print("Warning: VirtualAddress is beyond 0x10000000")
        # Skip sections with pointerToRawData == 0. According to PECOFF,
        # such a section contains uninitialized data.
        if sectionHdr.pointerToRawData.value:
            sData.append(readDataInstance.read(sectionHdr.sizeOfRawData.value))
    return sData
|
def decode_aes256(cipher, iv, data, encryption_key):
    """Decrypt AES-256 bytes.

    Allowed ciphers are: 'ecb', 'cbc'.
    For 'ecb' the iv is not used and should be set to ''.

    :param cipher: 'cbc' or 'ecb'.
    :param iv: initialization vector (CBC only).
    :param data: ciphertext bytes.
    :param encryption_key: 32-byte AES key.
    :return: plaintext with PKCS#7 padding stripped.
    :raises ValueError: on an unknown cipher mode.
    """
    if cipher == 'cbc':
        aes = AES.new(encryption_key, AES.MODE_CBC, iv)
    elif cipher == 'ecb':
        aes = AES.new(encryption_key, AES.MODE_ECB)
    else:
        raise ValueError('Unknown AES mode')
    decrypted = aes.decrypt(data)
    # Strip PKCS#7 padding: the last byte encodes the pad length.
    # (Fix: the original `unpad` lambda ignored its own parameter and
    # closed over the outer variable instead.)
    pad_len = ord(decrypted[-1:])
    return decrypted[:-pad_len]
|
def get_bitcoind_client():
    """Connect to the bitcoind node and return a service proxy for it."""
    opts = get_bitcoin_opts()
    return create_bitcoind_service_proxy(
        opts['bitcoind_user'],
        opts['bitcoind_passwd'],
        server=opts['bitcoind_server'],
        port=opts['bitcoind_port'],
    )
|
def compile_files_cwd(*args, **kwargs):
    """Change working directory to the contracts' common directory in order
    to avoid symbol name conflicts.

    :param args: args[0] is the list of contract source file paths.
    :param kwargs: forwarded to ``compile_files``.
    :return: mapping of compiled contracts as returned by ``compile_files``.
    """
    # Root directory of the contracts. commonprefix() is character-wise,
    # so it can end in the middle of a filename (e.g. ['a/bar.sol',
    # 'a/baz.sol'] -> 'a/ba'); it also returns the file itself when a
    # single file is compiled. In either case fall back to the
    # containing directory.
    compile_wd = os.path.commonprefix(args[0])
    if not os.path.isdir(compile_wd):
        compile_wd = os.path.dirname(compile_wd)
    # Remove the directory prefix from the files.
    if compile_wd[-1] != '/':
        compile_wd += '/'
    file_list = [x.replace(compile_wd, '') for x in args[0]]
    cwd = os.getcwd()
    try:
        os.chdir(compile_wd)
        compiled_contracts = compile_files(
            source_files=file_list,
            # We need to specify output values here because py-solc by
            # default provides them all and does not know that "clone-bin"
            # does not exist in solidity >= v0.5.0
            output_values=('abi', 'asm', 'ast', 'bin', 'bin-runtime'),
            **kwargs,
        )
    finally:
        os.chdir(cwd)
    return compiled_contracts
|
def sanitize(self):
    """Sanitize all fields of the KNX message.

    Each header field is wrapped (modulo) into its legal value range;
    payload bytes are wrapped into a single byte each.
    """
    # (attribute, modulus) pairs: each field wraps within its bit width.
    for name, modulus in (
        ('repeat', 2),
        ('priority', 4),
        ('src_addr', 0x10000),
        ('dst_addr', 0x10000),
        ('multicast', 2),
        ('routing', 8),
        ('length', 16),
    ):
        setattr(self, name, getattr(self, name) % modulus)
    # Only the first (length - 1) payload bytes are sanitized, mirroring
    # the original loop bounds.
    for i in range(self.length - 1):
        self.data[i] %= 0x100
|
def snmp_server_host_source_interface_source_interface_type_loopback_loopback(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    snmp_server = ET.SubElement(
        config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    host = ET.SubElement(snmp_server, "host")
    # Keyed leaves identifying the host entry.
    for tag in ("ip", "community"):
        leaf = ET.SubElement(host, tag)
        leaf.text = kwargs.pop(tag)
    source_interface = ET.SubElement(host, "source-interface")
    iface_type = ET.SubElement(source_interface, "source-interface-type")
    outer_loopback = ET.SubElement(iface_type, "loopback")
    inner_loopback = ET.SubElement(outer_loopback, "loopback")
    inner_loopback.text = kwargs.pop('loopback')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def query(self, query, args=(), as_dict=False, suppress_warnings=True, reconnect=None):
    """Execute the specified query and return the tuple generator (cursor).

    :param query: mysql query
    :param args: additional arguments for the client.cursor
    :param as_dict: If as_dict is set to True, the returned cursor objects
        returns query results as dictionary.
    :param suppress_warnings: If True, suppress all warnings arising from
        underlying query library
    :param reconnect: whether to reconnect and retry once on a lost
        connection; defaults to the 'database.reconnect' config value.
    """
    if reconnect is None:
        reconnect = config['database.reconnect']
    # Choose a dict-returning or tuple-returning cursor class.
    cursor = client.cursors.DictCursor if as_dict else client.cursors.Cursor
    cur = self._conn.cursor(cursor=cursor)
    # Only the first 300 characters of the query are logged.
    logger.debug("Executing SQL:" + query[0:300])
    try:
        with warnings.catch_warnings():
            if suppress_warnings:
                # suppress all warnings arising from underlying SQL library
                warnings.simplefilter("ignore")
            cur.execute(query, args)
    except (err.InterfaceError, err.OperationalError) as e:
        # Connection-level failure: retry exactly once (reconnect=False on
        # the recursive call prevents an infinite retry loop).
        if is_connection_error(e) and reconnect:
            warnings.warn("Mysql server has gone away. Reconnecting to the server.")
            self.connect()
            if self._in_transaction:
                # A transaction cannot survive a reconnect; abort it.
                self.cancel_transaction()
                raise DataJointError("Connection was lost during a transaction.")
            else:
                logger.debug("Re-executing SQL")
                cur = self.query(query, args=args, as_dict=as_dict,
                                 suppress_warnings=suppress_warnings, reconnect=False)
        else:
            logger.debug("Caught InterfaceError/OperationalError.")
            raise
    except err.ProgrammingError as e:
        if e.args[0] == server_error_codes['parse error']:
            raise DataJointError(
                "\n".join(("Error in query:", query,
                           "Please check spelling, syntax, and existence of tables and attributes.",
                           "When restricting a relation by a condition in a string, enclose attributes in backquotes."))) from None
        # NOTE(review): a ProgrammingError with any other error code is
        # silently swallowed here and the cursor is returned as if the
        # query succeeded -- confirm whether a bare `raise` is missing.
    return cur
|
def angularjs(parser, token):
    """Conditionally switch between AngularJS and Django variable expansion
    for ``{{`` and ``}}``, keeping Django's expansion for ``{%`` and ``%}``.

    Usage::

        {% angularjs 1 %} or simply {% angularjs %}
        {% process variables through the AngularJS template engine %}
        {% endangularjs %}
        {% angularjs 0 %}
        {% process variables through the Django template engine %}
        {% endangularjs %}

    Instead of 0 and 1, it is possible to use a context variable.
    """
    def to_js_path(expression_token):
        # Convert Django's dotted array notation into JS bracket notation.
        parts = expression_token.split('.')
        path = parts[0]
        for part in parts[1:]:
            path += '[%s]' % part if part.isdigit() else '.%s' % part
        return path

    bits = token.contents.split()
    if len(bits) < 2:
        # Bare {% angularjs %} defaults to "on".
        bits.append('1')
    values = [parser.compile_filter(bit) for bit in bits[1:]]
    django_nodelist = parser.parse(('endangularjs',))
    angular_nodelist = NodeList()
    for node in django_nodelist:
        # Convert every VariableNode into a TextNode using the AngularJS
        # double-curly-bracket notation.
        if isinstance(node, VariableNode):
            node = TextNode('{{ %s }}' % to_js_path(node.filter_expression.token))
        angular_nodelist.append(node)
    parser.delete_first_token()
    return AngularJsNode(django_nodelist, angular_nodelist, values[0])
|
def zip_a_folder(src, dst):
    """Add a folder and everything inside to a zip archive.

    Example::

        |---paper
            |---algorithm.pdf
            |---images
                |---1.jpg

        zip_a_folder("paper", "paper.zip")

        paper.zip
        |---paper
            |---algorithm.pdf
            |---images
                |---1.jpg

    The folder itself is the top-level entry of the archive (translated
    from the original Chinese note: "add the whole folder to the archive,
    including the root directory itself").

    :param src: path of the folder to archive.
    :param dst: path of the zip file to create.
    """
    # Fix: the original implementation mutated the process working
    # directory and did not restore it if an exception occurred. Using
    # explicit arcnames avoids chdir entirely.
    src, dst = os.path.abspath(src), os.path.abspath(dst)
    root = os.path.dirname(src)
    with ZipFile(dst, "w") as archive:
        for dirpath, _, filenames in os.walk(src):
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                # Store paths relative to src's parent so the folder name
                # itself is the top-level entry in the archive.
                archive.write(full_path, os.path.relpath(full_path, root))
|
def _remove_layer ( self , layer ) :
"""remove the layer and its input / output edges"""
|
successors = self . get_successors ( layer )
predecessors = self . get_predecessors ( layer )
# remove all edges
for succ in successors :
self . _remove_edge ( layer , succ )
for pred in predecessors :
self . _remove_edge ( pred , layer )
# remove layer in the data structures
self . keras_layer_map . pop ( layer )
self . layer_list . remove ( layer )
|
def finalize_download(url, download_to_file, content_type, request):
    """Finalizes the download operation by doing various checks, such as
    format type, size check etc.

    :param url: source URL (used in error messages only).
    :param download_to_file: destination path on disk.
    :param content_type: expected content type substring, or falsy to skip.
    :param request: open response object supporting ``read`` and ``headers``.
    :return: ``download_to_file`` on success.
    :raises InvenioFileDownloadError: on format mismatch, read error,
        or an empty download.
    """
    # If format is given, a format check is performed.
    if content_type and content_type not in request.headers['content-type']:
        msg = 'The downloaded file is not of the desired format'
        raise InvenioFileDownloadError(msg)
    # Save the downloaded file to desired or generated location.
    # Fix: open in binary mode -- the response body is raw bytes; text
    # mode corrupts binary content (and fails outright on Python 3).
    with open(download_to_file, 'wb') as to_file:
        try:
            while True:
                block = request.read(CFG_FILEUTILS_BLOCK_SIZE)
                if not block:
                    break
                to_file.write(block)
        except Exception as e:
            msg = "Error when downloading %s into %s: %s" % (url, download_to_file, e)
            raise InvenioFileDownloadError(msg)
    # Check size.
    filesize = os.path.getsize(download_to_file)
    if filesize == 0:
        raise InvenioFileDownloadError("%s seems to be empty" % (url,))
    # Download successful, return the new path.
    return download_to_file
|
def compress(condition, data, axis=0, out=None, blen=None, storage=None, create='array', **kwargs):
    """Return selected slices of an array along given axis.

    Works block-wise over ``data`` so very large (chunked/out-of-core)
    arrays can be filtered without loading everything at once.

    :param condition: boolean selector; along axis 0 it must match
        ``len(data)``, along axis 1 it selects columns of every block.
    :param data: array-like to filter.
    :param axis: 0 (filter rows) or 1 (filter columns); others unsupported.
    :param out: unsupported; present only for numpy API compatibility.
    :param blen: block length; resolved via the storage utilities.
    :param storage: storage backend used to materialise the result.
    :param create: name of the storage factory method to call.
    :return: the accumulated result object, or None if nothing selected
        along axis 0 (no block ever had a true condition).
    """
    # setup
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    # Number of selected rows -- used to pre-size the output along axis 0.
    nnz = count_nonzero(condition)
    if axis == 0:
        _util.check_equal_length(data, condition)
        # block iteration
        out = None
        for i in range(0, length, blen):
            j = min(i + blen, length)
            bcond = np.asarray(condition[i:j])
            # don't access any data unless we have to
            if np.any(bcond):
                block = np.asarray(data[i:j])
                res = np.compress(bcond, block, axis=0)
                if out is None:
                    # First non-empty block creates the output container,
                    # sized for the total number of selected rows.
                    out = getattr(storage, create)(res, expectedlen=nnz, **kwargs)
                else:
                    out.append(res)
        return out
    elif axis == 1:
        # block iteration: every row block is kept, columns are filtered,
        # so the expected output length is the full input length.
        out = None
        condition = np.asanyarray(condition)
        for i in range(0, length, blen):
            j = min(i + blen, length)
            block = np.asarray(data[i:j])
            res = np.compress(condition, block, axis=1)
            if out is None:
                out = getattr(storage, create)(res, expectedlen=length, **kwargs)
            else:
                out.append(res)
        return out
    else:
        raise NotImplementedError('axis not supported: %s' % axis)
|
def create_process_behavior(self, behavior, process_id):
    """CreateProcessBehavior.
    [Preview API] Creates a single behavior in the given process.

    :param :class:`<ProcessBehaviorCreateRequest>` behavior: the behavior to create.
    :param str process_id: The ID of the process.
    :rtype: :class:`<ProcessBehavior>`
    """
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    payload = self._serialize.body(behavior, 'ProcessBehaviorCreateRequest')
    response = self._send(
        http_method='POST',
        location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e',
        version='5.0-preview.2',
        route_values=route_values,
        content=payload,
    )
    return self._deserialize('ProcessBehavior', response)
|
def trim(value):
    '''Raise an exception if value is empty. Otherwise strip it down.

    :param value: string (or None) to sanitise.
    :return: the stripped value as a text type.
    '''
    stripped = (value or '').strip()
    if not stripped:
        raise CommandExecutionError("Empty value during sanitation")
    return six.text_type(stripped)
|
def _match_vcs_scheme(url):
    # type: (str) -> Optional[str]
    """Look for VCS schemes in the URL.

    Returns the matched VCS scheme, or None if there's no match.
    """
    from pipenv.patched.notpip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        # Fix: guard the length so a URL that IS exactly the scheme name
        # (e.g. "git") does not raise IndexError on url[len(scheme)].
        if (url.lower().startswith(scheme) and
                len(url) > len(scheme) and
                url[len(scheme)] in '+:'):
            return scheme
    return None
|
def CheckStyle(filename, linenumber, clean_lines, errors):
    """Check style issues. These are:

    - No extra spaces between command and parenthesis
    - Matching spaces between parenthesis and arguments
    - No repeated logic in else(), endif(), endmacro()
    - No tabs, no trailing whitespace
    """
    CheckIndent(filename, linenumber, clean_lines, errors)
    CheckCommandSpaces(filename, linenumber, clean_lines, errors)
    line = clean_lines.raw_lines[linenumber]
    if '\t' in line:
        errors(filename, linenumber, 'whitespace/tabs',
               'Tab found; please use spaces')
    if line and line[-1].isspace():
        errors(filename, linenumber, 'whitespace/eol',
               'Line ends in whitespace')
    CheckRepeatLogic(filename, linenumber, clean_lines, errors)
|
def chained_parameter_variation(subject, durations, y0, varied_params, default_params=None,
                                integrate_kwargs=None, x0=None, npoints=1, numpy=None):
    """Integrate an ODE-system for a series of durations with some parameters changed in-between.

    Parameters
    ----------
    subject : function or ODESys instance
        If a function: should have the signature of :meth:`pyodesys.ODESys.integrate`
        (and return a :class:`pyodesys.results.Result` object).
        If an ODESys instance: the ``integrate`` method will be used.
    durations : iterable of floats
        Spans of the independent variable.
    y0 : dict or array_like
    varied_params : dict mapping parameter name (or index) to array_like
        Each array_like needs to be of the same length as durations.
    default_params : dict or array_like
        Default values for the parameters of the ODE system.
    integrate_kwargs : dict
        Keyword arguments passed on to ``integrate``.
    x0 : float-like
        First value of independent variable. default: 0.
    npoints : int
        Number of points per sub-interval.

    Examples
    --------
    >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]])
    >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12)
    >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw)
    >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs)
    >>> mask1 = res.xout <= 2
    >>> import numpy as np
    >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1]))
    True
    >>> mask2 = 2 <= res.xout
    >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0])))
    True
    """
    assert len(durations) > 0, 'need at least 1 duration (preferably many)'
    assert npoints > 0, 'need at least 1 point per duration'
    # Every varied parameter must provide one value per duration.
    for k, v in varied_params.items():
        if len(v) != len(durations):
            raise ValueError("Mismathced lengths of durations and varied_params")
    if isinstance(subject, ODESys):
        integrate = subject.integrate
        numpy = numpy or subject.numpy
    else:
        integrate = subject
        numpy = numpy or np
    default_params = default_params or {}
    integrate_kwargs = integrate_kwargs or {}

    def _get_idx(cont, idx):
        # Select per-system values when `integrate` returned multiple
        # results: scalars are broadcast, sequences indexed per result.
        if isinstance(cont, dict):
            return {k: (v[idx] if hasattr(v, '__len__') and getattr(v, 'ndim', 1) > 0 else v)
                    for k, v in cont.items()}
        else:
            return cont[idx]

    # Turn span lengths into absolute end points of each sub-interval.
    durations = numpy.cumsum(durations)
    for idx_dur in range(len(durations)):
        params = copy.copy(default_params)
        for k, v in varied_params.items():
            params[k] = v[idx_dur]
        if idx_dur == 0:
            if x0 is None:
                # `durations[0] * 0` keeps the dtype/units of the input.
                x0 = durations[0] * 0
            out = integrate(numpy.linspace(x0, durations[0], npoints + 1), y0, params, **integrate_kwargs)
        else:
            # Subsequent intervals continue from the previous result,
            # either a single Result or a sequence of them.
            if isinstance(out, Result):
                out.extend_by_integration(durations[idx_dur], params, npoints=npoints, **integrate_kwargs)
            else:
                for idx_res, r in enumerate(out):
                    r.extend_by_integration(durations[idx_dur], _get_idx(params, idx_res),
                                            npoints=npoints, **integrate_kwargs)
    return out
|
def _parse_wikiheadlines(path):
    """Generates examples from a Wikiheadlines dataset file.

    The language pair is parsed from the filename suffix ``.xx-yy``;
    each line holds the two sides separated by ``|||``.
    """
    match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
    assert match is not None, "Invalid Wikiheadlines filename: %s" % path
    lang1, lang2 = match.groups()
    with tf.io.gfile.GFile(path) as handle:
        for line in handle:
            left, right = line.split("|||")
            yield {lang1: left.strip(), lang2: right.strip()}
|
def process_output(self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file):
    """Process data block for OUTPUT token.

    Compares the captured execution output (``data``) against the
    expected doctest output (``output``) and raises RuntimeError with a
    detailed report on any mismatch. Non-standard decorators are
    delegated to ``self.custom_doctest``.
    """
    TAB = ' ' * 4
    # Only compare when running a doctest block that declared output.
    if is_doctest and output is not None:
        found = output
        found = found.strip()
        submitted = data.strip()
        # Directive context for error reporting; may be unavailable.
        if self.directive is None:
            source = 'Unavailable'
            content = 'Unavailable'
        else:
            source = self.directive.state.document.current_source
            content = self.directive.content
            # Add tabs and join into a single string.
            content = '\n'.join([TAB + line for line in content])
        # Make sure the output contains the output prompt.
        ind = found.find(output_prompt)
        if ind < 0:
            e = ('output does not contain output prompt\n\n'
                 'Document source: {0}\n\n'
                 'Raw content: \n{1}\n\n'
                 'Input line(s):\n{TAB}{2}\n\n'
                 'Output line(s):\n{TAB}{3}\n\n')
            e = e.format(source, content, '\n'.join(input_lines), repr(found), TAB=TAB)
            raise RuntimeError(e)
        # Strip the prompt itself before comparing.
        found = found[len(output_prompt):].strip()
        # Handle the actual doctest comparison.
        if decorator.strip() == '@doctest':
            # Standard doctest: exact string equality.
            if found != submitted:
                e = ('doctest failure\n\n'
                     'Document source: {0}\n\n'
                     'Raw content: \n{1}\n\n'
                     'On input line(s):\n{TAB}{2}\n\n'
                     'we found output:\n{TAB}{3}\n\n'
                     'instead of the expected:\n{TAB}{4}\n\n')
                e = e.format(source, content, '\n'.join(input_lines), repr(found), repr(submitted), TAB=TAB)
                raise RuntimeError(e)
        else:
            # Non-standard decorator (e.g. float comparison) is handled
            # by the registered custom doctest handler.
            self.custom_doctest(decorator, input_lines, found, submitted)
|
def _get_algorithm(self, algorithm_name):
    """Get the specific algorithm.

    :param str algorithm_name: name of the algorithm to use (file name).
    :return: algorithm object.
    :raises exceptions.AlgorithmNotFound: when no such algorithm is registered.
    """
    try:
        return anomaly_detector_algorithms[algorithm_name]
    except KeyError:
        raise exceptions.AlgorithmNotFound(
            'luminol.AnomalyDetector: ' + str(algorithm_name) + ' not found.')
|
def update_schema(schema_old, schema_new):
    """Given an old BigQuery schema, update it with a new one.

    Where a field name is the same, the new definition replaces the old.
    Any new fields not present in the old schema are appended.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old
    """
    merged = list(schema_old["fields"])
    # Map each existing field name to its position for in-place overwrite.
    position = {field["name"]: idx for idx, field in enumerate(merged)}
    for field in schema_new["fields"]:
        slot = position.get(field["name"])
        if slot is None:
            merged.append(field)   # brand-new field: append
        else:
            merged[slot] = field   # same name: new definition wins
    return {"fields": merged}
|
def should_be_hidden_as_cause(exc):
    """Used everywhere to decide if some exception type should be displayed
    or hidden as the cause of an error."""
    # Reduced traceback in case of HasWrongType (instance_of checks).
    from valid8.validation_lib.types import HasWrongType, IsWrongType
    return isinstance(exc, (HasWrongType, IsWrongType))
|
def clear_queues(self, manager):
    """Release the resources associated to the queues of this instance.

    :param manager: Manager() object
    :type manager: None | object
    :return: None
    """
    for q in (self.to_q, self.from_q):
        if q is not None and not manager:
            # Without a manager we own the queue: close it directly.
            try:
                q.close()
                q.join_thread()
            except AttributeError:
                # Queue implementations without close/join_thread are
                # simply dropped.
                pass
        # With a manager, proxy cleanup is left to the manager itself:
        # q._callmethod('close') / q._callmethod('join_thread')
    self.to_q = self.from_q = None
|
def pore_coords(target):
    r"""The average of the pore coords.

    Returns, for each throat of the target, the midpoint of the
    coordinates of its two connected pores.
    """
    network = target.project.network
    throats = network.throats(target.name)
    # Endpoint coordinates of every throat: shape (Nt, 2, 3).
    endpoints = network['pore.coords'][network['throat.conns']]
    # Midpoint along the pore axis, restricted to this target's throats.
    return _sp.mean(endpoints, axis=1)[throats]
|
def worker_exec(self, max_num, default_ext='', queue_timeout=5, req_timeout=5, **kwargs):
    """Target method of workers.

    Get task from ``task_queue`` and then download files and process meta
    data. A downloader thread will exit in either of the following cases:

    1. All parser threads have exited and the task_queue is empty.
    2. Downloaded image number has reached required number (max_num).

    Args:
        max_num (int): Required number of downloaded images.
        default_ext (str): Default file extension passed to :func:`download`.
        queue_timeout (int): Timeout of getting tasks from ``task_queue``.
        req_timeout (int): Timeout of making requests for downloading pages.
        **kwargs: Arguments passed to the :func:`download` method.
    """
    self.max_num = max_num
    while True:
        if self.signal.get('reach_max_num'):
            self.logger.info('downloaded images reach max num, thread %s'
                             ' is ready to exit', current_thread().name)
            break
        try:
            task = self.in_queue.get(timeout=queue_timeout)
        except queue.Empty:
            if self.signal.get('parser_exited'):
                self.logger.info('no more download task for thread %s',
                                 current_thread().name)
                break
            else:
                self.logger.info('%s is waiting for new download tasks',
                                 current_thread().name)
        except Exception:
            # Fix: the original bare `except:` also swallowed SystemExit
            # and KeyboardInterrupt; catch only ordinary exceptions.
            self.logger.error('exception in thread %s', current_thread().name)
        else:
            self.download(task, default_ext, req_timeout, **kwargs)
            self.process_meta(task)
            self.in_queue.task_done()
    self.logger.info('thread {} exit'.format(current_thread().name))
|
def listen(self, once=False):
    """Listen for changes in all registered listeners.

    Use add_listener before calling this function to listen for desired
    events, or set `once` to True to listen for initial room information.
    """
    if once:
        # Register a falsy "time" handler so run_queues returns falsy
        # and the listen loop breaks after the initial room information.
        self.add_listener("time", lambda _: False)
    return self.conn.listen()
|
def validate(config):
    """Validate a configuration file against the config schema."""
    with open(config) as handle:
        raw = handle.read()
    jsonschema.validate(utils.yaml_load(raw), CONFIG_SCHEMA)
|
def get_bucket_type_props(self, bucket_type):
    """Get properties for a bucket-type.

    :param bucket_type: bucket-type object whose properties to fetch.
    :return: the 'props' dict from the server response.
    :raises RiakError: when the server does not answer with HTTP 200.
    """
    self._check_bucket_types(bucket_type)
    url = self.bucket_type_properties_path(bucket_type.name)
    status, _headers, body = self._request('GET', url)
    if status != 200:
        raise RiakError('Error getting bucket-type properties.')
    return json.loads(bytes_to_str(body))['props']
|
def add_token_without_limits(self, token_address: TokenAddress, ) -> Address:
    """Register token of `token_address` with the token network.

    This applies for versions prior to 0.13.0 of raiden-contracts,
    since limits were hardcoded into the TokenNetwork contract.
    """
    # No additional constructor arguments: the limits live on-chain.
    return self._add_token(token_address=token_address, additional_arguments={})
|
def _put(self, *args, **kwargs):
    """A wrapper for putting things. It will also json encode your 'data' parameter.

    :returns: The response of your put
    :rtype: dict
    :raises requests.HTTPError: when the server answers with an error status.
    """
    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])
    response = requests.put(*args, **kwargs)
    response.raise_for_status()
    # Fix: the docstring promises a dict, but the original returned None.
    # NOTE(review): assumes the service replies with a JSON body -- confirm
    # against callers before relying on this for empty (204) responses.
    return response.json()
|
def decrypt_from(self, f, mac_bytes=10):
    """Decrypts a message from f.

    Generator: first yields a DecryptionContext bound to the stream *f*;
    once the consumer resumes the generator, ``ctx.read()`` is called to
    finalize -- presumably consuming the remaining data/MAC bytes
    (NOTE(review): confirm in DecryptionContext).

    :param f: file-like object containing the encrypted message.
    :param mac_bytes: number of MAC bytes (default 10).
    """
    # NOTE(review): shaped like a @contextmanager-style generator
    # (single yield, then finalization) -- confirm how callers consume it.
    ctx = DecryptionContext(self.curve, f, self, mac_bytes)
    yield ctx
    ctx.read()
|
def note_revert(self, note_id, version):
    """Function to revert a specific note (Requires login) (UNTESTED).

    Parameters:
        note_id (int): The note id to update.
        version (int): The version to revert to.
    """
    payload = {'id': note_id, 'version': version}
    return self._get('note/revert', payload, method='PUT')
|
def post_helper(form_tag=True, edit_mode=False):
    """Post's form layout helper."""
    helper = FormHelper()
    helper.form_action = '.'
    helper.attrs = {'data_abide': ''}
    helper.form_tag = form_tag
    rows = [Row(Column('text', css_class='small-12'),)]
    # The threadwatch option is not shown in the edit form.
    if not edit_mode:
        rows.append(Row(Column('threadwatch', css_class='small-12'),))
    rows.append(ButtonHolderPanel(Submit('submit', _('Submit')), css_class='text-right',))
    helper.layout = Layout(*rows)
    return helper
|
def _parse_pairwise_input(indices1, indices2, MDlogger, fname=''):
    r"""For input of pairwise type (distances, inverse distances, contacts) checks the
    type of input the user gave and reformats it so that :py:func:`DistanceFeature`,
    :py:func:`InverseDistanceFeature`, and ContactFeature can work.

    In case the input isn't already a list of distances, this function will:
      - sort the indices1 array and eliminate duplicates within it
      - sort the indices2 array and eliminate duplicates within it
      - eliminate duplicates between the indices1 and indices2 arrays
      - if indices2 is None, produce a list of pairs of indices in indices1, or
      - if indices2 is not None, produce a list of pairs of (i, j) where i
        comes from indices1, and j from indices2
    """
    if is_iterable_of_int(indices1):
        MDlogger.warning('The 1D arrays input for %s have been sorted, and '
                         'index duplicates have been eliminated.\n'
                         'Check the output of describe() to see the actual order of the features' % fname)
        # Eliminate duplicates and sort
        indices1 = np.unique(indices1)
        # Intra-group distances: all pairs within indices1.
        if indices2 is None:
            atom_pairs = combinations(indices1, 2)
        # Inter-group distances: cross product of the two groups.
        elif is_iterable_of_int(indices2):
            # Eliminate duplicates and sort
            indices2 = np.unique(indices2)
            # Eliminate duplicates between indices1 and indices2
            uniqs = np.in1d(indices2, indices1, invert=True)
            indices2 = indices2[uniqs]
            atom_pairs = product(indices1, indices2)
        # NOTE(review): if indices2 is neither None nor an iterable of
        # ints, atom_pairs stays unbound and the return raises
        # NameError -- confirm whether an explicit error is intended.
    else:
        # Input is already a list of pairs: pass it through untouched.
        atom_pairs = indices1
    return atom_pairs
|
def get_cusum(self, mag):
    """Return max - min of the cumulative sum.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.

    Returns
    -------
    mm_cusum : float
        Max - min of the cumulative sum.
    """
    deviations = mag - self.weighted_mean
    cusum = np.cumsum(deviations) / len(mag) / self.weighted_std
    return np.max(cusum) - np.min(cusum)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.