signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def train(sess, loss, x_train, y_train, init_all=False, evaluate=None,
          feed=None, args=None, rng=None, var_list=None, fprop_args=None,
          optimizer=None, devices=None, x_batch_preprocessor=None,
          use_ema=False, ema_decay=.998, run_canary=None,
          loss_threshold=1e5, dataset_train=None, dataset_size=None):
    """Run (optionally multi-replica, synchronous) training to minimize `loss`.

    :param sess: TF session to use when training the graph
    :param loss: tensor, the loss to minimize
    :param x_train: numpy array with training inputs or tf Dataset
    :param y_train: numpy array with training outputs or tf Dataset
    :param init_all: (boolean) If set to true, all TF variables in the session
                     are (re)initialized, otherwise only previously
                     uninitialized variables are initialized before training.
    :param evaluate: function that is run after each training iteration
                     (typically to display the test/validation accuracy).
    :param feed: An optional dictionary that is appended to the feeding
                 dictionary before the session runs. Can be used to feed
                 the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `learning_rate`, `batch_size`
    :param rng: Instance of numpy.random.RandomState
    :param var_list: Optional list of parameters to train.
    :param fprop_args: dict, extra arguments to pass to fprop (loss and model).
    :param optimizer: Optimizer to be used for training
    :param devices: list of device names to use for training
                    If None, defaults to: all GPUs, if GPUs are available
                    all devices, if no GPUs are available
    :param x_batch_preprocessor: callable
        Takes a single tensor containing an x_train batch as input
        Returns a single tensor containing an x_train batch as output
        Called to preprocess the data before passing the data to the Loss
    :param use_ema: bool
        If true, uses an exponential moving average of the model parameters
    :param ema_decay: float or callable
        The decay parameter for EMA, if EMA is used
        If a callable rather than a float, this is a callable that takes
        the epoch and batch as arguments and returns the ema_decay for
        the current batch.
    :param run_canary: Deprecated and unused; passing it only triggers a
        deprecation warning.
    :param loss_threshold: float
        Raise an exception if the loss exceeds this value.
        This is intended to rapidly detect numerical problems.
        Sometimes the loss may legitimately be higher than this value. In
        such cases, raise the value. If needed it can be np.inf.
    :param dataset_train: tf Dataset instance.
        Used as a replacement for x_train, y_train for faster performance.
    :param dataset_size: integer, the size of the dataset_train.
    :return: True if model trained
    """
    # Check whether the hardware is working correctly
    canary.run_canary()
    if run_canary is not None:
        warnings.warn("The `run_canary` argument is deprecated. The canary "
                      "is now much cheaper and thus runs all the time. The "
                      "canary now uses its own loss function so it is not "
                      "necessary to turn off the canary when training with "
                      " a stochastic loss. Simply quit passing `run_canary`."
                      "Passing `run_canary` may become an error on or after "
                      "2019-10-16.")

    args = _ArgsWrapper(args or {})
    fprop_args = fprop_args or {}

    # Check that necessary arguments were given (see doc above)
    # Be sure to support 0 epochs for debugging purposes
    if args.nb_epochs is None:
        raise ValueError("`args` must specify number of epochs")
    if optimizer is None:
        if args.learning_rate is None:
            raise ValueError("Learning rate was not given in args dict")
    assert args.batch_size, "Batch size was not given in args dict"

    if rng is None:
        rng = np.random.RandomState()

    if optimizer is None:
        optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    else:
        if not isinstance(optimizer, tf.train.Optimizer):
            raise ValueError("optimizer object must be from a child class of "
                             "tf.train.Optimizer")

    grads = []
    xs = []
    preprocessed_xs = []
    ys = []
    if dataset_train is not None:
        # Dataset mode is mutually exclusive with in-memory arrays and
        # preprocessing (the dataset pipeline is expected to handle both).
        assert x_train is None and y_train is None and x_batch_preprocessor is None
        if dataset_size is None:
            raise ValueError("You must provide a dataset size")
        data_iterator = dataset_train.make_one_shot_iterator().get_next()
        # Pull one batch up front only to learn dtypes/shapes for placeholders.
        x_train, y_train = sess.run(data_iterator)

    devices = infer_devices(devices)
    # Build one replica (placeholders + loss + gradients) per device.
    for device in devices:
        with tf.device(device):
            x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
            y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
            xs.append(x)
            ys.append(y)
            if x_batch_preprocessor is not None:
                x = x_batch_preprocessor(x)
            # We need to keep track of these so that the canary can feed
            # preprocessed values. If the canary had to feed raw values,
            # stochastic preprocessing could make the canary fail.
            preprocessed_xs.append(x)
            loss_value = loss.fprop(x, y, **fprop_args)
            grads.append(optimizer.compute_gradients(loss_value, var_list=var_list))
    num_devices = len(devices)
    print("num_devices: ", num_devices)

    # Average gradients across replicas and apply them once.
    grad = avg_grads(grads)
    # Trigger update operations within the default graph (such as batch_norm).
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_step = optimizer.apply_gradients(grad)

    epoch_tf = tf.placeholder(tf.int32, [])
    batch_tf = tf.placeholder(tf.int32, [])

    if use_ema:
        if callable(ema_decay):
            ema_decay = ema_decay(epoch_tf, batch_tf)
        ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        # Chain the EMA update after the gradient step.
        with tf.control_dependencies([train_step]):
            train_step = ema.apply(var_list)
        # Get pointers to the EMA's running average variables
        avg_params = [ema.average(param) for param in var_list]
        # Make temporary buffers used for swapping the live and running average
        # parameters
        tmp_params = [tf.Variable(param, trainable=False)
                      for param in var_list]
        # Define the swapping operation: live -> tmp, avg -> live, tmp -> avg.
        # Running `swap` twice restores the original assignment, which is how
        # evaluation below temporarily loads the EMA weights.
        param_to_tmp = [tf.assign(tmp, param)
                        for tmp, param in safe_zip(tmp_params, var_list)]
        with tf.control_dependencies(param_to_tmp):
            avg_to_param = [tf.assign(param, avg)
                            for param, avg in safe_zip(var_list, avg_params)]
        with tf.control_dependencies(avg_to_param):
            tmp_to_avg = [tf.assign(avg, tmp)
                          for avg, tmp in safe_zip(avg_params, tmp_params)]
        swap = tmp_to_avg

    batch_size = args.batch_size
    assert batch_size % num_devices == 0
    device_batch_size = batch_size // num_devices

    if init_all:
        sess.run(tf.global_variables_initializer())
    else:
        initialize_uninitialized_global_variables(sess)

    for epoch in xrange(args.nb_epochs):
        if dataset_train is not None:
            nb_batches = int(math.ceil(float(dataset_size) / batch_size))
        else:
            # Indices to shuffle training set
            index_shuf = list(range(len(x_train)))
            # Randomly repeat a few training examples each epoch to avoid
            # having a too-small batch
            while len(index_shuf) % batch_size != 0:
                index_shuf.append(rng.randint(len(x_train)))
            nb_batches = len(index_shuf) // batch_size
            rng.shuffle(index_shuf)
            # Shuffling here versus inside the loop doesn't seem to affect
            # timing very much, but shuffling here makes the code slightly
            # easier to read
            x_train_shuffled = x_train[index_shuf]
            y_train_shuffled = y_train[index_shuf]

        prev = time.time()
        for batch in range(nb_batches):
            if dataset_train is not None:
                # In dataset mode each step pulls a fresh batch; indices are
                # therefore always [0, batch_size).
                x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
                start, end = 0, batch_size
            else:
                # Compute batch start and end indices
                start = batch * batch_size
                end = (batch + 1) * batch_size

            # Perform one training step
            diff = end - start
            assert diff == batch_size

            feed_dict = {epoch_tf: epoch, batch_tf: batch}
            # Split the batch evenly across the replica placeholders.
            for dev_idx in xrange(num_devices):
                cur_start = start + dev_idx * device_batch_size
                cur_end = start + (dev_idx + 1) * device_batch_size
                feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
                feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
            if cur_end != end and dataset_train is None:
                msg = ("batch_size (%d) must be a multiple of num_devices "
                       "(%d).\nCUDA_VISIBLE_DEVICES: %s"
                       "\ndevices: %s")
                # NOTE(review): this rebinds `args` (the _ArgsWrapper) to a
                # tuple; harmless only because we raise immediately after.
                args = (batch_size, num_devices,
                        os.environ['CUDA_VISIBLE_DEVICES'], str(devices))
                raise ValueError(msg % args)
            if feed is not None:
                feed_dict.update(feed)
            _, loss_numpy = sess.run([train_step, loss_value],
                                     feed_dict=feed_dict)

            if np.abs(loss_numpy) > loss_threshold:
                raise ValueError("Extreme loss during training: ", loss_numpy)
            if np.isnan(loss_numpy) or np.isinf(loss_numpy):
                raise ValueError("NaN/Inf loss during training")
        assert (dataset_train is not None or end == len(index_shuf))
        # Check that all examples were used
        cur = time.time()
        _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) +
                     " seconds")
        if evaluate is not None:
            if use_ema:
                # Before running evaluation, load the running average
                # parameters into the live slot, so we can see how well
                # the EMA parameters are performing
                sess.run(swap)
            evaluate()
            if use_ema:
                # Swap the parameters back, so that we continue training
                # on the live parameters
                sess.run(swap)
    if use_ema:
        # When training is done, swap the running average parameters into
        # the live slot, so that we use them when we deploy the model
        sess.run(swap)

    return True
|
def draw(self, true_classes):
    """Draw candidates from a log-uniform (Zipfian) distribution.

    Returns the sampled candidate classes together with the expected count
    for the true classes and for the sampled classes.

    :param true_classes: NDArray of true class ids (any shape; flattened here)
    :return: [sampled_classes, count_true, count_sampled]
    """
    range_max = self.range_max
    num_sampled = self.num_sampled
    ctx = true_classes.context
    # Normalizer of the log-uniform distribution over [0, range_max).
    log_range = math.log(range_max + 1)
    true_classes = true_classes.reshape((-1,))
    # BUG FIX: the original initialized `num_tries = 0` and immediately
    # overwrote it here; the dead store has been removed.
    # sample_unique returns the sampled ids and the number of draws it took.
    sampled_classes, num_tries = self.sampler.sample_unique(num_sampled)

    true_cls = true_classes.as_in_context(ctx).astype('float64')
    # P(class = c) under log-uniform: log((c + 2) / (c + 1)) / log(range_max + 1)
    prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
    count_true = self._prob_helper(num_tries, num_sampled, prob_true)

    sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
    sampled_cls_fp64 = sampled_classes.astype('float64')
    prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
    count_sampled = self._prob_helper(num_tries, num_sampled, prob_sampled)
    return [sampled_classes, count_true, count_sampled]
|
def _deserialize(self):
    """Try and deserialize a response body based upon the specified
    content type.

    :rtype: mixed
    """
    if not self._responses or not self._responses[-1].body:
        return None
    # Work on the most recent response throughout.
    last = self._responses[-1]
    if 'Content-Type' not in last.headers:
        return last.body
    try:
        content_type = algorithms.select_content_type(
            [headers.parse_content_type(last.headers['Content-Type'])],
            AVAILABLE_CONTENT_TYPES)
    except errors.NoMatch:
        # Unknown content type: hand back the raw body.
        return last.body
    if content_type[0] == CONTENT_TYPE_JSON:
        return self._decode(self._json.loads(self._decode(last.body)))
    elif content_type[0] == CONTENT_TYPE_MSGPACK:  # pragma: nocover
        return self._decode(self._msgpack.unpackb(last.body))
|
def cmpr(self, val1, name, val2):
    '''Compare the two values using the given type specific comparator.'''
    ctor = self.getCmprCtor(name)
    if ctor is None:
        # No comparator registered under this name for this type.
        raise s_exc.NoSuchCmpr(cmpr=name, name=self.name)
    # Normalize both operands first (norm() returns (normed, info)).
    lhs = self.norm(val1)[0]
    rhs = self.norm(val2)[0]
    # The ctor is curried: it takes the right operand and returns a
    # callable that evaluates the comparison against the left operand.
    return ctor(rhs)(lhs)
|
def calculate_shannon_entropy(self, data):
    """In our investigations, we have found that when the input is all digits,
    the number of false positives we get greatly exceeds realistic true
    positive scenarios.

    Therefore, this tries to capture this heuristic mathematically.

    We do this by noting that the maximum shannon entropy for this charset
    is ~3.32 (e.g. "0123456789", with every digit different), and we want
    to lower that below the standard limit, 3. However, at the same time,
    we also want to accommodate the fact that longer strings have a higher
    chance of being a true positive, which means "01234567890123456789"
    should be closer to the maximum entropy than the shorter version.
    """
    entropy = super(HexHighEntropyString, self).calculate_shannon_entropy(data)
    if len(data) == 1:
        return entropy

    try:
        int(data)
    except ValueError:
        # Not purely numeric; no penalty applies.
        return entropy

    # Purely-numeric input: apply the digit penalty. This multiplier was
    # determined through trial and error, with the intent of keeping it
    # simple, yet achieving our goals.
    return entropy - 1.2 / math.log(len(data), 2)
|
def references(self, env, object_name, model, assoc_class, result_class_name,
               role, result_role, keys_only):
    """Instrument Associations.

    All four association-related operations (Associators, AssociatorNames,
    References, ReferenceNames) are mapped to this method.
    This method is a python generator.

    Keyword arguments:
    env -- Provider Environment (pycimmb.ProviderEnvironment)
    object_name -- A pywbem.CIMInstanceName that defines the source
        CIM Object whose associated Objects are to be returned.
    model -- A template pywbem.CIMInstance to serve as a model
        of the objects to be returned. Only properties present on this
        model need to be set.
    assoc_class -- The pywbem.CIMClass.
    result_class_name -- If not empty, this string acts as a filter on
        the returned set of Instances by mandating that each returned
        Instances MUST represent an association between object_name
        and an Instance of a Class whose name matches this parameter
        or a subclass.
    role -- If not empty, MUST be a valid Property name. It acts as a
        filter on the returned set of Instances by mandating that each
        returned Instance MUST refer to object_name via a Property
        whose name matches the value of this parameter.
    result_role -- If not empty, MUST be a valid Property name. It acts
        as a filter on the returned set of Instances by mandating that
        each returned Instance MUST represent associations of
        object_name to other Instances, where the other Instances play
        the specified result_role in the association (i.e. the
        name of the Property in the Association Class that refers to
        the Object related to object_name MUST match the value of this
        parameter).
    keys_only -- A boolean. True if only the key properties should be
        set on the generated instances.

    The following diagram may be helpful in understanding the role,
    result_role, and result_class_name parameters.

        +------------------------+       +-----------------------+
        | object_name.classname  |       | result_class_name     |
        | ~~~~~~~~~~~~~~~~~~~~~  |       | ~~~~~~~~~~~~~~~~~~~~  |
        +------------------------+       +-----------------------+
           |              +-----------------------------------+      |
           |              | [Association] assoc_class         |      |
           | object_name  | ~~~~~~~~~~~~~~~~~~~~~~~~~         |      |
           +--------------+ object_name.classname REF role    |      |
        (CIMInstanceName) | result_class_name REF result_role +------+
                          |                                   |(CIMInstanceName)
                          +-----------------------------------+

    Possible Errors:
    CIM_ERR_ACCESS_DENIED
    CIM_ERR_NOT_SUPPORTED
    CIM_ERR_INVALID_NAMESPACE
    CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized
        or otherwise incorrect parameters)
    CIM_ERR_FAILED (some other unspecified error occurred)
    """
    # Intentionally unimplemented stub: this provider exposes no
    # associations, so the generator yields nothing.
    pass
|
def call(self, transaction=None, block_identifier='latest'):
    """Execute a contract function call using the `eth_call` interface.

    This method prepares a ``Caller`` object that exposes the contract
    functions and public variables as callable Python functions.

    Reading a public ``owner`` address variable example:

    .. code-block:: python

        ContractFactory = w3.eth.contract(
            abi=wallet_contract_definition["abi"])
        # Not a real contract address
        contract = ContractFactory("0x2f70d3d26829e412A602E83FE8EeBF80255AEeA5")
        # Read "owner" public variable
        addr = contract.functions.owner().call()

    :param transaction: Dictionary of transaction info for web3 interface
    :param block_identifier: Block at which to execute the call
        (default 'latest')
    :return: ``Caller`` object that has contract public functions
        and variables exposed as Python methods
    :raises ValueError: if the transaction sets 'data', or no 'to'
        address can be determined
    """
    # Never mutate the caller's dict; copy it.
    if transaction is None:
        call_transaction = {}
    else:
        call_transaction = dict(**transaction)

    if 'data' in call_transaction:
        # 'data' is derived from the function selector + encoded args;
        # callers must not override it.
        raise ValueError("Cannot set data in call transaction")

    if self.address:
        call_transaction.setdefault('to', self.address)
    # NOTE(review): `empty` is presumably web3's sentinel for an unset
    # defaultAccount — confirm against the module's imports.
    if self.web3.eth.defaultAccount is not empty:
        call_transaction.setdefault('from', self.web3.eth.defaultAccount)

    if 'to' not in call_transaction:
        if isinstance(self, type):
            # Called on the factory class itself rather than an instance.
            raise ValueError(
                "When using `Contract.[methodtype].[method].call()` from"
                " a contract factory you "
                "must provide a `to` address with the transaction")
        else:
            raise ValueError("Please ensure that this contract instance has an address.")

    block_id = parse_block_identifier(self.web3, block_identifier)

    return call_contract_function(self.web3, self.address,
                                  self._return_data_normalizers,
                                  self.function_identifier, call_transaction,
                                  block_id, self.contract_abi, self.abi,
                                  *self.args, **self.kwargs)
|
def updateParamsets(self):
    """Devices should update their own paramsets. They rely on the state of
    the server. Hence we pull all paramsets.

    :return: True if every paramset was refreshed, False on any error.
    """
    try:
        for paramset in self._PARAMSETS:
            self.updateParamset(paramset)
    except Exception as err:
        # Best-effort: log and report failure rather than propagate.
        LOG.error("HMGeneric.updateParamsets: Exception: " + str(err))
        return False
    return True
|
def get_nailing(expnum, ccd):
    """Get the 'nailing' images associated with expnum.

    Queries the bucket database for exposures of the same pointing taken
    between 0.5 and 15 days of the given exposure, then fetches each one
    with MOPfits.adGet.

    NOTE(review): this is Python 2 code (`raise Exc, msg` syntax) and
    depends on a module-level `opt` object (`opt.raw`) and `TaskError`
    defined elsewhere in the file.

    :param expnum: exposure number whose companions are wanted
    :param ccd: CCD number; CCDs < 18 get the "[-*,-*]" (flipped) cutout
    :raises TaskError: on any failure (the bare except hides the cause)
    """
    # dt is the time separation from the reference exposure f.
    sql = """
 SELECT e.expnum, (e.mjdate - f.mjdate) dt
 FROM bucket.exposure e
 JOIN bucket.exposure f
 JOIN bucket.association b ON b.expnum=f.expnum
 JOIN bucket.association a ON a.pointing=b.pointing AND a.expnum=e.expnum
 WHERE f.expnum=%d
 AND abs(e.mjdate - f.mjdate) > 0.5
 AND abs(e.mjdate - f.mjdate) < 15.0
 ORDER BY abs(e.mjdate-f.mjdate)
 """ % (expnum)
    try:
        import MOPdbaccess
        mysql = MOPdbaccess.connect('bucket', 'cfhls', dbSystem='MYSQL')
        bucket = mysql.cursor()
        bucket.execute(sql)
        nailings = bucket.fetchall()
        mysql.close()
        # Low-numbered CCDs are flipped, so request the mirrored cutout.
        if int(ccd) < 18:
            cutout = "[-*,-*]"
        else:
            cutout = None
        import MOPfits
        for nailing in nailings:
            # Fetch each nailing image; filename is unused beyond the call.
            filename = MOPfits.adGet(str(nailing[0]) + opt.raw, extno=int(ccd), cutout=cutout)
    except:
        # NOTE(review): bare except swallows the real error; consider
        # chaining the original exception when porting.
        raise TaskError, "get nailing failed"
|
def copy(self):
    """Make a copy of this SimLibrary, allowing it to be mutated without
    affecting the global version.

    :return: A new SimLibrary object with the same library references but
        different dict/list references
    """
    clone = SimLibrary()
    # Shallow-copy each container so the clone can be mutated independently;
    # the contained procedure/prototype objects are still shared.
    clone.procedures = dict(self.procedures)
    clone.non_returning = set(self.non_returning)
    clone.prototypes = dict(self.prototypes)
    clone.default_ccs = dict(self.default_ccs)
    clone.names = list(self.names)
    return clone
|
def get(self, uri):
    '''Get the content for the bodypart identified by the uri.

    :param uri: either a "cid:<Content-ID>" reference or a content
        location URL
    :return: a StringIO over the part's value, or None when a non-cid
        uri is unknown
    :raises KeyError: when a "cid:" reference is not found
    '''
    if uri.startswith('cid:'):
        # Content-ID, so raise exception (KeyError) if not found.
        head, part = self.id_dict[uri[4:]]
        return StringIO.StringIO(part.getvalue())
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
    # is equivalent and works on Python 2 as well.
    if uri in self.loc_dict:
        head, part = self.loc_dict[uri]
        return StringIO.StringIO(part.getvalue())
    # NOTE(review): StringIO here is the Python 2 module; on Python 3 this
    # function would need io.StringIO/io.BytesIO.
    return None
|
def __clean(self, misp_response):
    """Reduce a raw MISP response to a list of cleaned events.

    :param misp_response: dict as returned by the MISP API; events live
        under the 'response' key
    :return: list of events processed by ``__clean_event``
    """
    return [
        self.__clean_event(entry['Event'])
        for entry in misp_response.get('response', [])
    ]
|
def getSizedInteger(self, data, byteSize, as_number=False):
    """Numbers of 8 bytes are signed integers when they refer to numbers,
    but unsigned otherwise.

    :param data: the raw big-endian bytes of the integer
    :param byteSize: how many bytes `data` holds (0 and >16 are invalid)
    :param as_number: for 8-byte values, interpret as signed when True
    :return: the decoded integer
    :raises InvalidPlistException: on a zero or >16 byte size
    """
    if byteSize == 0:
        raise InvalidPlistException("Encountered integer with byte size of 0.")

    # 1-, 2- and 4-byte integers are always unsigned.
    unsigned_fmts = {1: '>B', 2: '>H', 4: '>L'}
    if byteSize in unsigned_fmts:
        return unpack(unsigned_fmts[byteSize], data)[0]

    if byteSize == 8:
        # Signed only when the value represents a number.
        return unpack('>q' if as_number else '>Q', data)[0]

    if byteSize <= 16:
        # Handle odd-sized or integers larger than 8 bytes.
        # Don't naively go over 16 bytes, in order to prevent infinite loops.
        if hasattr(int, 'from_bytes'):
            return int.from_bytes(data, 'big')
        # Python 2 fallback: accumulate big-endian byte by byte.
        accum = 0
        for byte in data:
            if not isinstance(byte, int):
                # Python 2 iterates bytes as str; unpack each char.
                byte = unpack_from('>B', byte)[0]
            accum = (accum << 8) | byte
        return accum

    raise InvalidPlistException("Encountered integer longer than 16 bytes.")
|
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
    """Delete a BigQuery dataset.

    Parameters
    ----------
    dataset_id : str
        Unique ``str`` identifying the dataset within the project (the
        referenceId of the dataset)
    delete_contents : bool, optional
        If True, forces the deletion of the dataset even when the dataset
        contains data (Default = False)
    project_id : str, optional
        Unique ``str`` identifying the BigQuery project containing the
        dataset

    Returns
    -------
    Union[bool, dict]
        bool indicating if the delete was successful or not, or response
        from BigQuery if swallow_results is set to False

    Raises
    ------
    HttpError
        404 when dataset with dataset_id does not exist
    """
    project_id = self._get_project_id(project_id)

    try:
        request = self.bigquery.datasets().delete(
            projectId=project_id,
            datasetId=dataset_id,
            deleteContents=delete_contents)
        response = request.execute(num_retries=self.num_retries)
    except HttpError as e:
        logger.error('Cannot delete dataset {0}: {1}'.format(dataset_id, e))
        # Mirror the success path: boolean when swallowing, raw-ish value
        # (empty dict) otherwise.
        return False if self.swallow_results else {}

    return True if self.swallow_results else response
|
def get_child_values(parent, names):
    """Return a list of values for the specified child fields. If a field is
    not in the Element then replace it with nan.

    :param parent: element exposing HasElement/GetElement
    :param names: iterable of child field names, in the order wanted
    :return: list of values (np.nan for missing fields), one per name
    """
    return [
        XmlHelper.as_value(parent.GetElement(name))
        if parent.HasElement(name) else np.nan
        for name in names
    ]
|
def read_model(self):
    """Read the model and the couplings from the model file.

    First pass parses '# modelType = ...' / '# invTimeStep = ...' header
    comments and the boolean rules; second pass reads the coupling list
    that follows the '# coupling list:' marker. Populates self.modelType,
    self.invTimeStep, self.dim, self.boolRules, self.varNames, self.Coupl,
    self.Adj_signed, self.Adj.
    """
    if self.verbosity > 0:
        settings.m(0, 'reading model', self.model)
    # read model
    boolRules = []
    for line in open(self.model):
        if line.startswith('#') and 'modelType =' in line:
            keyval = line
            # A '|' separates the value from a trailing annotation.
            # NOTE(review): this rebinds the builtin `type` locally.
            if '|' in line:
                keyval, type = line.split('|')[:2]
            self.modelType = keyval.split('=')[1].strip()
        if line.startswith('#') and 'invTimeStep =' in line:
            keyval = line
            if '|' in line:
                keyval, type = line.split('|')[:2]
            self.invTimeStep = float(keyval.split('=')[1].strip())
        # Non-comment lines are boolean rules of the form "name = expr".
        if not line.startswith('#'):
            boolRules.append([s.strip() for s in line.split('=')])
        # Stop before the coupling section; it is parsed in the second pass.
        if line.startswith('# coupling list:'):
            break
    self.dim = len(boolRules)
    self.boolRules = collections.OrderedDict(boolRules)
    # Map each variable name to its row/column index.
    self.varNames = collections.OrderedDict([(s, i) for i, s in enumerate(self.boolRules.keys())])
    names = self.varNames
    # read couplings via names
    self.Coupl = np.zeros((self.dim, self.dim))
    boolContinue = True
    # Second pass: skip until the coupling marker, then read triplets.
    for line in open(self.model):  # open(self.model.replace('/model', '/couplList')):
        if line.startswith('# coupling list:'):
            boolContinue = False
        if boolContinue:
            continue
        # Each coupling line is "<target> <source> <value>".
        if not line.startswith('#'):
            gps, gs, val = line.strip().split()
            self.Coupl[int(names[gps]), int(names[gs])] = float(val)
    # adjacency matrices (signed and unsigned)
    self.Adj_signed = np.sign(self.Coupl)
    self.Adj = np.abs(np.array(self.Adj_signed))
    # build bool coefficients (necessary for odefy type
    # version of the discrete model)
    self.build_boolCoeff()
|
def and_yields(self, *values):
    """Expects the return value of the expectation to be a generator of the
    given values.

    :param values: the items the stubbed call should yield, in order
    """
    # Wrap the values in a fresh generator function and register it as the
    # invocation result for this expectation.
    def _gen():
        for item in values:
            yield item
    self.__expect(Expectation, Invoke(_gen))
|
def serveUpcoming(self, request):
    """Upcoming events list view.

    Renders a paginated list of upcoming events together with links to
    the monthly, weekly and past-events views of this calendar page.
    """
    myurl = self.get_url(request)
    today = timezone.localdate()
    # Links to the alternative calendar views for the current date.
    monthlyUrl = myurl + self.reverse_subpage('serveMonth',
                                              args=[today.year, today.month])
    weekNum = gregorian_to_week_date(today)[1]
    weeklyUrl = myurl + self.reverse_subpage('serveWeek',
                                             args=[today.year, weekNum])
    listUrl = myurl + self.reverse_subpage('servePast')
    upcomingEvents = self._getUpcomingEvents(request)
    paginator = Paginator(upcomingEvents, self.EventsPerPage)
    try:
        eventsPage = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        # Missing/garbage ?page= falls back to the first page.
        eventsPage = paginator.page(1)
    except EmptyPage:
        # Out-of-range ?page= clamps to the last page.
        eventsPage = paginator.page(paginator.num_pages)

    # TODO Consider changing to a TemplateResponse
    # https://stackoverflow.com/questions/38838601
    return render(request, "joyous/calendar_list_upcoming.html",
                  {'self': self, 'page': self, 'version': __version__,
                   'today': today, 'weeklyUrl': weeklyUrl,
                   'monthlyUrl': monthlyUrl, 'listUrl': listUrl,
                   'events': eventsPage})
|
def _proxy(self):
    """Generate an instance context for the instance; the context is capable
    of performing various actions. All instance actions are proxied to the
    context.

    :returns: ModelBuildContext for this ModelBuildInstance
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    """
    # Lazily build the context once and reuse it on later calls.
    context = self._context
    if context is None:
        context = ModelBuildContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            sid=self._solution['sid'],
        )
        self._context = context
    return context
|
def _to_str_columns(self):
    """Render a DataFrame to a list of columns (as lists of strings).

    Each returned column is header cells followed by value cells, all
    padded to a common width; '...' marker rows/columns are spliced in
    when the frame was truncated horizontally or vertically.
    """
    frame = self.tr_frame
    # may include levels names also
    str_index = self._get_formatted_index(frame)
    str_columns = self._get_formatted_column_labels(frame)

    if self.header:
        stringified = []
        for i, c in enumerate(frame.columns):
            cheader = str_columns[i]
            # Widest of: requested col_space and the header lines.
            max_colwidth = max(self.col_space or 0,
                               *(self.adj.len(x) for x in cheader))
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values, self.justify,
                                           minimum=max_colwidth,
                                           adj=self.adj)
            # Final width is the max of value widths and header width.
            max_len = max(max([self.adj.len(x) for x in fmt_values]),
                          max_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)
            stringified.append(cheader + fmt_values)
    else:
        stringified = []
        for i, c in enumerate(frame):
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(fmt_values, self.justify,
                                           minimum=(self.col_space or 0),
                                           adj=self.adj)
            stringified.append(fmt_values)

    strcols = stringified
    if self.index:
        strcols.insert(0, str_index)

    # Add ... to signal truncated
    truncate_h = self.truncate_h
    truncate_v = self.truncate_v

    if truncate_h:
        col_num = self.tr_col_num
        # infer width from the column header
        col_width = self.adj.len(strcols[self.tr_size_col][0])
        strcols.insert(self.tr_col_num + 1,
                       ['...'.center(col_width)] * (len(str_index)))
    if truncate_v:
        n_header_rows = len(str_index) - len(frame)
        row_num = self.tr_row_num
        for ix, col in enumerate(strcols):
            # infer width from the row above the truncation point
            cwidth = self.adj.len(strcols[ix][row_num])
            is_dot_col = False
            if truncate_h:
                # The '...' column inserted above sits right after col_num.
                is_dot_col = ix == col_num + 1
            if cwidth > 3 or is_dot_col:
                my_str = '...'
            else:
                my_str = '..'

            if ix == 0:
                dot_mode = 'left'
            elif is_dot_col:
                cwidth = self.adj.len(strcols[self.tr_size_col][0])
                dot_mode = 'center'
            else:
                dot_mode = 'right'
            dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
            strcols[ix].insert(row_num + n_header_rows, dot_str)
    return strcols
|
def send_vdp_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid,
                 filter_frmt, gid, mac, vlan, oui_id, oui_data, sw_resp):
    """Constructs and Sends the VDP Message.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
    Section for more detailed information

    :param mode: Associate or De-associate
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :param sw_resp: Flag indicating if response is required from the daemon
    :return reply: Reply from vdptool (None on precondition failure)
    """
    if not self.is_ncb:
        LOG.error("EVB cannot be set on NB")
        return
    vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid, typeid_ver,
                                          vsiid_frmt, vsiid, filter_frmt,
                                          gid, mac, vlan, oui_id, oui_data)
    if len(vdp_key_str) == 0:
        LOG.error("NULL List")
        return
    oui_cmd_str = self.gen_oui_str(vdp_key_str['oui_list'])
    # BUG FIX (DRY): the original had two near-identical run_vdptool call
    # sites differing only by the "-W" (wait for daemon response) flag.
    # Build the argument list once and insert the flag conditionally.
    vdptool_args = ["-T", "-i", self.port_name]
    if sw_resp:
        # If filter is not VID and if VLAN is 0, Query for the TLV first,
        # if found VDP will return the VLAN. Add support for this once
        # vdptool has the support for querying exact VSI filters
        # fixme(padkrish)
        vdptool_args.append("-W")
    vdptool_args.extend([
        "-V", mode,
        "-c", vdp_key_str['mode'],
        "-c", vdp_key_str['mgrid'],
        "-c", vdp_key_str['typeid'],
        "-c", vdp_key_str['typeid_ver'],
        "-c", vdp_key_str['vsiid'],
        "-c", "hints=none",
        "-c", vdp_key_str['filter'],
    ])
    reply = self.run_vdptool(vdptool_args, oui_args=oui_cmd_str)
    return reply
|
def set_score(submission_uuid, points_earned, points_possible,
              annotation_creator=None, annotation_type=None,
              annotation_reason=None):
    """Set a score for a particular submission.

    Sets the score for a particular submission. This score is calculated
    externally to the API.

    Args:
        submission_uuid (str): UUID for the submission (must exist).
        points_earned (int): The earned points for this submission.
        points_possible (int): The total points possible for this particular
            student item.
        annotation_creator (str): An optional field for recording who gave
            this particular score
        annotation_type (str): An optional field for recording what type of
            annotation should be created, e.g. "staff_override".
        annotation_reason (str): An optional field for recording why this
            score was set to its value.

    Returns:
        None

    Raises:
        SubmissionInternalError: Thrown if there was an internal error while
            attempting to save the score.
        SubmissionRequestError: Thrown if the given student item or submission
            are not found.

    Examples:
        >>> set_score("a778b933-9fb3-11e3-9c0f-040ccee02800", 11, 12)
        {
            'student_item': 2,
            'submission': 1,
            'points_earned': 11,
            'points_possible': 12,
            'created_at': datetime.datetime(2014, 2, 7, 20, 6, 42, 331156,
                                            tzinfo=<UTC>)
        }
    """
    try:
        submission_model = _get_submission_model(submission_uuid)
    except Submission.DoesNotExist:
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid))
    except DatabaseError:
        error_msg = u"Could not retrieve submission {}.".format(submission_uuid)
        logger.exception(error_msg)
        raise SubmissionRequestError(msg=error_msg)

    # Validate the score payload before touching the database.
    score = ScoreSerializer(data={
        "student_item": submission_model.student_item.pk,
        "submission": submission_model.pk,
        "points_earned": points_earned,
        "points_possible": points_possible,
    })
    if not score.is_valid():
        logger.exception(score.errors)
        raise SubmissionInternalError(score.errors)

    # When we save the score, a score summary will be created if
    # it does not already exist.
    # When the database's isolation level is set to repeatable-read,
    # it's possible for a score summary to exist for this student item,
    # even though we cannot retrieve it.
    # In this case, we assume that someone else has already created
    # a score summary and ignore the error.
    # TODO: once we're using Django 1.8, use transactions to ensure that these
    # two models are saved at the same time.
    try:
        score_model = score.save()
        _log_score(score_model)
        if annotation_creator is not None:
            score_annotation = ScoreAnnotation(score=score_model,
                                               creator=annotation_creator,
                                               annotation_type=annotation_type,
                                               reason=annotation_reason)
            score_annotation.save()
        # Send a signal out to any listeners who are waiting for scoring events.
        score_set.send(
            sender=None,
            points_possible=points_possible,
            points_earned=points_earned,
            anonymous_user_id=submission_model.student_item.student_id,
            course_id=submission_model.student_item.course_id,
            item_id=submission_model.student_item.item_id,
            created_at=score_model.created_at,
        )
    except IntegrityError:
        # Deliberately swallowed: a concurrent writer already created the
        # score summary (see the repeatable-read note above).
        pass
|
def assess_car_t_validity(job, gene_expression, univ_options, reports_options):
    """Write a CAR T-cell therapy target report for the patient's tumor type.

    Compares the patient's RSEM gene expression (TPM) against reference GTEX
    and TCGA values for known CAR T target genes, lists clinical trials for
    the tumor's tissue of origin, and then lists trials for other cancer
    types whose target genes are overexpressed in this patient.

    :param toil.fileStore.FileID gene_expression: The rsem gene expression
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict reports_options: Options specific to reporting modules
    :return: fsID of the written report file (car_t_target_report.txt)
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    tumor_type = univ_options['tumor_type']
    input_files = {'rsem_quant.tsv': gene_expression,
                   'car_t_targets.tsv.tar.gz': reports_options['car_t_targets_file']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    input_files['car_t_targets.tsv'] = untargz(input_files['car_t_targets.tsv.tar.gz'], work_dir)
    # Reference table: one row per (tissue, target gene) combination.
    target_data = pd.read_table(input_files['car_t_targets.tsv'], index_col=0)
    # NOTE(review): reads by bare filename, relying on work_dir == cwd (set above);
    # `delimiter='\t'` overrides `sep=' '`, so the file is parsed tab-separated.
    patient_df = pd.read_csv('rsem_quant.tsv', sep=' ', delimiter='\t', header='infer', index_col=0)
    # Strip Ensembl version suffixes (e.g. ENSG00000123456.7 -> ENSG00000123456).
    patient_df.index = (patient_df.index).str.replace('\\..*$', '')
    overexpressed = []
    # Check if the tumor has a corresponding normal tissue in GTEx.
    try:
        tissue_of_origin = TCGAToGTEx[tumor_type]
    except KeyError:
        tissue_of_origin = 'NA'
    # Write the report
    with open('car_t_target_report.txt', 'w') as car_t_report:  # print(target_data.index, file=car_t_report)
        if tissue_of_origin in target_data.index:
            print('Available clinical trials for ' + str.lower(tissue_of_origin) + ' cancer with GTEX and TCGA median values', file=car_t_report)
            print(('\t{:10}{:<10}{:<10}{:<10}{:<40}{:<12}\n'.format('Gene', 'GTEX', 'TCGA N', 'Observed', 'DOI for gene papers', 'Clinical Trials')), file=car_t_report)
            collected_values = []
            # Get the gene name, GTEX, TCGA, and observed values for the
            # patient's own tissue of origin.
            for index, row in target_data.iterrows():
                if index == tissue_of_origin:
                    gene = row['ENSG']
                    gtex = '{0:.2f}'.format(float(row['GTEX']))
                    tcga = '{0:.2f}'.format(float(row['TCGA']))
                    # 'NA' if the patient has no expression entry for this gene.
                    observed = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) if gene in patient_df.index else 'NA'
                    doi = row['DOI']
                    target = str.upper(row['TARGET'])
                    clinical_trial = row['Clinical trials']
                    collection = [target, gtex, tcga, observed, doi, clinical_trial]
                    collected_values.append(collection)
                    if observed != 'NA':
                        # A gene counts as overexpressed if the patient's TPM
                        # reaches either reference median.
                        if float(gtex) <= float(observed) or float(tcga) <= float(observed):
                            overexpressed.append(gene)
            # Sort rows by the observed expression, highest first.
            # NOTE(review): float(col[3]) raises if any row has observed == 'NA';
            # presumably the data guarantees a value here -- confirm.
            collected_values = sorted(collected_values, key=lambda col: float(col[3]), reverse=True)
            for entry in collected_values:
                print(('\t{:10}{:<10}{:<10}{:<10}{:<40}{:<12}'.format(entry[0], entry[1], entry[2], str(entry[3]), entry[4], entry[5])), file=car_t_report)
            print('\nBased on the genes overexpressed in this cancer type, here\'s a list of clinical ' 'trials for other types of cancer', file=car_t_report)
            if len(overexpressed) != 0:
                # Check if there are clinical trials for other cancer types that
                # target one of the patient's overexpressed genes.
                print(('\t{:10}{:<10}{:<10}{:<10}{:<40}{:<17}{:<20}\n'.format('Gene', 'GTEX', 'TCGA N', 'Observed', 'DOI for gene papers', 'Clinical Trials', 'Cancer')), file=car_t_report)
                other_trials = []
                for index, row in target_data.iterrows():
                    if row['ENSG'] in overexpressed and index != tissue_of_origin:
                        gene = row['ENSG']
                        gtex = '{0:.2f}'.format(float(row['GTEX']))
                        tcga = '{0:.2f}'.format(float(row['TCGA']))
                        doi = row['DOI']
                        target = str.upper(row['TARGET'])
                        observed = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) if gene in patient_df.index else 'NA'
                        # Reuses the name collected_values for a single row here.
                        collected_values = [target, gtex, tcga, observed, doi, row['Clinical trials'], index]
                        other_trials.append(collected_values)
                # Sort alphabetically by target name.
                other_trials = sorted(other_trials, key=lambda col: col[0])
                for entry in other_trials:
                    print(('\t{:10}{:<10}{:<10}{:<10}{:<40}{:<17}{:<20}'.format(entry[0], entry[1], entry[2], entry[3], entry[4], entry[5], entry[6])), file=car_t_report)
            else:
                print("Data not available", file=car_t_report)
        else:
            print('Data not available for ' + tumor_type, file=car_t_report)
    output_file = job.fileStore.writeGlobalFile(car_t_report.name)
    export_results(job, output_file, car_t_report.name, univ_options, subfolder='reports')
    job.fileStore.logToMaster('Ran car t validity assessment on %s successfully' % univ_options['patient'])
    return output_file
|
def fetch():
    """Download the Planck Collaboration (2013) dust map, placing it in the
    default ``dustmaps`` data directory, and verify it against its MD5 sum."""
    map_url = 'http://pla.esac.esa.int/pla/aio/product-action?MAP.MAP_ID=HFI_CompMap_ThermalDustModel_2048_R1.20.fits'
    expected_md5 = '8d804f4e64e709f476a63f0dfed1fd11'
    destination = os.path.join(
        data_dir(),
        'planck',
        'HFI_CompMap_ThermalDustModel_2048_R1.20.fits')
    fetch_utils.download_and_verify(map_url, expected_md5, fname=destination)
|
def find_all_output_in_range(self, ifo, currSeg, useSplitLists=False):
    """Return all files that overlap the specified segment."""
    if not useSplitLists:
        # Simple path: scan every file held by this container.
        matches = [entry for entry in self
                   if ifo in entry.ifo_list
                   and entry.segment_list.intersects_segment(currSeg)]
    else:
        # Faster path: consult the pre-sorted temporal buckets so only a
        # subset of files needs the intersects_segment check. The split
        # lists are built lazily and only once.
        if not self._check_split_list_validity():
            # FIXME: DO NOT hard code this.
            self._temporal_split_list(100)
        first_bucket = int((currSeg[0] - self._splitListsStart) / self._splitListsStep)
        # Small rounding adjustment so a segment ending exactly on a bucket
        # boundary does not spill into the following bucket.
        last_bucket = (currSeg[1] - self._splitListsStart) / self._splitListsStep
        last_bucket = int(last_bucket - 0.000001)
        matches = []
        for bucket in range(first_bucket, last_bucket + 1):
            if bucket < 0 or bucket >= self._splitListsNum:
                continue
            candidates = [entry for entry in self._splitLists[bucket]
                          if ifo in entry.ifo_list]
            matches.extend(entry for entry in candidates
                           if entry.segment_list.intersects_segment(currSeg))
    # Files may appear in several buckets; deduplicate before returning.
    return self.__class__(list(set(matches)))
|
def cache_control(max_age=None, private=False, public=False, s_maxage=None,
                  must_revalidate=False, proxy_revalidate=False,
                  no_cache=False, no_store=False):
    """Generate the value for a Cache-Control header.

    :param max_age: ``max-age`` directive, in seconds (int) or as a
        :class:`datetime.timedelta`.
    :param private: add the ``private`` directive (mutually exclusive
        with ``public``).
    :param public: add the ``public`` directive (mutually exclusive
        with ``private``).
    :param s_maxage: ``s-maxage`` directive, in seconds (int) or as a
        :class:`datetime.timedelta`.
    :param must_revalidate: add the ``must-revalidate`` directive.
    :param proxy_revalidate: add the ``proxy-revalidate`` directive.
    :param no_cache: add the ``no-cache`` directive.
    :param no_store: add the ``no-store`` directive.
    :returns: comma-separated Cache-Control header value (str).
    :raises ValueError: if both ``private`` and ``public`` are truthy.

    Example:

        >>> cache_control(public=1, max_age=3600)
        'public, max-age=3600'
        >>> cache_control(private=True, no_cache=True, no_store=True)
        'private, no-cache, no-store'
    """
    if private and public:
        raise ValueError("'private' and 'public' are mutually exclusive")
    # Accept timedelta for the age parameters; the header needs whole seconds.
    # Uses the stdlib timedelta.total_seconds() instead of a compat helper.
    if isinstance(max_age, timedelta):
        max_age = int(max_age.total_seconds())
    if isinstance(s_maxage, timedelta):
        s_maxage = int(s_maxage.total_seconds())
    directives = []
    if public:
        directives.append('public')
    if private:
        directives.append('private')
    if max_age is not None:
        directives.append('max-age=%d' % max_age)
    if s_maxage is not None:
        directives.append('s-maxage=%d' % s_maxage)
    if no_cache:
        directives.append('no-cache')
    if no_store:
        directives.append('no-store')
    if must_revalidate:
        directives.append('must-revalidate')
    if proxy_revalidate:
        directives.append('proxy-revalidate')
    return ', '.join(directives)
|
def list_media_services(access_token, subscription_id):
    '''List the media services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body.
    '''
    # Assemble the ARM REST endpoint for the media services provider.
    endpoint = (get_rm_endpoint()
                + '/subscriptions/' + subscription_id
                + '/providers/microsoft.media/mediaservices?api-version='
                + MEDIA_API)
    return do_get(endpoint, access_token)
|
def get_jobs(when=None, only_scheduled=False):
    """Return a dictionary mapping of job names together with their respective
    application class.

    :param when: optional schedule name; when given, only the generic
        directory and the matching schedule directory are scanned, and jobs
        whose own ``when`` differs are skipped.
    :param only_scheduled: when True, jobs with ``when is None`` are excluded.
    :returns: dict mapping ``(app_name, job_name)`` tuples to job classes.
    :raises JobError: if the same ``(app_name, job_name)`` is found twice.
    """
    # FIXME: HACK: make sure the project dir is on the path when executed as ./manage.py
    try:
        cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        ppath = os.path.dirname(cpath)
        if ppath not in sys.path:
            sys.path.append(ppath)
    except Exception:
        # Best-effort path fixup only; never let it break job discovery.
        pass
    _jobs = {}
    for app_name in [app.name for app in apps.get_app_configs()]:
        # None scans the generic (unscheduled) jobs directory.
        scandirs = (None, 'minutely', 'quarter_hourly', 'hourly', 'daily', 'weekly', 'monthly', 'yearly')
        if when:
            # Restrict the scan to the generic directory plus the requested schedule.
            scandirs = None, when
        for subdir in scandirs:
            try:
                path = find_job_module(app_name, subdir)
                for name in find_jobs(path):
                    if (app_name, name) in _jobs:
                        raise JobError("Duplicate job %s" % name)
                    job = import_job(app_name, name, subdir)
                    if only_scheduled and job.when is None:
                        # only include jobs which are scheduled
                        continue
                    if when and job.when != when:
                        # generic job not in same schedule
                        continue
                    _jobs[(app_name, name)] = job
            except ImportError:
                # No job module -- continue scanning the remaining directories.
                pass
    return _jobs
|
def get_prefix(self, key_prefix, **kwargs):
    """Get a range of keys with a prefix.

    :param key_prefix: first key in range
    :param keys_only: if True, retrieve only the keys, not the values
    :returns: sequence of (value, metadata) tuples
    """
    response = self.get_prefix_response(key_prefix, **kwargs)
    header = response.header
    # Lazily yield each key-value pair with its response metadata attached.
    for pair in response.kvs:
        yield pair.value, KVMetadata(pair, header)
|
def refresh_session(self, session=None):
    """Return updated session if token has expired, attempts to
    refresh using newly acquired token.

    If a session object is provided, configure it directly. Otherwise,
    create a new session and return it.

    :param session: The session to configure for authentication
    :type session: requests.Session
    :rtype: requests.Session.
    """
    # Only attempt a refresh when the stored token actually carries a
    # refresh_token; otherwise fall straight through to signed_session.
    if 'refresh_token' in self.token:
        try:
            token = self._context.acquire_token_with_refresh_token(
                self.token['refresh_token'],
                self.id,
                self.resource,
                self.secret  # This is needed when using Confidential Client
            )
            self.token = self._convert_token(token)
        except adal.AdalError as err:
            # Surface ADAL failures as this library's AuthenticationError,
            # preserving the original traceback.
            raise_with_traceback(AuthenticationError, "", err)
    return self.signed_session(session)
|
def user_info(name, **client_args):
    '''Get information about given user.

    name
        Name of the user for which to get information.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.user_info <name>
    '''
    # next() with a default collapses the original try/except StopIteration:
    # returns the first matching user dict, or None when no user matches.
    matches = (user for user in list_users(**client_args)
               if user.get('user') == name)
    return next(matches, None)
|
def year_origin_filter(year_predicate=None, origin_predicate=None):
    """Returns a predicate for cable identifiers where `year_predicate` and
    `origin_predicate` must hold true.

    If `year_predicate` and `origin_predicate` is ``None`` the returned
    predicate holds always true.

    `year_predicate`
        A predicate which returns ``True`` or ``False`` for a cable year.
    `origin_predicate`
        A predicate which returns ``True`` or ``False`` for a given
        cable origin.
    """
    if not (year_predicate or origin_predicate):
        # Nothing to check: accept every cable identifier.
        return lambda cable_id: True

    def accept(cable_id, predicate):
        # Extract (year, origin) from the canonical form of the identifier.
        year, origin = _YEAR_ORIGIN_PATTERN.match(canonicalize_id(cable_id)).groups()
        return predicate(year, origin)

    if year_predicate and origin_predicate:
        combined = lambda y, o: year_predicate(y) and origin_predicate(o)
    elif year_predicate:
        combined = lambda y, o: year_predicate(y)
    else:
        combined = lambda y, o: origin_predicate(o)
    return partial(accept, predicate=combined)
|
def generic_visit(self, pattern):
    """Check if the pattern matches the checked node.

    A node matches if:
        - its type matches the pattern's type
        - every field of the node matches the corresponding pattern field
    """
    if not isinstance(pattern, type(self.node)):
        return False
    for field, value in iter_fields(self.node):
        if not self.field_match(value, getattr(pattern, field)):
            return False
    return True
|
def flush(self):
    """Sends the current batch to Cloud Bigtable.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_batcher_flush]
        :end-before: [END bigtable_batcher_flush]
    """
    if not self.rows:
        # Nothing buffered; avoid an empty mutate call.
        return
    self.table.mutate_rows(self.rows)
    # Reset the accumulated batch state after a successful send.
    self.total_mutation_count = 0
    self.total_size = 0
    self.rows = []
|
def _autorestart_store_components(self, bundle):
    # type: (Bundle) -> None
    """Stores the components of the given bundle with the auto-restart
    property.

    Appends ``(factory_name, instance_name, properties)`` tuples to the
    per-bundle list in ``self.__auto_restart`` for every live instance whose
    factory belongs to ``bundle`` and whose context properties enable
    IPOPO_AUTO_RESTART.

    :param bundle: A Bundle object
    """
    with self.__instances_lock:
        # Prepare the list of components (created on first use for this bundle).
        store = self.__auto_restart.setdefault(bundle, [])
        for stored_instance in self.__instances.values():
            # Get the factory name
            factory = stored_instance.factory_name
            if self.get_factory_bundle(factory) is bundle:
                # Factory from this bundle
                # Test component properties
                properties = stored_instance.context.properties
                if properties.get(constants.IPOPO_AUTO_RESTART):
                    # Auto-restart property found
                    store.append((factory, stored_instance.name, properties))
|
def output_files(self):
    """Returns the list of output files from this rule.

    Paths are generated from the outputs of this rule's dependencies, with
    their paths translated based on prefix and strip_prefix.
    Returned paths are relative to buildroot.
    """
    for dep_address in self.subgraph.successors(self.address):
        dependency = self.subgraph.node[dep_address]['target_obj']
        for produced in dependency.output_files:
            # Translate the dependency's path and drop any leading slash so
            # the result is buildroot-relative.
            yield self.translate_path(produced, dependency).lstrip('/')
|
def set_logxticks(self, row, column, logticks):
    """Manually specify the x-axis log tick values.

    :param row, column: specify the subplot.
    :param logticks: logarithm of the locations for the ticks along the
        axis. For example, if you specify [1, 2, 3], ticks will be placed
        at 10, 100 and 1000.
    """
    # Delegate straight to the addressed subplot.
    self.get_subplot_at(row, column).set_logxticks(logticks)
|
def make_outpoint(tx_id_le, index, tree=None):
    '''byte-like, int, int -> Outpoint

    Build an outpoint from a little-endian tx id and an output index.
    Decred networks additionally encode the 1-byte tree field.
    '''
    if 'decred' in riemann.get_current_network_name():
        return tx.DecredOutpoint(
            tx_id=tx_id_le,
            index=utils.i2le_padded(index, 4),
            tree=utils.i2le_padded(tree, 1))
    return tx.Outpoint(
        tx_id=tx_id_le,
        index=utils.i2le_padded(index, 4))
|
def __create(self, type, **kwargs):
    """Call documentation: `/user/mfa/create
    <https://www.wepay.com/developer/reference/user-mfa#create>`_, plus
    extra keyword parameters:

    :keyword bool batch_mode: turn on/off the batch_mode, see
        :class:`wepay.api.WePay`
    :keyword str batch_reference_id: `reference_id` param for batch call,
        see :class:`wepay.api.WePay`
    :keyword str api_version: WePay API version, see
        :class:`wepay.api.WePay`
    """
    # Forward the single API parameter plus any batch/version options.
    return self.make_call(self.__create, {'type': type}, kwargs)
|
def tolocal(self):
    """Convert to local mode.

    Builds a local-mode copy from the materialized array. If the data is
    already local a warning is logged, and a rebuilt copy is still
    returned so the return value is uniform across modes.

    :returns: the data in local mode.
    """
    from thunder.series.readers import fromarray
    if self.mode == 'local':
        # Already local: warn but fall through and rebuild anyway, matching
        # the distributed-mode behaviour. (Removed a dead `pass` statement
        # and switched from the deprecated Logger.warn alias to warning.)
        logging.getLogger('thunder').warning('images already in local mode')
    return fromarray(self.toarray(), index=self.index, labels=self.labels)
|
def duplicate(self, fullname, shortname, categoryid, visible=True, **kwargs):
    """Duplicates an existing course with options.

    Note: Can be very slow running.

    :param string fullname: The new course's full name
    :param string shortname: The new course's short name
    :param string categoryid: Category new course should be created under
    :keyword bool visible: Defaults to True. The new course's visiblity
    :keyword bool activities: (optional) Defaults to True. Include course activites
    :keyword bool blocks: (optional) Defaults to True. Include course blocks
    :keyword bool filters: (optional) Defaults to True. Include course filters
    :keyword bool users: (optional) Defaults to False. Include users
    :keyword bool role_assignments: (optional) Defaults to False. Include role assignments
    :keyword bool comments: (optional) Defaults to False. Include user comments
    :keyword bool usercompletion: (optional) Defaults to False. Include user course completion information
    :keyword bool logs: (optional) Defaults to False. Include course logs
    :keyword bool grade_histories: (optional) Defaults to False. Include histories
    :returns: response object (None if an unknown keyword option is passed
        and valid_options returns falsy)

    Example Usage::

        >>> import muddle
        >>> muddle.course(10).duplicate('new-fullname', 'new-shortname', 20)
    """
    # TODO
    # Ideally categoryid should be optional here and
    # should default to catid of course being duplicated.
    allowed_options = ['activities', 'blocks', 'filters', 'users', 'role_assignments', 'comments', 'usercompletion', 'logs', 'grade_histories']
    if valid_options(kwargs, allowed_options):
        # Moodle expects each option as an indexed options[i][name]/[value] pair.
        option_params = {}
        for index, key in enumerate(kwargs):
            option_params.update({'options[' + str(index) + '][name]': key, 'options[' + str(index) + '][value]': int(kwargs.get(key))})
        params = {'wsfunction': 'core_course_duplicate_course', 'courseid': self.course_id, 'fullname': fullname, 'shortname': shortname, 'categoryid': categoryid, 'visible': int(visible)}
        params.update(option_params)
        params.update(self.request_params)
        # NOTE(review): verify=False disables TLS certificate verification —
        # confirm this is intentional for the target Moodle deployment.
        return requests.post(self.api_url, params=params, verify=False)
|
def write(self):
    """Restore GFF3 entry to original format.

    Returns:
        str: properly formatted string containing the GFF3 entry
    """
    # Format attributes for writing
    attrs = self.attribute_string()
    # Substitute the GFF3 placeholder '.' for any field whose value is None.
    # Bug fix: the original iterated __dict__.keys() and tested the *key*
    # (always a str) against NoneType, so no placeholder was ever applied;
    # the values must be inspected instead. Snapshot with list() because
    # setattr mutates the dict we are iterating.
    for attr, value in list(self.__dict__.items()):
        if value is None:
            setattr(self, attr, '.')
    # Assemble the nine tab-delimited GFF3 columns plus the attribute string.
    fstr = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}{9}'.format(
        self.seqid, self.source, self.type, str(self.start), str(self.end),
        self._score_str, self.strand, self.phase, attrs, os.linesep)
    return fstr
|
def wr_hdrs(self, worksheet, row_idx):
    """Print row of column headers.

    Writes every header in ``self.hdrs`` across one worksheet row using the
    header format, then returns the index of the next free row.
    """
    for col_offset, header in enumerate(self.hdrs):
        worksheet.write(row_idx, col_offset, header, self.fmt_hdr)
    # Exactly one header row was written; advance past it.
    return row_idx + 1
|
def wall(self):
    """refresh the token from the API
    then call a Wallabag instance
    then store the token

    :return: wall instance, or False if obtaining a token failed
    """
    us = UserService.objects.get(user=self.user, name='ServiceWallabag')
    params = {'client_id': us.client_id, 'client_secret': us.client_secret, 'username': us.username, 'password': us.password, }
    try:
        # Request a fresh OAuth token from the user's Wallabag host.
        token = Wall.get_token(host=us.host, **params)
    except Exception as e:
        # Record the failure against the trigger and bail out.
        update_result(self.trigger_id, msg=e, status=False)
        logger.critical('{} {}'.format(self.user, e))
        return False
    wall = Wall(host=us.host, client_secret=us.client_secret, client_id=us.client_id, token=token)
    # Persist the refreshed token for subsequent calls.
    UserService.objects.filter(user=self.user, name='ServiceWallabag').update(token=token)
    return wall
|
def create_agent(self, configuration, agent_id=""):
    """Create an agent.

    :param dict configuration: Form given by the craftai documentation.
    :param str agent_id: Optional. The id of the agent to create. It
        must be an str containing only characters in "a-zA-Z0-9_-" and
        must be between 1 and 36 characters.
    :default agent_id: "", the agent_id is generated.
    :return: agent created.
    :rtype: dict.
    :raise CraftAiBadRequestError: if the input is not of the right form.
    """
    # Extra header in addition to the main session's.
    headers = {"Content-Type": "application/json; charset=utf-8"}
    payload = {"configuration": configuration}
    if agent_id != "":
        # _check_agent_id raises when the given id is invalid.
        self._check_agent_id(agent_id)
        payload["id"] = agent_id
    # Check the payload is valid for JSON serialization before sending.
    try:
        serialized_payload = json.dumps(payload)
    except TypeError as err:
        raise CraftAiBadRequestError(
            "Invalid configuration or agent id given. {}".format(err.__str__()))
    response = self._requests_session.post(
        "{}/agents".format(self._base_url),
        headers=headers,
        data=serialized_payload)
    return self._decode_response(response)
|
def todict(self):
    """Convert namedtuple to dict, preserving field order."""
    # zip pairs each field name with its positional value.
    return OrderedDict(zip(self._fields, self))
|
def _infinite_iterator(self):
    """this iterator wraps the "_basic_iterator" when the configuration
    specifies that the "number_of_submissions" is set to "forever".
    Whenever the "_basic_iterator" is exhausted, it is called again to
    restart the iteration. It is up to the implementation of the innermost
    iterator to define what starting over means. Some iterators may
    repeat exactly what they did before, while others may iterate over
    new values.
    """
    while True:
        # Restart the basic iterator each time it runs dry.
        for crash_id in self._basic_iterator():
            # Only yield ids that pass the disallowed-value filter.
            if not self._filter_disallowed_values(crash_id):
                yield crash_id
|
def from_bytes(cls, bitstream):
    r'''Parse the given packet and update properties accordingly.

    >>> data_hex = ('13000001ae92b5574f849cd00001ac10'
    ...             '1f0300015cfe1cbd00200001ac101f01')
    >>> data = data_hex.decode('hex')
    >>> message = ControlMessage.from_bytes(data)
    >>> message.message_type
    >>> message.authoritative
    False
    >>> message.probe
    True
    >>> message.smr
    True
    >>> message.pitr
    False
    >>> message.smr_invoked
    False
    >>> message.nonce
    '\xae\x92\xb5WO\x84\x9c\xd0'
    >>> message.source_eid
    IPv4Address(u'172.16.31.3')
    >>> message.itr_rlocs
    [IPv4Address(u'92.254.28.189')]
    >>> message.eid_prefixes
    [IPv4Network(u'172.16.31.1/32')]
    >>> message.map_reply
    '''
    packet = cls()
    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)
    # Read the message type and reject streams for a different packet class.
    type_nr = bitstream.read('uint:4')
    if type_nr != packet.message_type:
        msg = 'Invalid bitstream for a {0} packet'
        class_name = packet.__class__.__name__
        raise ValueError(msg.format(class_name))
    # Read the six flag bits in wire order.
    (packet.authoritative, map_data_present, packet.probe, packet.smr,
     packet.pitr, packet.smr_invoked) = bitstream.readlist('6*bool')
    # Skip over reserved bits (kept so the packet can be re-serialized as-is).
    packet._reserved1 = bitstream.read(9)
    # Save the IRC (ITR-RLOC count) until we reach the actual data
    irc = bitstream.read('uint:5')
    # Save the record count until we reach the actual data
    record_count = bitstream.read('uint:8')
    # Read the nonce
    packet.nonce = bitstream.read('bytes:8')
    # Read the source EID
    packet.source_eid = read_afi_address_from_bitstream(bitstream)
    # Read the ITR RLOCs; the wire encodes count-1, hence irc + 1 entries.
    for dummy in range(irc + 1):
        itr_rloc = read_afi_address_from_bitstream(bitstream)
        packet.itr_rlocs.append(itr_rloc)
    # Read the EID records.
    for dummy in range(record_count):
        # A record begins with 8 reserved bits: skip
        bitstream.read(8)
        # Read 8 bits for the prefix length
        prefix_len = bitstream.read('uint:8')
        # Then an AFI style prefix
        eid_prefix = read_afi_address_from_bitstream(bitstream, prefix_len)
        packet.eid_prefixes.append(eid_prefix)
    # Read the map-reply record if present
    if map_data_present:
        packet.map_reply = MapReplyRecord.from_bytes(bitstream)
    # Verify that the properties make sense
    packet.sanitize()
    return packet
|
async def auth(self):
    """Perform AirPlay device authentication.

    Returns 0 on success, 1 when the device rejects the authentication
    (e.g. a wrong PIN was entered).
    """
    credentials = await self.atv.airplay.generate_credentials()
    await self.atv.airplay.load_credentials(credentials)
    try:
        await self.atv.airplay.start_authentication()
        pin = await _read_input(self.loop, 'Enter PIN on screen: ')
        await self.atv.airplay.finish_authentication(pin)
    except exceptions.DeviceAuthenticationError:
        logging.exception('Failed to authenticate - invalid PIN?')
        return 1
    print('You may now use these credentials:')
    print(credentials)
    return 0
|
def get_params_parser():
    """Parse command line arguments.

    Builds and returns the argparse parser for the GitHub analysis tool.
    """
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        "-e", "--elastic_url",
        default="http://127.0.0.1:9200",
        help="Host with elastic search (default: http://127.0.0.1:9200)")
    args_parser.add_argument('-g', '--debug', dest='debug', action='store_true')
    args_parser.add_argument('-t', '--token', dest='token', help="GitHub token")
    args_parser.add_argument(
        '-o', '--org', dest='org', nargs='*',
        help='GitHub Organization/s to be analyzed')
    args_parser.add_argument(
        '-l', '--list', dest='list', action='store_true',
        help='Just list the repositories')
    args_parser.add_argument(
        '-n', '--nrepos', dest='nrepos', type=int, default=NREPOS,
        help='Number of GitHub repositories from the Organization to be analyzed (default:0, no limit)')
    args_parser.add_argument(
        '--db-projects-map',
        help="Database to include the projects Mapping DB")
    return args_parser
|
def unset_iscsi_info(self):
    """Disable iSCSI boot option in UEFI boot mode.

    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedInBiosError, if the system is
        in the BIOS boot mode.
    """
    # Guard clause: the operation only exists in UEFI boot mode.
    if self._is_boot_mode_uefi() is not True:
        raise exception.IloCommandNotSupportedInBiosError(
            'iSCSI boot is not supported in the BIOS boot mode')
    self._change_iscsi_settings({'iSCSIBootEnable': 'Disabled'})
|
def scale(cls, *scaling):
    """Create a scaling transform from a scalar or vector.

    :param scaling: The scaling factor. A scalar value will
        scale in both dimensions equally. A vector scaling
        value scales the dimensions independently.
    :type scaling: float or sequence
    :rtype: Affine
    """
    if len(scaling) == 1:
        sx = sy = float(scaling[0])
    else:
        # Coerce to float here too, for consistency with the scalar case
        # (previously two int arguments produced int matrix entries).
        sx, sy = (float(s) for s in scaling)
    return tuple.__new__(cls, (sx, 0.0, 0.0, 0.0, sy, 0.0, 0.0, 0.0, 1.0))
|
def write_pdb(self, custom_name='', out_suffix='', out_dir=None, custom_selection=None, force_rerun=False):
    """Write a new PDB file for the Structure's FIRST MODEL.

    Set custom_selection to a PDB.Select class for custom SMCRA selections.

    Args:
        custom_name: Filename of the new file (without extension)
        out_suffix: Optional string to append to new PDB file
        out_dir: Optional directory to output the file
        custom_selection: Optional custom selection class
        force_rerun: If existing file should be overwritten

    Returns:
        out_file: filepath of new PDB file

    Raises:
        TypeError: if the structure cannot be serialized as PDB.
    """
    if not custom_selection:
        # Default: write only the first model of the structure.
        custom_selection = ModelSelection([0])
    # If no output directory, custom name, or suffix is specified, add a suffix "_new"
    if not out_dir or not custom_name:
        if not out_suffix:
            out_suffix = '_new'
    # Prepare the output file path
    outfile = ssbio.utils.outfile_maker(inname=self.structure_file, outname=custom_name, append_to_name=out_suffix, outdir=out_dir, outext='.pdb')
    try:
        # Only write when forced or when the output does not exist yet.
        if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
            self.save(outfile, custom_selection)
    except TypeError as e:
        # If trying to save something that can't be saved as a PDB (example: 5iqr.cif),
        # log an error and re-raise.
        # The error thrown by PDBIO.py is "TypeError: %c requires int or char"
        log.error('{}: unable to save structure in PDB file format'.format(self.structure_file))
        raise TypeError(e)
    return outfile
|
def split(examples, ratio=0.8):
    """Utility function that can be used within the parse() implementation of
    sub classes to split a list of examples into two lists for training and
    testing.

    The first ``ratio`` fraction becomes the training list, the remainder
    the testing list.
    """
    cut = int(ratio * len(examples))
    training, testing = examples[:cut], examples[cut:]
    return training, testing
|
def _make_chunk_size(self, req_size):
    """Takes an allocation size as requested by the user and modifies it to
    be a suitable chunk size."""
    # Reserve room for the two chunk-size bookkeeping fields.
    size = req_size + 2 * self._chunk_size_t_size
    # Enforce the minimum chunk size.
    if size < self._chunk_min_size:
        size = self._chunk_min_size
    # Round up to the next alignment boundary when misaligned.
    if size & self._chunk_align_mask:
        size = (size & ~self._chunk_align_mask) + self._chunk_align_mask + 1
    return size
|
def cermine_dois(pdf_file, force_api=False, override_local=None):
    """Run `CERMINE <https://github.com/CeON/CERMINE>`_ to extract DOIs of cited papers from a PDF file.

    .. note::
        Try to use a local CERMINE JAR file, and falls back to using the API.
        JAR file is expected to be found in ``libbmc/external/cermine.jar``.
        You can override this using the ``override_local`` parameter.

    .. note::
        CERMINE JAR file can be found at
        `<http://maven.icm.edu.pl/artifactory/simple/kdd-releases/pl/edu/icm/cermine/cermine-impl/>`_.

    .. note::
        This fallback uses the `CERMINE API <http://cermine.ceon.pl/about.html>`_,
        and hence, uploads the PDF file (so uses network). Check out the
        CERMINE API terms.

    .. note::
        This function uses CERMINE to extract references from the paper, and
        tries to match them on Crossref to get DOIs.

    :param pdf_file: Path to the PDF file to handle.
    :param force_api: Force the use of the Cermine API (and do not try to use
        a local JAR file). Defaults to ``False``.
    :param override_local: Use this specific JAR file, instead of the one at
        the default location (``libbmc/external/cermine.jar``).
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    # TODO:
    #   * Do not convert to plain text, but use the extra metadata from
    #     CERMINE
    # Call CERMINE on the PDF file
    cermine_output = cermine(pdf_file, force_api, override_local)
    # Parse the resulting XML
    root = ET.fromstring(cermine_output)
    plaintext_references = [
        # Remove extra whitespaces
        tools.clean_whitespaces(
            # Convert XML element to string, discarding any leading "[n]"
            # NOTE(review): .replace(e.text, "") removes *every* occurrence of
            # the element's leading text, assuming it only holds the citation
            # label -- confirm against CERMINE's mixed-citation output.
            ET.tostring(e, method="text").decode("utf-8").replace(e.text, ""))
        for e in root.iter("mixed-citation")]
    # Call the plaintext methods to fetch DOIs
    return plaintext.get_cited_dois(plaintext_references)
|
def steem_to_sbd(self, steemamt=0, price=0, account=None):
    '''Uses the ticker to get the highest bid
    and moves the steem at that price.

    :param steemamt: amount of STEEM to sell; 0 means the full balance.
    :param price: sell price; 0 means use the ticker's highest bid.
    :param account: account to trade from; defaults to self.mainaccount.
    :return: True when the sell order was placed, False otherwise.
    '''
    if not account:
        account = self.mainaccount
    if self.check_balances(account):
        if steemamt == 0:
            # Default to selling the entire STEEM balance.
            steemamt = self.steembal
        elif steemamt > self.steembal:
            self.msg.error_message("INSUFFICIENT FUNDS. CURRENT STEEM BAL: " + str(self.steembal))
            return False
        if price == 0:
            # No explicit price: take the current highest bid from the DEX ticker.
            price = self.dex_ticker()['highest_bid']
        try:
            self.dex.sell(steemamt, "STEEM", price, account=account)
        except Exception as e:
            self.msg.error_message("COULD NOT SELL STEEM FOR SBD: " + str(e))
            return False
        else:
            self.msg.message("TRANSFERED " + str(steemamt) + " STEEM TO SBD AT THE PRICE OF: $" + str(price))
            return True
    else:
        # Balance check failed; nothing was traded.
        return False
|
def _set_properties ( self , resource ) :
"""Update properties from resource in body of ` ` api _ response ` `
: type resource : dict
: param resource : variable representation returned from the API ."""
|
self . _properties . clear ( )
cleaned = resource . copy ( )
if "name" in cleaned :
self . name = variable_name_from_full_name ( cleaned . pop ( "name" ) )
self . _properties . update ( cleaned )
|
def plot_power_factor_mu ( self , temp = 600 , output = 'eig' , relaxation_time = 1e-14 , xlim = None ) :
    """Semi-log plot of the power factor as a function of Fermi level.

    Args:
        temp: the temperature (K) at which the power factor is evaluated.
        output: 'eig' plots the three eigenvalue curves with a legend.
        relaxation_time: relaxation time in seconds used to scale the data.
        xlim: a list of min and max fermi energy; by default (-0.5, gap + 0.5).

    Returns:
        the matplotlib.pyplot module, for further customization.
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=(9, 7))
    power_factor = self._bz.get_power_factor(
        relaxation_time=relaxation_time, output=output,
        doping_levels=False)[temp]
    plt.semilogy(self._bz.mu_steps, power_factor, linewidth=3.0)
    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
    if xlim is None:
        # Default window: slightly wider than the band gap.
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim)
    plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
|
def filter ( args ) :
    """%prog filter test.blast

    Produce a new blast file and filter based on:
    - score: >= cutoff
    - pctid: >= cutoff
    - hitlen: >= cutoff
    - evalue: <= cutoff
    - ids: valid ids

    Use --inverse to obtain the complementary records for the criteria above.

    - noself: remove self-self hits
    """
    p = OptionParser(filter.__doc__)
    p.add_option("--score", dest="score", default=0, type="int", help="Score cutoff")
    p.set_align(pctid=95, hitlen=100, evalue=.01)
    p.add_option("--noself", default=False, action="store_true", help="Remove self-self hits")
    p.add_option("--ids", help="Path to file with ids to retain")
    p.add_option("--inverse", default=False, action="store_true", help="Similar to grep -v, inverse")
    p.set_outfile(outfile=None)

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    # Optional id whitelist; '#' lines are comments, commas act as
    # additional separators.
    if opts.ids:
        ids = set()
        for row in must_open(opts.ids):
            if row[0] == "#":
                continue
            row = row.replace(",", "\t")
            ids.update(row.split())
    else:
        ids = None

    blastfile, = args
    inverse = opts.inverse
    outfile = opts.outfile
    fp = must_open(blastfile)

    score, pctid, hitlen, evalue, noself = \
        opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself
    newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) \
        if outfile is None else outfile
    if inverse:
        newblastfile += ".inverse"
    fw = must_open(newblastfile, "w")
    for row in fp:
        if row[0] == '#':
            continue
        c = BlastLine(row)

        # noids is True when either endpoint is missing from the whitelist,
        # None when no whitelist was given (None is falsy in `remove`).
        if ids:
            if c.query in ids and c.subject in ids:
                noids = False
            else:
                noids = True
        else:
            noids = None

        remove = c.score < score or c.pctid < pctid or c.hitlen < hitlen or c.evalue > evalue or noids

        if inverse:
            remove = not remove

        # --noself is applied after --inverse on purpose: self-self hits are
        # dropped from both the direct and the complementary output.
        remove = remove or (noself and c.query == c.subject)

        if not remove:
            print(row.rstrip(), file=fw)

    # Fix: the input handle was previously never closed (leaked descriptor).
    fp.close()
    fw.close()
    return newblastfile
|
def _close ( self ) :
"""Close connection to remote host ."""
|
if self . _process is None :
return
self . quit ( )
self . _process . stdin . close ( )
logger . debug ( "Waiting for ssh process to finish..." )
self . _process . wait ( )
# Wait for ssh session to finish .
# self . _ process . terminate ( )
# self . _ process . kill ( )
self . _process = None
|
def predicatesOut(G: Graph, n: Node) -> Set[TriplePredicate]:
    """predicatesOut(G, n) is the set of predicates in arcsOut(G, n)."""
    predicates = set()
    for predicate, _obj in G.predicate_objects(n):
        predicates.add(predicate)
    return predicates
|
def increment ( self , member , amount = 1 ) :
    """Add ``amount`` to the score of ``member`` and return the new score."""
    new_score = self._dict[member] + amount
    self._dict[member] = new_score
    return new_score
|
def __run_embedded ( db_name , argv ) :
    """Runs the Database device server embedded in another TANGO Database
    (just like any other TANGO device server).

    :param db_name: database device name; unused in this function --
        presumably consumed elsewhere via the patched class, TODO confirm.
    :param argv: command-line arguments forwarded to the TANGO runner.
    """
    # Patch the DataBase class before handing it to the TANGO runner.
    __monkey_patch_database_class ( )
    # Run with gevent green mode so the embedded server stays cooperative.
    run ( ( DataBase , ) , args = argv , util = util , green_mode = GreenMode . Gevent )
|
def calmarnorm ( sharpe , T , tau = 1.0 ) :
    '''Multiplicator for normalizing the calmar ratio to period ``tau``.'''
    target = calmar(sharpe, tau)
    current = calmar(sharpe, T)
    return target / current
|
def get_instance ( self , payload ) :
    """Build an instance of TollFreeInstance

    :param dict payload: Payload response from the API
    :returns: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    :rtype: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    """
    solution = self._solution
    return TollFreeInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        country_code=solution['country_code'],
    )
|
def vswitch_delete ( self , vswitch_name , persist = True ) :
    """Delete vswitch.

    :param str vswitch_name: the vswitch name
    :param bool persist: whether to also delete the vswitch from the
        permanent system configuration (not just the running one)
    """
    # Thin delegation: the network-ops layer performs the actual deletion.
    self . _networkops . delete_vswitch ( vswitch_name , persist )
|
def aes_b64_decrypt ( value , secret , block_size = AES . block_size ) :
    """AES decrypt @value with @secret using the |CFB| mode of AES
    with a cryptographically secure initialization vector.

    -> (#str) AES decrypted @value, or None when @value is None

    from vital.security import aes_encrypt, aes_decrypt
    aes_encrypt("Hello, world",
                "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
    # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
    aes_decrypt(
        "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
        "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
    # -> 'Hello, world'
    """
    if value is not None :
        # NOTE(review): the IV is taken from the *base64 text* of @value
        # (first block_size characters) and the ciphertext starts at
        # block_size * 2 -- this must mirror the layout produced by the
        # paired aes_b64_encrypt; confirm there before changing anything.
        iv = value [ : block_size ]
        # Only the first 32 bytes of @secret are used (AES-256 key length).
        cipher = AES . new ( secret [ : 32 ] , AES . MODE_CFB , iv )
        # uniorbytes presumably normalizes str/bytes before b64-decoding;
        # the plaintext is assumed to be valid UTF-8.
        return cipher . decrypt ( b64decode ( uniorbytes ( value [ block_size * 2 : ] , bytes ) ) ) . decode ( 'utf-8' )
|
def _CheckIsSocket ( self , file_entry ) :
    """Checks the is_socket find specification.

    Args:
        file_entry (FileEntry): file entry.

    Returns:
        bool: True if the file entry matches the find specification, False if not.
    """
    if definitions.FILE_ENTRY_TYPE_SOCKET in self._file_entry_types:
        return file_entry.IsSocket()
    # Socket entries were not requested by this specification.
    return False
|
def saveSettings ( self , groupName = None ) :
    """Writes the registry items into the persistent settings store.

    :param groupName: QSettings group to write into; defaults to
        ``self.settingsGroupName``.
    """
    groupName = groupName if groupName else self . settingsGroupName
    settings = QtCore . QSettings ( )
    logger . info ( "Saving {} to: {}" . format ( groupName , settings . fileName ( ) ) )
    # Remove the whole group first so stale items from a previously longer
    # item list do not linger -- start with a clean slate.
    settings . remove ( groupName )
    settings . beginGroup ( groupName )
    try :
        for itemNr , item in enumerate ( self . items ) :
            key = "item-{:03d}" . format ( itemNr )
            # The repr() of the item dict is what gets stored; presumably
            # parsed back with ast.literal_eval on load -- confirm at loader.
            value = repr ( item . asDict ( ) )
            settings . setValue ( key , value )
    finally :
        # Always close the group, even if an item fails to serialize.
        settings . endGroup ( )
|
def DeserializeFromBufer ( buffer , offset = 0 ) :
    """Deserialize a Transaction instance from the specified buffer.

    Args:
        buffer (bytes, bytearray, BytesIO): data to create the stream from.
        offset: UNUSED.

    Returns:
        Transaction: the deserialized transaction.
    """
    mstream = StreamManager.GetStream(buffer)
    reader = BinaryReader(mstream)
    try:
        return Transaction.DeserializeFrom(reader)
    finally:
        # Always return the stream to the pool -- previously a parse error
        # in DeserializeFrom leaked the stream.
        StreamManager.ReleaseStream(mstream)
|
def prox_l0(v, alpha):
    r"""Compute the proximal operator of the :math:`\ell_0` "norm" (hard
    thresholding)

    .. math::
        \mathrm{prox}_{\alpha f}(v) = \mathcal{S}_{0,\alpha}(\mathbf{v})
        = \left\{ \begin{array}{ccc} 0 & \text{if} &
        |v| < \sqrt{2 \alpha} \\ v & \text{if} &
        |v| \geq \sqrt{2 \alpha} \end{array} \right. \;,

    where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_0`. Entries whose
    magnitude is below the threshold :math:`\sqrt{2 \alpha}` are zeroed;
    all others pass through unchanged. Unlike :func:`norm_l0`, no `axis`
    parameter is needed since the operator acts elementwise.

    Parameters
    ----------
    v : array_like
        Input array :math:`\mathbf{v}`
    alpha : float or array_like
        Parameter :math:`\alpha`

    Returns
    -------
    x : ndarray
        Output array
    """
    threshold = np.sqrt(2.0 * alpha)
    keep = np.abs(v) >= threshold
    # Multiplying by the boolean mask zeroes the sub-threshold entries.
    return keep * v
|
def deploy_directory ( directory , auth = None ) :
    """Deploy all files in a given directory.

    :param str directory: the path to a directory
    :param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of
        :class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
    """
    # Each known extension maps to its deployer and log template.
    handlers = (
        (BELANNO_EXTENSION, deploy_annotation, 'deploying annotation %s'),
        (BELNS_EXTENSION, deploy_namespace, 'deploying namespace %s'),
        (BEL_EXTENSION, deploy_knowledge, 'deploying knowledge %s'),
    )
    for file_name in os.listdir(directory):
        full_path = os.path.join(directory, file_name)
        for extension, deploy, message in handlers:
            if file_name.endswith(extension):
                log.info(message, full_path)
                deploy(full_path, file_name[:-len(extension)], auth=auth)
                break
        else:
            log.debug('not deploying %s', full_path)
|
def fw_rule_create ( self , data , fw_name = None , cache = False ) :
    """Top level rule creation routine.

    :param data: rule payload passed through to the backend helper.
    :param fw_name: optional firewall name the rule belongs to.
    :param cache: backend caching flag -- semantics live in
        _fw_rule_create, TODO confirm.
    """
    LOG . debug ( "FW Rule create %s" , data )
    # NOTE(review): the argument order flips here -- the helper takes
    # (fw_name, data, cache); keep in sync with its signature.
    self . _fw_rule_create ( fw_name , data , cache )
|
def _set_environment_variables ( self ) :
    """Initializes the correct environment variables for spark.

    Translates the collected launcher arguments into a single
    ``PYSPARK_SUBMIT_ARGS`` string and exports it for a later
    SparkContext start-up to consume.
    """
    cmd = [ ]
    # special case for driver JVM properties -- these are routed through
    # _set_launcher_property (presumably mapped to spark.* config
    # properties instead of plain CLI flags; confirm in that helper).
    self . _set_launcher_property ( "driver-memory" , "spark.driver.memory" )
    self . _set_launcher_property ( "driver-library-path" , "spark.driver.extraLibraryPath" )
    self . _set_launcher_property ( "driver-class-path" , "spark.driver.extraClassPath" )
    self . _set_launcher_property ( "driver-java-options" , "spark.driver.extraJavaOptions" )
    self . _set_launcher_property ( "executor-memory" , "spark.executor.memory" )
    self . _set_launcher_property ( "executor-cores" , "spark.executor.cores" )
    for key , val in self . _spark_launcher_args . items ( ) :
        if val is None :
            continue
        # Normalize scalars and sequences alike to a list of values.
        val = list ( as_iterable ( val ) )
        if len ( val ) :
            if key in self . _boolean_args :
                # Boolean flags take no value.
                cmd . append ( "--{key}" . format ( key = key ) )
            else :
                # Multi-valued options are joined with a per-key separator,
                # defaulting to a comma.
                sep = self . _spark_launcher_arg_sep . get ( key , ',' )
                cmd . append ( '--{key} {val}' . format ( key = key , val = sep . join ( str ( x ) for x in val ) ) )
    cmd += [ 'pyspark-shell' ]
    # Drop any empty fragments before joining into the final command line.
    cmd_line = ' ' . join ( x for x in cmd if x )
    os . environ [ "PYSPARK_SUBMIT_ARGS" ] = cmd_line
    log . info ( "spark-submit arguments: %s" , cmd_line )
|
def handle ( self , * args , ** options ) :
    """Command handle.

    Purges all observer and subscriber records and removes every
    throttle bookkeeping entry from the cache.
    """
    models . Observer . objects . all ( ) . delete ( )
    models . Subscriber . objects . all ( ) . delete ( )
    # ``keys(search=...)`` is a pattern search on the cache backend
    # (presumably django-redis); delete every key under the throttle prefix.
    for cache_key in cache . keys ( search = '{}*' . format ( THROTTLE_CACHE_PREFIX ) ) :
        cache . delete ( cache_key )
|
def set_attributes ( self , doc , fields , parent_type = None ) :
    """Populate this composite type's documentation, fields and parent.

    Fields are specified as a list so that order is preserved for display
    purposes only. (Might be used for certain serialization formats...)

    :param str doc: Description of type.
    :param list(Field) fields: Ordered list of fields for type.
    :param Optional[Composite] parent_type: The type this type inherits
        from.
    :raises InvalidSpec: when a field name is duplicated within this type
        or clashes with a field of any ancestor type.
    """
    self . raw_doc = doc
    self . doc = doc_unwrap ( doc )
    self . fields = fields
    self . parent_type = parent_type
    self . _raw_examples = OrderedDict ( )
    self . _examples = OrderedDict ( )
    self . _fields_by_name = { }  # Dict[str, Field]
    # Check that no two fields share the same name.
    for field in self . fields :
        if field . name in self . _fields_by_name :
            orig_lineno = self . _fields_by_name [ field . name ] . _ast_node . lineno
            raise InvalidSpec ( "Field '%s' already defined on line %s." % ( field . name , orig_lineno ) , field . _ast_node . lineno )
        self . _fields_by_name [ field . name ] = field
    # Check that the fields for this type do not match any of the fields of
    # its parents (walks the whole inheritance chain).
    cur_type = self . parent_type
    while cur_type :
        for field in self . fields :
            if field . name in cur_type . _fields_by_name :
                lineno = cur_type . _fields_by_name [ field . name ] . _ast_node . lineno
                raise InvalidSpec ( "Field '%s' already defined in parent '%s' on line %d." % ( field . name , cur_type . name , lineno ) , field . _ast_node . lineno )
        cur_type = cur_type . parent_type
    # Import namespaces containing any custom annotations
    # Note: we don't need to do this for builtin annotations because
    # they are treated as globals at the IR level
    for field in self . fields :
        for annotation in field . custom_annotations :
            # first, check the annotation *type*
            if annotation . annotation_type . namespace . name != self . namespace . name :
                self . namespace . add_imported_namespace ( annotation . annotation_type . namespace , imported_annotation_type = True )
            # second, check if we need to import the annotation itself
            # the annotation namespace is currently not actually used in the
            # backends, which reconstruct the annotation from the annotation
            # type directly. This could be changed in the future, and at
            # the IR level it makes sense to include the dependency
            if annotation . namespace . name != self . namespace . name :
                self . namespace . add_imported_namespace ( annotation . namespace , imported_annotation = True )
    # Indicate that the attributes of the type have been populated.
    self . _is_forward_ref = False
|
def load_act_node ( self ) -> ActNode :
    """Find and return the single Act node of this test body.

    Raises:
        ValidationError: AAA01 when no act block is found and AAA02 when
            multiple act blocks are found.
    """
    act_nodes = ActNode . build_body ( self . node . body )
    if not act_nodes :
        raise ValidationError ( self . first_line_no , self . node . col_offset , 'AAA01 no Act block found in test' )
    # Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if
    # any of the additional nodes are `pytest.raises`, then raise
    for a_n in act_nodes [ 1 : ] :
        if a_n . block_type in [ ActNodeType . marked_act , ActNodeType . result_assignment ] :
            raise ValidationError ( self . first_line_no , self . node . col_offset , 'AAA02 multiple Act blocks found in test' , )
    # The first Act node is the canonical one.
    return act_nodes [ 0 ]
|
def benchmark_forward ( self ) :
    """Benchmark forward execution and store the result in
    ``self.forward_stat``."""
    self . _setup ( )
    def f ( ) :
        self . _forward ( )
        # Block until the device has finished so timings are accurate.
        self . mod_ext . synchronize ( ** self . ext_kwargs )
    # Warm-up run: ignore the first call (setup / caching effects).
    f ( )
    self . forward_stat = self . _calc_benchmark_stat ( f )
|
def flatten_reducer(flattened_list: list, entry: typing.Union[list, tuple, COMPONENT]) -> list:
    """Reducer that flattens COMPONENT entries into a single flat list.

    :param flattened_list:
        The existing flattened list that has been populated from previous
        calls of this reducer function
    :param entry:
        An entry to be reduced. Either a COMPONENT instance or a list/tuple
        of COMPONENT instances
    :return:
        The flattened list with the entry flatly added to it
    """
    # COMPONENTs are duck-typed by their 'includes'/'files' attributes.
    is_component = hasattr(entry, 'includes') and hasattr(entry, 'files')
    if is_component:
        flattened_list.append(entry)
    elif entry:
        # A non-empty list/tuple of components is spliced in flat;
        # empty/falsy entries are silently skipped.
        flattened_list.extend(entry)
    return flattened_list
|
def _substitute ( self , var_map , safe = False ) :
    """Implementation of :meth:`substitute`.

    For internal use, the `safe` keyword argument allows to perform a
    substitution on the `args` and `kwargs` of the expression only,
    guaranteeing that the type of the expression does not change, at the
    cost of possibly not returning a maximally simplified expression. The
    `safe` keyword is not handled recursively, i.e. any `args`/`kwargs`
    will be fully simplified, possibly changing their types.
    """
    if self in var_map :
        # Direct replacement; under `safe` only when the replacement has
        # exactly the same type as self (strict type equality on purpose,
        # so subclasses do not qualify).
        if not safe or ( type ( var_map [ self ] ) == type ( self ) ) :
            return var_map [ self ]
    # NOTE(review): this checks whether self's *class* is an instance of
    # the Singleton metaclass (i.e. the type itself is a singleton), not
    # isinstance(self, Singleton) -- confirm that is intended.
    if isinstance ( self . __class__ , Singleton ) :
        return self
    # Recurse into the expression's positional and keyword arguments.
    new_args = [ substitute ( arg , var_map ) for arg in self . args ]
    new_kwargs = { key : substitute ( val , var_map ) for ( key , val ) in self . kwargs . items ( ) }
    if safe :
        # Re-instantiate with the same concrete class to preserve the type.
        return self . __class__ ( * new_args , ** new_kwargs )
    else :
        # `create` may simplify, possibly returning a different type.
        return self . create ( * new_args , ** new_kwargs )
|
def plot_ebands_with_edos ( self , dos_pos = 0 , method = "gaussian" , step = 0.01 , width = 0.1 , ** kwargs ) :
    """Plot the band structure and the DOS.

    Args:
        dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
        method: String defining the method for the computation of the DOS.
        step: Energy step (eV) of the linear mesh.
        width: Standard deviation (eV) of the gaussian.
        kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.

    Returns:
        `matplotlib` figure.
    """
    # Bands from the NSCF task; the GSR file is closed as soon as the
    # ebands object has been extracted.
    with self . nscf_task . open_gsr ( ) as gsr :
        gs_ebands = gsr . ebands
    # Bands from the selected DOS task, used only to compute the e-DOS.
    with self . dos_tasks [ dos_pos ] . open_gsr ( ) as gsr :
        dos_ebands = gsr . ebands
    edos = dos_ebands . get_edos ( method = method , step = step , width = width )
    return gs_ebands . plot_with_edos ( edos , ** kwargs )
|
def MODE ( self , setmode ) :
    """Set the thermostat operating mode.

    Invalid values are logged and ignored; MANU_MODE additionally sends
    the current set temperature along with the mode change.
    """
    lookup = {
        self.AUTO_MODE: 'AUTO_MODE',
        self.MANU_MODE: 'MANU_MODE',
        self.BOOST_MODE: 'BOOST_MODE',
        self.COMFORT_MODE: 'COMFORT_MODE',
        self.LOWERING_MODE: 'LOWERING_MODE',
    }
    mode = lookup.get(setmode)
    if mode is None:
        LOG.warning("Thermostat.MODE.setter: Invalid mode: %s" % str(setmode))
        return
    # Manual mode carries the set temperature; all others send True.
    set_data = self.get_set_temperature() if mode == 'MANU_MODE' else True
    self.actionNodeData(mode, set_data)
|
def _wait_for_status ( linode_id , status = None , timeout = 300 , quiet = True ) :
    '''Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status to look for to update.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    # Poll every `interval` seconds until the status matches or we run out
    # of attempts.
    for _ in range(int(timeout / interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting,
        )
    return False
|
def id_field_name ( cls ) :
    """Return the single primary-key field name, caching the lookup.

    Raises ValueError when the class declares more than one primary key.
    """
    if cls._cache_id_field_name is None:
        pk_names = cls.pk_names()
        if len(pk_names) != 1:  # pragma: no cover
            raise ValueError("{classname} has more than 1 primary key!".format(classname=cls.__name__))
        # Memoize on the class so subsequent calls skip pk_names().
        cls._cache_id_field_name = pk_names[0]
    return cls._cache_id_field_name
|
def ExportToDjangoView ( request ) :
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.

    :param request: incoming Django request (not inspected here).
    :returns: HttpResponse carrying the metrics in Prometheus text format.
    """
    # In multi-process deployments each worker keeps its own metrics, so
    # aggregate them from prometheus_multiproc_dir instead of relying on
    # the process-local default registry.
    if 'prometheus_multiproc_dir' in os . environ :
        registry = prometheus_client . CollectorRegistry ( )
        multiprocess . MultiProcessCollector ( registry )
    else :
        registry = prometheus_client . REGISTRY
    metrics_page = prometheus_client . generate_latest ( registry )
    return HttpResponse ( metrics_page , content_type = prometheus_client . CONTENT_TYPE_LATEST )
|
def neighbors ( self , key ) :
    """Return dict of neighbor atom index and connecting bond."""
    result = {}
    for index, attrs in self.graph[key].items():
        result[index] = attrs["bond"]
    return result
|
def render_context_with_title ( self , context ) :
    """Render a page title and insert it into the context.

    This function takes in a context dict and uses it to render the
    page_title variable. It then appends this title to the context using
    the 'page_title' key. If there is already a page_title key defined in
    context received then this function will do nothing.

    :param context: template context dict; may gain a 'page_title' entry.
    :returns: the same context dict, for convenience.
    """
    if "page_title" not in context :
        con = template . Context ( context )
        # NOTE(sambetts): Use force_text to ensure lazy translations
        # are handled correctly.
        temp = template . Template ( encoding . force_text ( self . page_title ) )
        # The title template itself is rendered against the page context.
        context [ "page_title" ] = temp . render ( con )
    return context
|
def fire ( self , * args , ** kwargs ) :
    """Emit the signal, calling all coroutines in-line with the given
    arguments and in the order they were registered.

    This is obviously a coroutine.

    Instead of calling :meth:`fire` explicitly, the ad-hoc signal object
    itself can be called, too.
    """
    # Iterate over a snapshot so handlers can disconnect during emission
    # without breaking the iteration.
    for token , coro in list ( self . _connections . items ( ) ) :
        keep = yield from coro ( * args , ** kwargs )
        # A falsy return value unsubscribes the handler.
        if not keep :
            del self . _connections [ token ]
|
def _login ( session ) :
    """Login.

    Use Selenium webdriver to login. USPS authenticates users
    in part by a key generated by complex, obfuscated client-side
    Javascript, which can't (easily) be replicated in Python.
    Invokes the webdriver once to perform login, then uses the
    resulting session cookies with a standard Python `requests`
    session.

    :param session: requests-style session carrying ``auth`` credentials;
        its cookie jar is replaced by the cookies the webdriver obtains.
    :raises USPSError: when the webdriver cannot start or login times out.
    """
    _LOGGER . debug ( "attempting login" )
    session . cookies . clear ( )
    # Drop stale cached responses when this is a caching session; plain
    # sessions lack the method, hence the AttributeError guard.
    try :
        session . remove_expired_responses ( )
    except AttributeError :
        pass
    try :
        driver = _get_driver ( session . auth . driver )
    except WebDriverException as exception :
        raise USPSError ( str ( exception ) )
    driver . get ( LOGIN_URL )
    username = driver . find_element_by_name ( 'username' )
    username . send_keys ( session . auth . username )
    password = driver . find_element_by_name ( 'password' )
    password . send_keys ( session . auth . password )
    driver . find_element_by_id ( 'btn-submit' ) . click ( )
    # Successful login is detected by the page title changing.
    try :
        WebDriverWait ( driver , LOGIN_TIMEOUT ) . until ( EC . title_is ( WELCOME_TITLE ) )
    except TimeoutException :
        raise USPSError ( 'login failed' )
    # Copy the authenticated cookies into the requests session and persist
    # them for later runs.
    for cookie in driver . get_cookies ( ) :
        session . cookies . set ( name = cookie [ 'name' ] , value = cookie [ 'value' ] )
    _save_cookies ( session . cookies , session . auth . cookie_path )
|
def sources ( self ) :
    """Returns a dictionary of source methods found on this object,
    keyed on method name. Source methods are identified by
    (self, context) arguments on this object. For example:

    .. code-block:: python

        def f(self, context):

    is a source method, but

    .. code-block:: python

        def f(self, ctx):

    is not.
    """
    # Compute lazily on first access and memoize on the instance.
    if not hasattr(self, '_sources'):
        self._sources = find_sources(self)
    return self._sources
|
def record_command ( self , cmd , prg = '' ) :
    """record the command passed - this is usually the name of the program
    being run or task being run

    :param cmd: command text; coerced to a string before logging.
    :param prg: optional program name recorded alongside the command.
    """
    # Delegates to the shared logger using the command log file target.
    self . _log ( self . logFileCommand , force_to_string ( cmd ) , prg )
|
def set_courses ( self , course_ids ) :
    """Sets the courses.

    arg:    course_ids (osid.id.Id[]): the course ``Ids``
    raise:  InvalidArgument - ``course_ids`` is invalid
    raise:  NullArgument - ``course_ids`` is ``null``
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.ActivityForm.set_assets_template
    if not isinstance ( course_ids , list ) :
        raise errors . InvalidArgument ( )
    if self . get_courses_metadata ( ) . is_read_only ( ) :
        raise errors . NoAccess ( )
    # Validate every Id before persisting; store their string forms.
    idstr_list = [ ]
    for object_id in course_ids :
        if not self . _is_valid_id ( object_id ) :
            raise errors . InvalidArgument ( )
        idstr_list . append ( str ( object_id ) )
    self . _my_map [ 'courseIds' ] = idstr_list
|
def cmd_export_all ( * args ) :
    """Arguments:
        <output folder> [-- [--quality <0-100>] [--page_format <page_format>]]

    Export all documents as PDF files.
    Default quality is 50.
    Default page format is A4.

    Possible JSON replies:
        "status": "error", "exception": "yyy",
        "reason": "xxxx", "args": "(xxxx, )"

        "status": "ok",
        "docids": [
            ["xxx", "file:///tmp/xxx.pdf"],
            ["yyy", "file:///tmp/yyy.pdf"],
            ["zzz", "file:///tmp/zzz.pdf"]
        ],
        "output_dir": "file:///tmp",
    """
    ( output_dir , quality , page_format ) = _get_export_params ( args )
    dsearch = get_docsearch ( )
    try :
        os . mkdir ( output_dir )
    except FileExistsError :  # NOQA (Python 3.x only)
        pass
    out = [ ]
    docs = [ d for d in dsearch . docs ]
    # Export in a stable order, sorted by document id.
    docs . sort ( key = lambda doc : doc . docid )
    output_dir = FS . safe ( output_dir )
    for ( doc_idx , doc ) in enumerate ( docs ) :
        output_pdf = FS . join ( output_dir , doc . docid + ".pdf" )
        exporter = doc . build_exporter ( file_format = "pdf" )
        if exporter . can_change_quality :
            exporter . set_quality ( quality )
        if exporter . can_select_format :
            exporter . set_page_format ( page_format )
        verbose ( "[{}/{}] Exporting {} --> {} ..." . format ( doc_idx + 1 , len ( docs ) , doc . docid , output_pdf ) )
        exporter . save ( output_pdf )
        out . append ( ( doc . docid , output_pdf ) )
        # Drop the reference and collect eagerly: exported documents can
        # hold large page images in memory.
        doc = None
        gc . collect ( )
    verbose ( "Done" )
    reply ( { "docids" : out , "output_dir" : output_dir , } )
|
def render ( template , ** context ) :
    '''Render a template with uData frontend specifics

    * Theme
    '''
    # Resolve the active theme from the app config, then delegate.
    active_theme = current_app.config['THEME']
    return render_theme_template(get_theme(active_theme), template, **context)
|
def doOutages ( self ) :
    """Applies branch outages according to the per-branch outage rates.

    Each branch's ``online`` flag is drawn from a weighted choice:
    presumably False with probability r and True with probability 1 - r
    (depends on weighted_choice's semantics -- confirm there).
    """
    assert len(self.branchOutages) == len(self.market.case.branches)
    weights = [[(False, r), (True, 1 - (r))] for r in self.branchOutages]
    for i, ln in enumerate(self.market.case.branches):
        ln.online = weighted_choice(weights[i])
        if not ln.online:
            # Fixed: was a Python 2 print statement, a SyntaxError under
            # Python 3 (the rest of the codebase uses print() calls).
            print("Branch outage [%s] in period %d." % (ln.name, self.stepid))
|
def set_input_shape_ngpu ( self , new_input_shape ) :
    """Create and initialize layer parameters on the device previously set
    in self.device_name.

    :param new_input_shape: a list or tuple for the shape of the input.
    """
    assert self . device_name , "Device name has not been set."
    device_name = self . device_name
    if self . input_shape is None :
        # First time setting the input shape; None leads the batch axis.
        self . input_shape = [ None ] + [ int ( d ) for d in list ( new_input_shape ) ]
    if device_name in self . params_device :
        # There is a copy of weights on this device: reuse it by copying
        # the stored variables into this layer's attributes.
        self . __dict__ . update ( self . params_device [ device_name ] )
        return
    # Stop recursion: mark this device as in-progress before initializing.
    self . params_device [ device_name ] = { }
    # Initialize weights on this device
    with tf . device ( device_name ) :
        self . set_input_shape ( self . input_shape )
        keys_after = self . __dict__ . keys ( )
    if self . params_names is None :
        # Prevent overriding training: record which attributes are the
        # trainable tf.Variable parameters only once.
        self . params_names = [ k for k in keys_after if isinstance ( self . __dict__ [ k ] , tf . Variable ) ]
    params = { k : self . __dict__ [ k ] for k in self . params_names }
    self . params_device [ device_name ] = params
|
def _got_message ( self , uid , text ) :
    """The controller has sent us a message.

    :param uid: Unique id of the controller
    :param text: Text to display
    :type uid: str
    :type text: str
    """
    # TODO: use try (an unknown uid would raise KeyError below)
    e = Event ( uid , E_MESSAGE , text )
    self . queue . put_nowait ( e )
    # Refresh the controller's last-seen timestamp (slot 2 of its record).
    self . controllers [ uid ] [ 2 ] = time . time ( )
|
def _reference_rmvs ( self , removes ) :
    """Print a framed summary report for all removed packages."""
    print("")
    self.msg.template(78)
    plural = "packages" if len(removes) > 1 else "package"
    print("| Total {0} {1} removed".format(len(removes), plural))
    self.msg.template(78)
    for pkg in removes:
        # A package that still resolves to an installed name was not
        # actually removed.
        if GetFromInstalled(pkg).name():
            print("| Package {0} not found".format(pkg))
        else:
            print("| Package {0} removed".format(pkg))
    self.msg.template(78)
    print("")
|
def element ( self , inp = None ) :
    """Return a fixed-width string element built from ``inp``.

    ``None`` yields all blanks; anything else is stringified, truncated
    to ``self.length`` characters and right-padded with spaces.
    """
    if inp is None:
        return ' ' * self.length
    return str(inp)[:self.length].ljust(self.length)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.