code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name,
                                container_name=None):
    '''Get the container logs for containers in a container group.
    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.
        container_name (str): Optional name of a container in the group.
    Returns:
        HTTP response. Container logs.
    '''
    # Single-container groups conventionally name the container after the
    # group, so fall back to the group name when none is given.
    if container_name is None:
        container_name = container_group_name
    endpoint = (
        '{0}/subscriptions/{1}/resourcegroups/{2}'
        '/providers/Microsoft.ContainerInstance/ContainerGroups/{3}'
        '/containers/{4}/logs?api-version={5}'
    ).format(get_rm_endpoint(), subscription_id, resource_group,
             container_group_name, container_name, CONTAINER_API)
    return do_get(endpoint, access_token)
|
Get the container logs for containers in a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_name (str): Optional name of a container in the group.
Returns:
HTTP response. Container logs.
|
def construct_ingest_query(self, static_path, columns):
    """
    Build an ingest query for an HDFS TSV load.

    :param static_path: The path on hdfs where the data is
    :type static_path: str
    :param columns: List of all the columns that are available
    :type columns: list
    :return: the Druid ``index_hadoop`` ingestion spec as a dict
    """
    # Backward compatibility for num_shards: target_partition_size is the
    # default setting and, when explicitly provided, overrides num_shards.
    num_shards = self.num_shards
    target_partition_size = self.target_partition_size
    if target_partition_size == -1:
        if num_shards == -1:
            # Neither option supplied: fall back to the default target size.
            target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
    else:
        # An explicit partition size wins; disable the shard count.
        # (Fixes the previous mis-indented else, which discarded num_shards
        # exactly when it was the only option the caller provided.)
        num_shards = -1
    metric_names = [m['fieldName'] for m in self.metric_spec if m['type'] != 'count']
    # Take all the columns, which are not the time dimension
    # or a metric, as the dimension columns
    dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
    ingest_query_dict = {
        "type": "index_hadoop",
        "spec": {
            "dataSchema": {
                "metricsSpec": self.metric_spec,
                "granularitySpec": {
                    "queryGranularity": self.query_granularity,
                    "intervals": self.intervals,
                    "type": "uniform",
                    "segmentGranularity": self.segment_granularity,
                },
                "parser": {
                    "type": "string",
                    "parseSpec": {
                        "columns": columns,
                        "dimensionsSpec": {
                            "dimensionExclusions": [],
                            "dimensions": dimensions,  # list of names
                            "spatialDimensions": []
                        },
                        "timestampSpec": {
                            "column": self.ts_dim,
                            "format": "auto"
                        },
                        "format": "tsv"
                    }
                },
                "dataSource": self.druid_datasource
            },
            "tuningConfig": {
                "type": "hadoop",
                "jobProperties": {
                    "mapreduce.job.user.classpath.first": "false",
                    "mapreduce.map.output.compress": "false",
                    "mapreduce.output.fileoutputformat.compress": "false",
                },
                "partitionsSpec": {
                    "type": "hashed",
                    "targetPartitionSize": target_partition_size,
                    "numShards": num_shards,
                },
            },
            "ioConfig": {
                "inputSpec": {
                    "paths": static_path,
                    "type": "static"
                },
                "type": "hadoop"
            }
        }
    }
    # Caller-supplied MapReduce job properties extend/override the defaults.
    if self.job_properties:
        ingest_query_dict['spec']['tuningConfig']['jobProperties'] \
            .update(self.job_properties)
    if self.hadoop_dependency_coordinates:
        ingest_query_dict['hadoopDependencyCoordinates'] \
            = self.hadoop_dependency_coordinates
    return ingest_query_dict
|
Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:type static_path: str
:param columns: List of all the columns that are available
:type columns: list
|
def pstdev(data):
    """Calculate the population standard deviation of ``data``."""
    # See http://stackoverflow.com/a/27758326
    count = len(data)
    if count < 2:
        raise ValueError('variance requires at least two data points')
    population_variance = _ss(data) / count
    return population_variance ** 0.5
|
Calculates the population standard deviation.
|
def start(self):
    '''
    Start the actual proxy minion.
    If sub-classed, don't **ever** forget to run:
    super(YourSubClass, self).start()
    NOTE: Run any required code before calling `super()`.
    '''
    # Parent start() performs the common daemon/parser setup first.
    super(ProxyMinion, self).start()
    try:
        # Only proceed when the process is allowed to run as the
        # configured user.
        if check_user(self.config['user']):
            self.action_log_info('The Proxy Minion is starting up')
            self.verify_hash_type()
            # tune_in() blocks for the lifetime of the minion event loop.
            self.minion.tune_in()
            # restart set after tune_in() returns means the connection to
            # the master was lost and could not be re-established.
            if self.minion.restart:
                raise SaltClientError('Proxy Minion could not connect to Master')
    except (KeyboardInterrupt, SaltSystemExit) as exc:
        self.action_log_info('Proxy Minion Stopping')
        if isinstance(exc, KeyboardInterrupt):
            log.warning('Exiting on Ctrl-c')
            self.shutdown()
        else:
            # SaltSystemExit carries an exit code to propagate on shutdown.
            log.error(exc)
            self.shutdown(exc.code)
|
Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
|
def _sync(self):
"""
Synchronize the cached data with the underlyind database.
Uses an internal transaction counter and compares to the checkpoint_operations
and checkpoint_timeout paramters to determine whether to persist the memory store.
In this implementation, this method wraps calls to C{shelve.Shelf#sync}.
"""
if (self._opcount > self.checkpoint_operations or
datetime.now() > self._last_sync + self.checkpoint_timeout):
self.log.debug("Synchronizing queue metadata.")
self.queue_metadata.sync()
self._last_sync = datetime.now()
self._opcount = 0
else:
self.log.debug("NOT synchronizing queue metadata.")
|
Synchronize the cached data with the underlying database.
Uses an internal transaction counter and compares to the checkpoint_operations
and checkpoint_timeout parameters to determine whether to persist the memory store.
In this implementation, this method wraps calls to C{shelve.Shelf#sync}.
|
def memory_objects_for_hash(self, n):
    """
    Return the set of :class:`SimMemoryObjects` that contain expressions that
    contain a variable with the hash `n`.
    """
    # Set comprehension instead of set([...]): avoids building a throwaway
    # list (flake8-comprehensions C403); docstring fixed to name `n`, the
    # actual parameter (it previously referred to `h`).
    return {self[addr] for addr in self.addrs_for_hash(n)}
|
Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash
`h`.
|
def readme(filename, encoding='utf8'):
    """
    Return the full text contents of ``filename`` decoded with ``encoding``.
    """
    with io.open(filename, encoding=encoding) as handle:
        contents = handle.read()
    return contents
|
Read the contents of a file
|
def get_content(self, obj):
    """All content for a state's page on an election day."""
    election_day = ElectionDay.objects.get(
        date=self.context['election_date'])
    # House special elections hang off a district division; the page
    # content lives on its parent (the state).
    if obj.level.name == DivisionLevel.DISTRICT:
        division = obj.parent
    else:
        division = obj
    special = bool(self.context.get('special'))
    return PageContent.objects.division_content(
        election_day,
        division,
        special
    )
|
All content for a state's page on an election day.
|
def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):
    """
    Convert Average pooling.
    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting pooling ...')

    # Pick the keras layer name according to the requested naming policy.
    if names == 'short':
        tf_name = 'P' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    # ONNX-style parameter keys take precedence over the pytorch ones.
    if 'kernel_shape' in params:
        height, width = params['kernel_shape']
    else:
        height, width = params['kernel_size']
    if 'strides' in params:
        stride_height, stride_width = params['strides']
    else:
        stride_height, stride_width = params['stride']
    if 'pads' in params:
        padding_h, padding_w, _, _ = params['pads']
    else:
        padding_h, padding_w = params['padding']

    input_name = inputs[0]
    # Keras 'same' padding only matches this exact odd-kernel, stride-1,
    # half-kernel-padding configuration; otherwise pad explicitly.
    fits_same = (height % 2 == 1 and width % 2 == 1 and
                 height // 2 == padding_h and width // 2 == padding_w and
                 stride_height == 1 and stride_width == 1)
    if fits_same:
        pad = 'same'
    else:
        pad = 'valid'
        padding_name = tf_name + '_pad'
        zero_padding = keras.layers.ZeroPadding2D(
            padding=(padding_h, padding_w),
            name=padding_name
        )
        layers[padding_name] = zero_padding(layers[inputs[0]])
        input_name = padding_name

    # Pooling type AveragePooling2D
    pooling = keras.layers.AveragePooling2D(
        pool_size=(height, width),
        strides=(stride_height, stride_width),
        padding=pad,
        name=tf_name,
        data_format='channels_first'
    )
    layers[scope_name] = pooling(layers[input_name])
|
Convert Average pooling.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
def simBirth(self, which_agents):
    '''
    Makes new consumers for the given indices. Slightly extends base method by also setting
    pLvlErrNow = 1.0 for new agents, indicating that they correctly perceive their productivity.
    Parameters
    ----------
    which_agents : np.array(Bool)
        Boolean array of size self.AgentCount indicating which agents should be "born".
    Returns
    -------
    None
    '''
    AggShockConsumerType.simBirth(self, which_agents)
    if not hasattr(self, 'pLvlErrNow'):
        # First birth event: initialize perception errors for everyone.
        self.pLvlErrNow = np.ones(self.AgentCount)
    else:
        # Newborns correctly perceive their productivity level.
        self.pLvlErrNow[which_agents] = 1.0
|
Makes new consumers for the given indices. Slightly extends base method by also setting
pLvlErrNow = 1.0 for new agents, indicating that they correctly perceive their productivity.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
|
def _request(self, request_method, endpoint='', url='', data=None, params=None, use_api_key=False, omit_api_version=False):
    """Perform a http request via the specified method to an API endpoint.
    :param string request_method: Request method.
    :param string endpoint: Target endpoint. (Optional).
    :param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
    :param dict params: Provide parameters to pass to the request. (Optional).
    :param dict data: Data to pass to the post. (Optional).
    :param bool use_api_key: Authenticate with the API key instead of the OAuth authorization. (Optional).
    :param bool omit_api_version: Build the url without the API version segment. (Optional).
    :return: Response
    :rtype: ``Response``
    :raises PyCronofyRequestError: When the response status is not 200 or 202.
    """
    if not data:
        data = {}
    if not params:
        params = {}
    # An explicitly supplied url (e.g. a pagination link) wins; otherwise
    # build it from the endpoint, optionally without the version segment.
    if endpoint and not url:
        if omit_api_version:
            url = '%s/%s' % (self.base_url, endpoint)
        else:
            url = '%s/%s/%s' % (self.base_url, settings.API_VERSION, endpoint)
    authorization = (self.auth.get_api_key() if use_api_key
                     else self.auth.get_authorization())
    headers = {
        'Authorization': authorization,
        'User-Agent': self.user_agent,
    }
    # getattr is the idiomatic dispatch to requests.get/post/etc.
    # (__getattribute__ bypasses normal attribute lookup for no benefit).
    response = getattr(requests, request_method)(
        url=url,
        hooks=settings.REQUEST_HOOK,
        headers=headers,
        json=data,
        params=params
    )
    if response.status_code not in (200, 202):
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise PyCronofyRequestError(
                request=e.request,
                response=e.response,
            )
    return response
|
Perform a http request via the specified method to an API endpoint.
:param string request_method: Request method.
:param string endpoint: Target endpoint. (Optional).
:param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
:param dict params: Provide parameters to pass to the request. (Optional).
:param dict data: Data to pass to the post. (Optional).
:return: Response
:rtype: ``Response``
|
def update(self, filter_, document, multi=False, **kwargs):
    """Update document(s) matching ``filter_`` with ``document``.

    Validates the update document first, then delegates to the underlying
    collection's ``update_many`` when ``multi`` is true, otherwise to
    ``update_one``, returning that call's result.
    """
    self._valide_update_document(document)
    handler = self.__collect.update_many if multi else self.__collect.update_one
    return handler(filter_, document, **kwargs)
|
update method
|
def add(self, key):
    """
    Add `key` as an item to this OrderedSet, then return its index.
    If `key` is already in the OrderedSet, return the index it already
    had.
    """
    existing_index = self.map.get(key)
    if existing_index is not None:
        return existing_index
    new_index = len(self.items)
    self.items.append(key)
    self.map[key] = new_index
    return new_index
|
Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had.
|
def overplot_lines(ax,
                   catlines_all_wave,
                   list_valid_islitlets,
                   rectwv_coeff,
                   global_integer_offset_x_pix, global_integer_offset_y_pix,
                   ds9_file, debugplot):
    """Overplot lines (arc/OH).
    Parameters
    ----------
    ax : matplotlib axes
        Current plot axes.
    catlines_all_wave : numpy array
        Array with wavelengths of the lines to be overplotted.
    list_valid_islitlets : list of integers
        List with numbers of valid slitlets.
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    global_integer_offset_x_pix : int
        Global offset in the X direction to be applied after computing
        the expected location.
    global_integer_offset_y_pix : int
        Global offset in the Y direction to be applied after computing
        the expected location.
    ds9_file : file handler or None
        File handler to ds9 region file where the location of lines
        must be saved.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.
    """
    for islitlet in list_valid_islitlets:
        # Linear wavelength calibration for this slitlet; crvaln_linear is
        # the wavelength at the last pixel (EMIR_NAXIS1) of the linear scale.
        crval1_linear = rectwv_coeff.contents[islitlet - 1]['crval1_linear']
        cdelt1_linear = rectwv_coeff.contents[islitlet - 1]['cdelt1_linear']
        crvaln_linear = crval1_linear + (EMIR_NAXIS1 - 1) * cdelt1_linear
        bb_ns1_orig = rectwv_coeff.contents[islitlet - 1]['bb_ns1_orig']
        # Rectification transformation: polynomial order and coefficients.
        ttd_order = rectwv_coeff.contents[islitlet - 1]['ttd_order']
        aij = rectwv_coeff.contents[islitlet - 1]['ttd_aij']
        bij = rectwv_coeff.contents[islitlet - 1]['ttd_bij']
        min_row_rectified = float(
            rectwv_coeff.contents[islitlet - 1]['min_row_rectified']
        )
        max_row_rectified = float(
            rectwv_coeff.contents[islitlet - 1]['max_row_rectified']
        )
        mean_row_rectified = (min_row_rectified + max_row_rectified) / 2
        wpoly_coeff = rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']
        # Expected line locations in rectified coordinates: (x0, y0) at the
        # central row, (x1, y1) at the lowest row, (x2, y2) at the highest.
        x0 = []
        y0 = []
        x1 = []
        y1 = []
        x2 = []
        y2 = []
        for line in catlines_all_wave:
            # Only consider catalogue lines within this slitlet's range.
            if crval1_linear <= line <= crvaln_linear:
                # Solve wavelength_polynomial(x) == line for x by shifting
                # the constant term and taking the polynomial roots.
                tmp_coeff = np.copy(wpoly_coeff)
                tmp_coeff[0] -= line
                tmp_xroots = np.polynomial.Polynomial(tmp_coeff).roots()
                for dum in tmp_xroots:
                    # Keep only real roots falling inside the detector.
                    if np.isreal(dum):
                        dum = dum.real
                        if 1 <= dum <= EMIR_NAXIS1:
                            x0.append(dum)
                            y0.append(mean_row_rectified)
                            x1.append(dum)
                            y1.append(min_row_rectified)
                            x2.append(dum)
                            y2.append(max_row_rectified)
        # Map rectified coordinates back to the distorted image frame, then
        # shift by the slitlet vertical origin and the global offsets.
        xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0))
        xx0 -= global_integer_offset_x_pix
        yy0 += bb_ns1_orig
        yy0 -= global_integer_offset_y_pix
        xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1))
        xx1 -= global_integer_offset_x_pix
        yy1 += bb_ns1_orig
        yy1 -= global_integer_offset_y_pix
        xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2))
        xx2 -= global_integer_offset_x_pix
        yy2 += bb_ns1_orig
        yy2 -= global_integer_offset_y_pix
        if abs(debugplot) % 10 != 0:
            if abs(debugplot) == 22:
                # Draw a full segment from bottom to top of the slitlet.
                for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2):
                    ax.plot([xx1_, xx2_], [yy1_, yy2_], 'c-', linewidth=2.0)
            else:
                # Otherwise just mark the central location of each line.
                ax.plot(xx0, yy0, 'c.')
        if ds9_file is not None:
            ds9_file.write(
                '#\n# islitlet...........: {0}\n'.format(islitlet)
            )
            for xx0_, yy0_ in zip(xx0, yy0):
                ds9_file.write(
                    'circle {0} {1} 2 # fill=1\n'.format(
                        xx0_, yy0_)
                )
|
Overplot lines (arc/OH).
Parameters
----------
ax : matplotlib axes
Current plot axes.
catlines_all_wave : numpy array
Array with wavelengths of the lines to be overplotted.
list_valid_islitlets : list of integers
List with numbers of valid slitlets.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
global_integer_offset_x_pix : int
Global offset in the X direction to be applied after computing
the expected location.
global_integer_offset_y_pix : int
Global offset in the Y direction to be applied after computing
the expected location.
ds9_file : file handler or None
File handler to ds9 region file where the location of lines
must be saved.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
|
def isObservableElement(self, elementName):
    """
    Tell whether an element name denotes an observable element.

    :param str elementName: the element name to evaluate
    :return: True if it is an observable element, otherwise False.
    :rtype: bool
    """
    if not isinstance(elementName, str):
        raise TypeError(
            "Element name should be a string .I receive this {0}".format(
                elementName))
    # The wildcard observes everything; anything else is evaluated.
    if elementName == "*":
        return True
    return self._evaluateString(elementName)
|
Mention if an element is an observable element.
:param str ElementName: the element name to evaluate
:return: true if is an observable element, otherwise false.
:rtype: bool
|
def is_admin_user(self):
    """
    Check whether the current user holds the LabManager or Manager role.
    :return: Boolean
    """
    current_roles = api.user.get_current().getRoles()
    return any(role in current_roles for role in ("LabManager", "Manager"))
|
Checks if the user is the admin or a SiteAdmin user.
:return: Boolean
|
def _finish_add(self, num_bytes_to_add, num_partition_bytes_to_add):
    # type: (int, int) -> None
    '''
    An internal method to do all of the accounting needed whenever
    something is added to the ISO.  This method should only be called by
    public API implementations.
    Parameters:
     num_bytes_to_add - The number of additional bytes to add to all
                        descriptors.
     num_partition_bytes_to_add - The number of additional bytes to add to
                                  the partition if this is a UDF file.
    Returns:
     Nothing.
    '''
    # Every Primary Volume Descriptor tracks the total space size.
    for pvd in self.pvds:
        pvd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add)
    # The Joliet descriptor, when present, mirrors the same space size.
    if self.joliet_vd is not None:
        self.joliet_vd.add_to_space_size(num_bytes_to_add + num_partition_bytes_to_add)
    if self.enhanced_vd is not None:
        # The enhanced descriptor simply copies the PVD's sizes.
        self.enhanced_vd.copy_sizes(self.pvd)
    if self.udf_root is not None:
        # UDF accounting is done in logical-block-sized extents, rounded up.
        num_extents_to_add = utils.ceiling_div(num_partition_bytes_to_add,
                                               self.pvd.logical_block_size())
        self.udf_main_descs.partition.part_length += num_extents_to_add
        self.udf_reserve_descs.partition.part_length += num_extents_to_add
        self.udf_logical_volume_integrity.size_table += num_extents_to_add
    # Either rearrange extents immediately (always-consistent mode) or
    # defer the reshuffle until it is actually needed.
    if self._always_consistent:
        self._reshuffle_extents()
    else:
        self._needs_reshuffle = True
|
An internal method to do all of the accounting needed whenever
something is added to the ISO. This method should only be called by
public API implementations.
Parameters:
num_bytes_to_add - The number of additional bytes to add to all
descriptors.
num_partition_bytes_to_add - The number of additional bytes to add to
the partition if this is a UDF file.
Returns:
Nothing.
|
def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel):
    '''
    This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
    This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost.  The H2OFrame contains
    numerical and enum columns alone.  Note that H2O one-hot-encoding introduces a missing(NA)
    column.  There can be NAs in any columns.
    Follow the steps below to compare H2OXGBoost and native XGBoost:
    1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
    h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
    h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
    h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
    2. Derive the DMatrix from H2OFrame:
    nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
    3. Derive the parameters for native XGBoost:
    nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
    4. Train your native XGBoost model and generate a prediction:
    nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
    nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1])
    5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
    :param predictors: List of predictor columns, can be column names or indices
    :param yresp: response column, can be column index or name
    :param h2oXGBoostModel: H2OXGBoost model that was built with the same H2OFrame as input earlier
    :return: DMatrix that can be an input to a native XGBoost model
    '''
    import xgboost as xgb
    import pandas as pd
    import numpy as np
    from scipy.sparse import csr_matrix
    assert isinstance(predictors, list) or isinstance(predictors, tuple)
    assert h2oXGBoostModel._model_json['algo'] == 'xgboost', \
        "convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only."
    # Work on a frame containing only the predictors plus the response.
    tempFrame = self[predictors].cbind(self[yresp])
    colnames = tempFrame.names
    if type(predictors[0])==type(1): # convert integer indices to column names
        temp = []
        for colInd in predictors:
            temp.append(colnames[colInd])
        predictors = temp
    if (type(yresp) == type(1)):
        tempy = colnames[yresp]
        yresp = tempy # column name of response column
    enumCols = [] # extract enum columns out to process them
    enumColsIndices = []    # store enum column indices
    typeDict = self.types
    for predName in predictors:
        if str(typeDict[predName])=='enum':
            enumCols.append(predName)
            enumColsIndices.append(colnames.index(predName))
    pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)
    nrows = tempFrame.nrow
    # convert H2OFrame to DMatrix starts here
    if len(enumCols) > 0:   # enumCols contain all enum column names
        allDomain = tempFrame.levels() # list all domain levels with column indices
        domainLen = []
        for enumIndex in enumColsIndices:
            if len(allDomain[enumIndex])>0:
                # Negated lengths so that argsort orders by decreasing
                # domain size.
                domainLen.append(len(allDomain[enumIndex])*-1)
        incLevel = np.argsort(domainLen) # indices of enum column indices with decreasing domain length
        # need to move enum columns to the front, highest level first
        c2 = tempFrame[enumCols[incLevel[0]]]
        tempFrame = tempFrame.drop(enumCols[incLevel[0]])
        for index in range(1, len(incLevel)):
            c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])
            tempFrame = tempFrame.drop(enumCols[incLevel[index]])
        enumCols = c2.names
        tempFrame = c2.cbind(tempFrame)
        pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)  # redo translation from H2O to panda
        # One-hot expand each enum column the way H2O does (including the
        # extra missing(NA) column) and stitch the pieces back together.
        pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())
        pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
        for colInd in range(1, len(enumCols)):
            cname=enumCols[colInd]
            ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())
            pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
            pandaFtrain.drop([cname], axis=1, inplace=True)
        pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
    # Response goes first, converted to numeric.
    c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
    pandaFtrain.drop([yresp], axis=1, inplace=True)
    pandaF = pd.concat([c0, pandaFtrain], axis=1)
    pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
    newX = list(pandaFtrain.columns.values)
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0; this code
    # presumably targets an older pinned pandas -- confirm before upgrading.
    data = pandaF.as_matrix(newX)
    label = pandaF.as_matrix([yresp])
    # Emit a sparse or dense DMatrix depending on how the model was trained.
    return xgb.DMatrix(data=csr_matrix(data), label=label) \
        if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, label=label)
|
This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.
This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains
numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)
column. There can be NAs in any columns.
Follow the steps below to compare H2OXGBoost and native XGBoost:
1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:
h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()
h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile
h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)
2. Derive the DMatrix from H2OFrame:
nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)
3. Derive the parameters for native XGBoost:
nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()
4. Train your native XGBoost model and generate a prediction:
nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])
nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1])
5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native XGBoost.
:param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost
:param predictors: List of predictor columns, can be column names or indices
:param yresp: response column, can be column index or name
:param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier
:return: DMatrix that can be an input to a native XGBoost model
|
def list_pending_work_units(self, work_spec_name, start=0, limit=None):
    """Get a dictionary of in-progress work units for some work spec.
    The dictionary is from work unit name to work unit definition.
    Units listed here should be worked on by some worker.
    """
    spec_key = WORK_UNITS_ + work_spec_name
    # NOTE(review): priority >= now appears to select leased/in-progress
    # units -- confirm against the registry's priority semantics.
    return self.registry.filter(spec_key,
                                priority_min=time.time(),
                                start=start, limit=limit)
|
Get a dictionary of in-progress work units for some work spec.
The dictionary is from work unit name to work unit definition.
Units listed here should be worked on by some worker.
|
def complete_sum(self):
    """
    Return an equivalent DNF expression that includes all prime
    implicants.
    """
    summed = self.node.complete_sum()
    # Identity means the node already was a complete sum; reuse self.
    return self if summed is self.node else _expr(summed)
|
Return an equivalent DNF expression that includes all prime
implicants.
|
def _generate_destination_for_source(self, src_ase):
    # type: (SyncCopy, blobxfer.models.azure.StorageEntity) ->
    #        blobxfer.models.azure.StorageEntity)
    """Generate entities for source path.  This is a generator: it yields
    one destination entity per destination path whose copy conditions say
    the source should be copied.
    :param SyncCopy self: this
    :param blobxfer.models.azure.StorageEntity src_ase: source ase
    :rtype: blobxfer.models.azure.StorageEntity
    :return: destination storage entity
    """
    # create a storage entity for each destination
    for sa, cont, name, dpath in self._get_destination_paths():
        if self._spec.options.rename:
            # Rename mode: the destination name is used as-is and must not
            # be a bare directory ('.'), since multiple sources would clash.
            name = str(pathlib.Path(name))
            if name == '.':
                raise RuntimeError(
                    'attempting rename multiple files to a directory')
        else:
            # Normal mode: append the source name under the destination path.
            name = str(pathlib.Path(name) / src_ase.name)
        # translate source mode to dest mode
        dst_mode = self._translate_src_mode_to_dst_mode(src_ase.mode)
        dst_ase = self._check_for_existing_remote(sa, cont, name, dst_mode)
        if dst_ase is None:
            # Destination does not exist yet: build a fresh entity locally.
            dst_ase = blobxfer.models.azure.StorageEntity(cont, ed=None)
            dst_ase.populate_from_local(
                sa, cont, name, dst_mode, src_ase.cache_control)
        dst_ase.size = src_ase.size
        # overwrite tier with specified storage tier
        if (dst_mode == blobxfer.models.azure.StorageModes.Block and
                self._spec.options.access_tier is not None):
            dst_ase.access_tier = self._spec.options.access_tier
        # check condition for dst
        action = self._check_copy_conditions(src_ase, dst_ase)
        if action == SynccopyAction.Copy:
            yield dst_ase
        elif action == SynccopyAction.Skip:
            # add to exclusion set if skipping, so the delete-extraneous
            # pass does not remove a destination we deliberately kept
            if self._spec.options.delete_extraneous_destination:
                uid = (
                    blobxfer.operations.synccopy.SyncCopy.
                    create_deletion_id(
                        dst_ase._client, dst_ase.container, dst_ase.name)
                )
                self._delete_exclude.add(uid)
            if self._general_options.dry_run:
                logger.info('[DRY RUN] skipping: {} -> {}'.format(
                    src_ase.path, dst_ase.path))
|
Generate entities for source path
:param SyncCopy self: this
:param blobxfer.models.azure.StorageEntity src_ase: source ase
:rtype: blobxfer.models.azure.StorageEntity
:return: destination storage entity
|
def isModified(self):
    """Check if either the datastream content or profile fields have changed
    and should be saved to Fedora.
    :rtype: boolean
    """
    # NOTE: only check content digest if locally cached content is set
    # (content already pulled or new content set); otherwise this
    # results in pulling content down to checksum it !
    # bool() guarantees the documented boolean return type; the previous
    # `a or b and c` expression could leak None or the content object.
    content_changed = bool(self._content) and self._content_digest() != self.digest
    return bool(self.info_modified) or content_changed
|
Check if either the datastream content or profile fields have changed
and should be saved to Fedora.
:rtype: boolean
|
def set(self, id_, lineno, value='', fname=None, args=None):
    """ Like the above, but issues no warning on duplicate macro
    definitions.
    """
    if fname is None:
        # Default to the file currently being processed, or the program's
        # own filename when no file has been opened yet.
        fname = CURRENT_FILE[-1] if CURRENT_FILE else sys.argv[0]
    self.table[id_] = ID(id_, args, value, lineno, fname)
|
Like the above, but issues no warning on duplicate macro
definitions.
|
def write_training_metrics(self):
    """
    Write Training Metrics to CSV.

    Writes the header row (``FIELD_NAMES``) followed by every collected
    row in ``self.rows`` to the file at ``self.path``.
    """
    # newline='' is required by the csv module when passing a file object
    # to csv.writer; without it the output gains blank rows on Windows.
    with open(self.path, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(FIELD_NAMES)
        writer.writerows(self.rows)
|
Write Training Metrics to CSV
|
def load_config(path):
    """
    Load device configuration from file path and return list with parsed lines.
    :param path: Location of configuration file.
    :type path: str
    :rtype: list
    """
    parsed = []
    with open(path, 'r') as config_file:
        for raw_line in config_file.readlines():
            # Skip blank lines and full-line comments.
            if not raw_line.strip() or raw_line.startswith("#"):
                continue
            parsed.append(raw_line.replace("\n", ""))
    return parsed
|
Load device configuration from file path and return list with parsed lines.
:param path: Location of configuration file.
:type path: str
:rtype: list
|
def decompile_pyc(bin_pyc, output=sys.stdout):
    '''
    decompile apython pyc or pyo binary file.
    :param bin_pyc: input file objects
    :param output: output file objects
    '''
    from turicreate.meta.asttools import python_source
    raw = bin_pyc.read()
    # Skip the 8-byte header (magic + timestamp) before the marshalled code.
    # NOTE(review): CPython >= 3.7 pyc files use a 16-byte header -- confirm
    # the targeted interpreter version.
    code_obj = marshal.loads(raw[8:])
    python_source(make_module(code_obj), file=output)
|
decompile apython pyc or pyo binary file.
:param bin_pyc: input file objects
:param output: output file objects
|
def memory_usage(self, index=True, deep=False):
    """
    Return the memory usage of each column in bytes.
    The memory usage can optionally include the contribution of
    the index and elements of `object` dtype.
    This value is displayed in `DataFrame.info` by default. This can be
    suppressed by setting ``pandas.options.display.memory_usage`` to False.
    Parameters
    ----------
    index : bool, default True
        Specifies whether to include the memory usage of the DataFrame's
        index in returned Series. If ``index=True``, the memory usage of
        the index is the first item in the output.
    deep : bool, default False
        If True, introspect the data deeply by interrogating
        `object` dtypes for system-level memory consumption, and include
        it in the returned values.
    Returns
    -------
    Series
        A Series whose index is the original column names and whose values
        is the memory usage of each column in bytes.
    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of an
        ndarray.
    Series.memory_usage : Bytes consumed by a Series.
    Categorical : Memory-efficient array for string values with
        many repeated values.
    DataFrame.info : Concise summary of a DataFrame.
    Examples
    --------
    >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
    >>> data = dict([(t, np.ones(shape=5000).astype(t))
    ...              for t in dtypes])
    >>> df = pd.DataFrame(data)
    >>> df.head()
       int64  float64  complex128 object  bool
    0      1      1.0    1.0+0.0j      1  True
    1      1      1.0    1.0+0.0j      1  True
    2      1      1.0    1.0+0.0j      1  True
    3      1      1.0    1.0+0.0j      1  True
    4      1      1.0    1.0+0.0j      1  True
    >>> df.memory_usage()
    Index            80
    int64         40000
    float64       40000
    complex128    80000
    object        40000
    bool           5000
    dtype: int64
    >>> df.memory_usage(index=False)
    int64         40000
    float64       40000
    complex128    80000
    object        40000
    bool           5000
    dtype: int64
    The memory footprint of `object` dtype columns is ignored by default:
    >>> df.memory_usage(deep=True)
    Index            80
    int64         40000
    float64       40000
    complex128    80000
    object       160000
    bool           5000
    dtype: int64
    Use a Categorical for efficient storage of an object-dtype column with
    many repeated values.
    >>> df['object'].astype('category').memory_usage(deep=True)
    5168
    """
    per_column = [col.memory_usage(index=False, deep=deep)
                  for _, col in self.iteritems()]
    result = Series(per_column, index=self.columns)
    if index:
        # The index's usage is reported first, under the label 'Index'.
        index_usage = Series(self.index.memory_usage(deep=deep),
                             index=['Index'])
        result = index_usage.append(result)
    return result
|
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
|
def check(dependency=None, timeout=60):
    """Mark function as a check.

    :param dependency: the check that this check depends on
    :type dependency: function
    :param timeout: maximum number of seconds the check can run
    :type timeout: int

    When a check depends on another, the former will only run if the latter passes.
    Additionally, the dependent check will inherit the filesystem of its dependency.
    This is particularly useful when writing e.g., a ``compiles`` check that compiles a
    student's program (and checks that it compiled successfully). Any checks that run the
    student's program will logically depend on this check, and since they inherit the
    resulting filesystem of the check, they will immediately have access to the compiled
    program without needing to recompile.

    Example usage::

        @check50.check() # Mark 'exists' as a check
        def exists():
            \"""hello.c exists\"""
            check50.exists("hello.c")

        @check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
        def compiles():
            \"""hello.c compiles\"""
            check50.c.compile("hello.c")

        @check50.check(compiles)
        def prints_hello():
            \"""prints "Hello, world!\\\\n\"""
            # Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
            check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
    """
    def decorator(check):
        # Modules are evaluated from the top of the file down, so _check_names will
        # contain the names of the checks in the order in which they are declared
        _check_names.append(check.__name__)
        check._check_dependency = dependency

        @functools.wraps(check)
        def wrapper(checks_root, dependency_state):
            # Result template
            result = CheckResult.from_check(check)
            # Any shared (returned) state
            state = None
            try:
                # Setup check environment, copying disk state from dependency
                # ("-" is the sentinel directory used when there is no dependency)
                internal.run_dir = checks_root / check.__name__
                src_dir = checks_root / (dependency.__name__ if dependency else "-")
                shutil.copytree(src_dir, internal.run_dir)
                os.chdir(internal.run_dir)

                # Run registered functions before/after running check and set timeout
                with internal.register, _timeout(seconds=timeout):
                    # Pass the dependency's shared state only if the check accepts args
                    args = (dependency_state,) if inspect.getfullargspec(check).args else ()
                    state = check(*args)
            except Failure as e:
                # Expected failure raised by assertions inside the check
                result.passed = False
                result.cause = e.payload
            except BaseException as e:
                # Unexpected error (including timeout): mark result indeterminate
                result.passed = None
                result.cause = {"rationale": _("check50 ran into an error while running checks!")}
                log(repr(e))
                for line in traceback.format_tb(e.__traceback__):
                    log(line.rstrip())
                log(_("Contact sysadmins@cs50.harvard.edu with the URL of this check!"))
            else:
                result.passed = True
            finally:
                # Always attach the accumulated log/data to the result
                result.log = _log
                result.data = _data
            return result, state
        return wrapper
    return decorator
|
Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
|
def add_params(endpoint, params):
    """
    Combine query endpoint and params.

    Any query string already present on ``endpoint`` is preserved; entries
    from ``params`` are merged into it (``params`` wins on duplicate keys).
    ``params`` may be None or empty, in which case the endpoint's own query
    is simply re-encoded.

    Example::
        >>> add_params("https://www.google.com/search", {"q": "iphone"})
        https://www.google.com/search?q=iphone
    """
    # Pure-stdlib implementation: the previous version went through
    # requests' PreparedRequest and carried a dead Python-2 branch.
    from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

    parts = list(urlparse(endpoint))
    query = dict(parse_qsl(parts[4]))  # parts[4] is the existing query string
    query.update(params or {})
    parts[4] = urlencode(query)
    return urlunparse(parts)
|
Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone
|
def ping(self):
    """sends a NOP packet and waits response; returns None"""
    ret, payload = self.sendmess(MSG_NOP, bytes())
    # A NOP acknowledgement must be empty with a non-positive return code;
    # anything else is a protocol violation.
    if payload or ret > 0:
        raise ProtocolError('invalid reply to ping message')
    # Negative return codes are server-side errors.
    if ret < 0:
        raise OwnetError(-ret, self.errmess[-ret])
|
sends a NOP packet and waits response; returns None
|
def get_git_isolation():
    """Get Git isolation from the current context.

    Returns None when there is no active click context or the context's
    meta mapping has no isolation entry.
    """
    context = click.get_current_context(silent=True)
    if not context:
        return None
    meta = context.meta
    if GIT_ISOLATION in meta:
        return meta[GIT_ISOLATION]
    return None
|
Get Git isolation from the current context.
|
def getPDFstr(s):
    """ Return a PDF string depending on its coding.

    Notes:
        If only ascii then "(original)" is returned, else if only 8 bit chars
        then "(original)" with interspersed octal strings \nnn is returned,
        else a string "<FEFF[hexstring]>" is returned, where [hexstring] is the
        UTF-16BE encoding of the original.
    """
    if not bool(s):
        return "()"

    def _utf16be_hex(txt):
        # BOM FE FF prepended, whole string hex-encoded, wrapped in <...>
        raw = hexlify(bytearray([254, 255]) + bytearray(txt, "UTF-16BE"))
        body = raw if fitz_py2 else raw.decode()
        return "<" + body + ">"

    # control characters with a dedicated PDF escape sequence
    escapes = {8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r"}
    pieces = []
    for ch in s:
        code = ord(ch)
        if code > 255:
            # any char beyond 8 bits forces the UTF-16BE hex form for the
            # entire string
            return _utf16be_hex(s)
        if 31 < code < 127:
            # printable ASCII; delimiters and backslash need escaping
            if ch in ("(", ")", "\\"):
                pieces.append("\\")
            pieces.append(ch)
        elif code > 127:
            # 8-bit char as 3-digit octal escape
            pieces.append("\\" + oct(code)[-3:])
        elif code in escapes:
            pieces.append(escapes[code])
        else:
            # unsupported control char marker
            pieces.append("\\267")
    return "(" + "".join(pieces) + ")"
|
Return a PDF string depending on its coding.
Notes:
If only ascii then "(original)" is returned, else if only 8 bit chars
then "(original)" with interspersed octal strings \nnn is returned,
else a string "<FEFF[hexstring]>" is returned, where [hexstring] is the
UTF-16BE encoding of the original.
|
def rm_file_or_dir(path, ignore_errors=True):
    """
    Helper function to clean a certain filepath.

    Removes whatever exists at ``path``: a symlink is unlinked (never
    followed), a directory tree is removed recursively, a regular file is
    deleted. Does nothing when ``path`` does not exist.

    Parameters
    ----------
    path : str
        Filesystem path to remove.
    ignore_errors : bool, default True
        Passed through to ``shutil.rmtree`` when removing a directory tree.

    Returns
    -------
    None
    """
    # lexists (not exists) so dangling symlinks are detected and removed too;
    # os.path.exists() follows the link and reports False for a broken one.
    if not os.path.lexists(path):
        return
    if os.path.islink(path):
        # unlink the link itself, never touch the target
        os.unlink(path)
    elif os.path.isdir(path):
        shutil.rmtree(path, ignore_errors=ignore_errors)
    else:
        os.remove(path)
|
Helper function to clean a certain filepath
Parameters
----------
path
Returns
-------
|
def check_policies(self, account, account_policies, aws_policies):
    """Iterate through the policies of a specific account and create or update the policy if its missing or
    does not match the policy documents from Git. Returns a dict of all the policies added to the account
    (does not include updated policies)

    Args:
        account (:obj:`Account`): Account to check policies for
        account_policies (`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
            account
        aws_policies (`dict` of `str`: `dict`): A dictionary containing the non-AWS managed policies on the account

    Returns:
        :obj:`dict` of `str`: `str`
    """
    self.log.debug('Fetching policies for {}'.format(account.account_name))
    sess = get_aws_session(account)
    iam = sess.client('iam')
    added = {}
    for policyName, account_policy in account_policies.items():
        # policies pulled from github are likely bytes and need to be converted
        if isinstance(account_policy, bytes):
            account_policy = account_policy.decode('utf-8')

        # Using re.sub instead of format since format breaks on the curly braces of json
        gitpol = json.loads(
            re.sub(
                r'{AD_Group}',
                account.ad_group_base or account.account_name,
                account_policy
            )
        )
        if policyName in aws_policies:
            pol = aws_policies[policyName]
            # Fetch the currently active (default) version of the AWS-side
            # policy to compare against the Git-side document.
            awspol = iam.get_policy_version(
                PolicyArn=pol['Arn'],
                VersionId=pol['DefaultVersionId']
            )['PolicyVersion']['Document']
            if awspol != gitpol:
                self.log.warn('IAM Policy {} on {} does not match Git policy documents, updating'.format(
                    policyName,
                    account.account_name
                ))
                # Passing arn= makes create_policy update the existing policy
                self.create_policy(account, iam, json.dumps(gitpol, indent=4), policyName, arn=pol['Arn'])
            else:
                self.log.debug('IAM Policy {} on {} is up to date'.format(
                    policyName,
                    account.account_name
                ))
        else:
            self.log.warn('IAM Policy {} is missing on {}'.format(policyName, account.account_name))
            response = self.create_policy(account, iam, json.dumps(gitpol), policyName)
            # only newly created policies are reported back to the caller
            added[policyName] = response['Policy']
    return added
|
Iterate through the policies of a specific account and create or update the policy if its missing or
does not match the policy documents from Git. Returns a dict of all the policies added to the account
(does not include updated policies)
Args:
account (:obj:`Account`): Account to check policies for
account_policies (`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
account
aws_policies (`dict` of `str`: `dict`): A dictionary containing the non-AWS managed policies on the account
Returns:
:obj:`dict` of `str`: `str`
|
def pypi_search(self):
    """
    Search PyPI by metadata keyword
    e.g. yolk -S name=yolk AND license=GPL

    @param spec: Cheese Shop search spec
    @type spec: list of strings

    spec examples:
        ["name=yolk"]
        ["license=GPL"]
        ["name=yolk", "AND", "license=GPL"]

    @returns: 0 on success or 1 if mal-formed search spec
    """
    spec = self.pkg_spec
    # Add remaining cli arguments to options.pypi_search
    search_arg = self.options.pypi_search
    spec.insert(0, search_arg.strip())
    (spec, operator) = self.parse_search_spec(spec)
    if not spec:
        # mal-formed search spec
        return 1
    for pkg in self.pypi.search(spec, operator):
        if pkg['summary']:
            # NOTE(review): .encode('utf-8') yields bytes; printing bytes
            # under Python 3 shows the b'...' repr -- looks Python-2 era,
            # confirm the supported interpreter.
            summary = pkg['summary'].encode('utf-8')
        else:
            summary = ""
        print("""%s (%s):
    %s
""" % (pkg['name'].encode('utf-8'), pkg["version"],
       summary))
    return 0
|
Search PyPI by metadata keyword
e.g. yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec
|
def AddWarning(self, warning):
    """Adds a warning.

    Args:
        warning (ExtractionWarning): an extraction warning.

    Raises:
        IOError: when the storage writer is closed.
        OSError: when the storage writer is closed.
    """
    self._RaiseIfNotWritable()
    self._storage_file.AddWarning(warning)
    # keep the running tally in sync with what was written to storage
    self.number_of_warnings += 1
|
Adds a warning.
Args:
warning (ExtractionWarning): an extraction warning.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|
def get_user_groups(self, user_name):
    """Get a list of groups associated to a user.

    :param user_name: name of user to list groups
    :returns: list of groups
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    response = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
    )
    # anything other than HTTP 200 is surfaced to the caller
    if response.status_code != 200:
        raise HTTPResponseError(response)
    tree = ET.fromstring(response.content)
    self._check_ocs_status(tree, [100])
    return [group.text for group in tree.find('data/groups')]
|
Get a list of groups associated to a user.
:param user_name: name of user to list groups
:returns: list of groups
:raises: HTTPResponseError in case an HTTP error status was returned
|
def match(self, s=''):
    """return all options that match, in the name or the description,
    with string `s`, case is disregarded.

    Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
    options.
    """
    needle = s.lower()
    hits = {}
    for key in sorted(self):
        # match against the rendered "name='value'" form, case-folded
        entry = str(key) + '=\'' + str(self[key]) + '\''
        if needle in entry.lower():
            hits[key] = self[key]
    return CMAOptions(hits, unchecked=True)
|
return all options that match, in the name or the description,
with string `s`, case is disregarded.
Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
options.
|
def for_entity(obj, check_support_attachments=False):
    """Return attachments on an entity.

    When *check_support_attachments* is true and the entity does not
    support attachments, an empty list is returned instead.
    """
    unsupported = check_support_attachments and not supports_attachments(obj)
    return [] if unsupported else getattr(obj, ATTRIBUTE)
|
Return attachments on an entity.
|
def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None):
    """
    Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
    https://github.com/nose-devs/nose/issues/725

    :param parsed_att: the parsed attribute value that could not be converted
    :param attribute_type: the expected target type
    :param conversion_finder: the conversion finder that failed to produce a chain (may be None)
    :param errors: optional mapping of types to the exceptions raised while trying converters
    :return: a NoConverterFoundForObjectType carrying a descriptive message
    """
    parsed_type_str = get_pretty_type_str(type(parsed_att))
    expected_type_str = get_pretty_type_str(attribute_type)
    if conversion_finder is None:
        msg = "No conversion finder provided to find a converter between parsed attribute '{patt}' of type " \
              "'{typ}' and expected type '{expt}'.".format(patt=str(parsed_att),
                                                           typ=parsed_type_str,
                                                           expt=expected_type_str)
    else:
        msg = "No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type " \
              "'{expt}' using conversion finder {conv}.".format(patt=parsed_att,
                                                                typ=parsed_type_str,
                                                                expt=expected_type_str,
                                                                conv=conversion_finder)
    if errors is not None:
        msg = msg + ' ' + str(errors)
    return NoConverterFoundForObjectType(msg)
|
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parsed_att:
:param attribute_type:
:param conversion_finder:
:return:
|
def get_vcsrc(self):
    """
    Returns in-memory created module pointing at user's configuration
    and extra code/commands. By default tries to create module from
    :setting:`VCSRC_PATH`.
    """
    try:
        return create_module('vcsrc', settings.VCSRC_PATH)
    except IOError:
        # a missing rc file is not fatal: report it and continue with None
        self.stderr.write("No module or package at %s\n"
                          % settings.VCSRC_PATH)
        return None
|
Returns in-memory created module pointing at user's configuration
and extra code/commands. By default tries to create module from
:setting:`VCSRC_PATH`.
|
def timings(reps, func, *args, **kw):
    """timings(reps,func,*args,**kw) -> (t_total,t_per_call)

    Execute a function reps times, return a tuple with the elapsed total CPU
    time in seconds and the time per call. These are just the first two values
    in timings_out()."""
    total, per_call = timings_out(reps, func, *args, **kw)[:2]
    return (total, per_call)
|
timings(reps,func,*args,**kw) -> (t_total,t_per_call)
Execute a function reps times, return a tuple with the elapsed total CPU
time in seconds and the time per call. These are just the first two values
in timings_out().
|
def _local_list_files(self, args):
'''
List files for a package file
'''
if len(args) < 2:
raise SPMInvocationError('A package filename must be specified')
pkg_file = args[1]
if not os.path.exists(pkg_file):
raise SPMPackageError('Package file {0} not found'.format(pkg_file))
formula_tar = tarfile.open(pkg_file, 'r:bz2')
pkg_files = formula_tar.getmembers()
for member in pkg_files:
self.ui.status(member.name)
|
List files for a package file
|
def create(self, metadata, publisher_account,
           service_descriptors=None, providers=None,
           use_secret_store=True):
    """
    Register an asset in both the keeper's DIDRegistry (on-chain) and in the Metadata store (
    Aquarius).

    :param metadata: dict conforming to the Metadata accepted by Ocean Protocol.
    :param publisher_account: Account of the publisher registering this asset
    :param service_descriptors: list of ServiceDescriptor tuples of length 2.
        The first item must be one of ServiceTypes and the second
        item is a dict of parameters and values required by the service
    :param providers: list of addresses of providers of this asset (a provider is
        an ethereum account that is authorized to provide asset services)
    :param use_secret_store: when False, files are encrypted through Brizo's
        encrypt endpoint instead of the secret store
    :return: DDO instance, or None when on-chain registration or the
        Aquarius publish fails
    """
    assert isinstance(metadata, dict), f'Expected metadata of type dict, got {type(metadata)}'
    if not metadata or not Metadata.validate(metadata):
        raise OceanInvalidMetadata('Metadata seems invalid. Please make sure'
                                   ' the required metadata values are filled in.')

    # copy metadata so we don't change the original
    metadata_copy = copy.deepcopy(metadata)

    # Create a DDO object
    did = DID.did()
    logger.debug(f'Generating new did: {did}')
    # Check if it's already registered first!
    if did in self._get_aquarius().list_assets():
        raise OceanDIDAlreadyExist(
            f'Asset id {did} is already registered to another asset.')
    ddo = DDO(did)

    # Add public key and authentication
    ddo.add_public_key(did, publisher_account.address)
    ddo.add_authentication(did, PUBLIC_KEY_TYPE_RSA)

    # Setup metadata service
    # First replace `files` with encrypted `files`
    assert metadata_copy['base'][
        'files'], 'files is required in the metadata base attributes.'
    assert Metadata.validate(metadata), 'metadata seems invalid.'
    logger.debug('Encrypting content urls in the metadata.')
    brizo = BrizoProvider.get_brizo()
    if not use_secret_store:
        # Encrypt via Brizo's encrypt endpoint, authorized by the
        # publisher's signature over the asset id.
        encrypt_endpoint = brizo.get_encrypt_endpoint(self._config)
        files_encrypted = brizo.encrypt_files_dict(
            metadata_copy['base']['files'],
            encrypt_endpoint,
            ddo.asset_id,
            publisher_account.address,
            self._keeper.sign_hash(ddo.asset_id, publisher_account)
        )
    else:
        # Encrypt the serialized files list via the secret store.
        files_encrypted = self._get_secret_store(publisher_account) \
            .encrypt_document(
            did_to_id(did),
            json.dumps(metadata_copy['base']['files']),
        )
    metadata_copy['base']['checksum'] = ddo.generate_checksum(did, metadata)
    ddo.add_proof(metadata_copy['base']['checksum'], publisher_account, self._keeper)

    # only assign if the encryption worked
    if files_encrypted:
        logger.info(f'Content urls encrypted successfully {files_encrypted}')
        index = 0
        # Give each file entry an index and strip its plaintext url.
        for file in metadata_copy['base']['files']:
            file['index'] = index
            index = index + 1
            del file['url']
        metadata_copy['base']['encryptedFiles'] = files_encrypted
    else:
        raise AssertionError('Encrypting the files failed. Make sure the secret store is'
                             ' setup properly in your config file.')

    # DDO url and `Metadata` service
    ddo_service_endpoint = self._get_aquarius().get_service_endpoint(did)
    metadata_service_desc = ServiceDescriptor.metadata_service_descriptor(metadata_copy,
                                                                          ddo_service_endpoint)
    if not service_descriptors:
        # No descriptors supplied: default to authorization + access services.
        service_descriptors = [ServiceDescriptor.authorization_service_descriptor(
            self._config.secret_store_url)]
        brizo = BrizoProvider.get_brizo()
        service_descriptors += [ServiceDescriptor.access_service_descriptor(
            metadata[MetadataBase.KEY]['price'],
            brizo.get_consume_endpoint(self._config),
            brizo.get_service_endpoint(self._config),
            3600,
            self._keeper.escrow_access_secretstore_template.address
        )]
    else:
        # Descriptors supplied: ensure authorization is present, otherwise
        # append the default access service.
        service_types = set(map(lambda x: x[0], service_descriptors))
        if ServiceTypes.AUTHORIZATION not in service_types:
            service_descriptors += [ServiceDescriptor.authorization_service_descriptor(
                self._config.secret_store_url)]
        else:
            brizo = BrizoProvider.get_brizo()
            service_descriptors += [ServiceDescriptor.access_service_descriptor(
                metadata[MetadataBase.KEY]['price'],
                brizo.get_consume_endpoint(self._config),
                brizo.get_service_endpoint(self._config),
                3600,
                self._keeper.escrow_access_secretstore_template.address
            )]

    # Add all services to ddo
    service_descriptors = service_descriptors + [metadata_service_desc]
    for service in ServiceFactory.build_services(did, service_descriptors):
        ddo.add_service(service)
    logger.debug(
        f'Generated ddo and services, DID is {ddo.did},'
        f' metadata service @{ddo_service_endpoint}, '
        f'`Access` service initialize @{ddo.services[0].endpoints.service}.')
    response = None

    # register on-chain
    registered_on_chain = self._keeper.did_registry.register(
        did,
        checksum=Web3Provider.get_web3().sha3(text=metadata_copy['base']['checksum']),
        url=ddo_service_endpoint,
        account=publisher_account,
        providers=providers
    )
    if registered_on_chain is False:
        logger.warning(f'Registering {did} on-chain failed.')
        return None
    logger.info(f'DDO with DID {did} successfully registered on chain.')
    try:
        # publish the new ddo in ocean-db/Aquarius
        response = self._get_aquarius().publish_asset_ddo(ddo)
        logger.debug('Asset/ddo published successfully in aquarius.')
    except ValueError as ve:
        raise ValueError(f'Invalid value to publish in the metadata: {str(ve)}')
    except Exception as e:
        # best-effort publish: failure is logged and reported via None below
        logger.error(f'Publish asset in aquarius failed: {str(e)}')
    if not response:
        return None
    return ddo
|
Register an asset in both the keeper's DIDRegistry (on-chain) and in the Metadata store (
Aquarius).
:param metadata: dict conforming to the Metadata accepted by Ocean Protocol.
:param publisher_account: Account of the publisher registering this asset
:param service_descriptors: list of ServiceDescriptor tuples of length 2.
The first item must be one of ServiceTypes and the second
item is a dict of parameters and values required by the service
:param providers: list of addresses of providers of this asset (a provider is
an ethereum account that is authorized to provide asset services)
:return: DDO instance
|
def main():
    """
    NAME
        core_depthplot.py

    DESCRIPTION
        plots various measurements versus core_depth or age.  plots data flagged as 'FS-SS-C' as discrete samples.

    SYNTAX
        core_depthplot.py [command line options]
        # or, for Anaconda users:
        core_depthplot_anaconda [command line options]

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input measurments format file
        -fsum FILE: specify input LIMS database (IODP) core summary csv file
        -fwig FILE: specify input depth,wiggle to plot, in magic format with sample_core_depth key for depth
        -fsa FILE: specify input er_samples format file from magic for depth
        -fa FILE: specify input ages format file from magic for age
              NB: must have either -fsa OR -fa (not both)
        -fsp FILE sym size: specify input zeq_specimen format file from magic, sym and size
              NB: PCAs will have specified color, while fisher means will be white with specified color as the edgecolor
        -fres FILE specify input pmag_results file from magic, sym and size
        -LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
        -S do not plot blanket treatment data (if this is set, you don't need the -LP)
        -sym SYM SIZE, symbol, size for continuous points (e.g., ro 5, bs 10, g^ 10 for red dot, blue square, green triangle), default is blue dot at 5 pt
        -D do not plot declination
        -M do not plot magnetization
        -log  plot magnetization  on a log scale
        -L do not connect dots with a line
        -I do not plot inclination
        -d min max [in m] depth range to plot
        -n normalize by weight in er_specimen table
        -Iex: plot the expected inc at lat - only available for results with lat info in file
        -ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
           TS: [ck95, gts04, gts12]
        -ds [mbsf,mcd] specify depth scale, mbsf default
        -fmt [svg, eps, pdf, png] specify output format for plot (default: svg)
        -sav save plot silently

     DEFAULTS:
         Measurements file: measurements.txt
         Samples file: samples.txt
         NRM step
         Summary file: none
    """
    args = sys.argv
    if '-h' in args:
        print(main.__doc__)
        sys.exit()
    # declare every flag with (name, required, default) before extraction
    dataframe = extractor.command_line_dataframe([ ['f', False, 'measurements.txt'], ['fsum', False, ''],
                                                   ['fwig', False, ''], ['fsa', False, ''],
                                                   ['fa', False, ''], ['fsp', False, ''],
                                                   ['fres', False, '' ], ['fmt', False, 'svg'],
                                                   ['LP', False, ''], ['n', False, False],
                                                   ['d', False, '-1 -1'], ['ts', False, ''],
                                                   ['WD', False, '.'], ['L', False, True],
                                                   ['S', False, True], ['D', False, True],
                                                   ['I', False, True], ['M', False, True],
                                                   ['log', False, 0],
                                                   ['ds', False, 'sample_core_depth'],
                                                   ['sym', False, 'bo 5'], ['ID', False, '.'],
                                                   ['sav', False, False], ['DM', False, 3]])
    checked_args = extractor.extract_and_check_args(args, dataframe)
    # unpack every flag value in the same order as the names list below
    meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol, input_dir, save, data_model_num = extractor.get_vars(
        ['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym', 'ID', 'sav', 'DM'], checked_args)

    # format some variables
    # format symbol/size
    try:
        sym, size = symbol.split()
        size = int(size)
    except:
        print('you should provide -sym in this format: ro 5')
        print('using defaults instead')
        sym, size = 'ro', 5

    # format result file, symbol, size
    if res_file:
        try:
            res_file, res_sym, res_size = res_file.split()
        except:
            print('you must provide -fres in this format: -fres filename symbol size')
            print(
                'could not parse {}, defaulting to using no result file'.format(res_file))
            res_file, res_sym, res_size = '', '', 0
    else:
        res_file, res_sym, res_size = '', '', 0

    # format specimen file, symbol, size
    if spc_file:
        try:
            spc_file, spc_sym, spc_size = spc_file.split()
        except:
            print('you must provide -fsp in this format: -fsp filename symbol size')
            print(
                'could not parse {}, defaulting to using no specimen file'.format(spc_file))
            spc_file, spc_sym, spc_size = '', '', 0
    else:
        spc_file, spc_sym, spc_size = '', '', 0

    # format min/max depth
    try:
        dmin, dmax = depth.split()
    except:
        print('you must provide -d in this format: -d dmin dmax')
        print('could not parse {}, defaulting to plotting all depths'.format(depth))
        dmin, dmax = -1, -1

    # format timescale, min/max time
    if timescale:
        try:
            timescale, amin, amax = timescale.split()
            pltTime = True
        except:
            print(
                'you must provide -ts in this format: -ts timescale minimum_age maximum_age')
            print(
                'could not parse {}, defaulting to using no timescale'.format(timescale))
            timescale, amin, amax = None, -1, -1
            pltTime = False
    else:
        timescale, amin, amax = None, -1, -1
        pltTime = False

    # format norm and wt_file
    # (-n takes an optional weight file name; a bare -n is just True)
    if norm and not isinstance(norm, bool):
        wt_file = norm
        norm = True
    else:
        norm = False
        wt_file = ''

    # format list of protcols and step
    try:
        method, step = meth.split()
    except:
        print(
            'To use the -LP flag you must provide both the protocol and the step in this format:\n-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot')
        print('Defaulting to using no protocol')
        method, step = 'LT-NO', 0

    # list of varnames
    #['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym' ]
    #meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol
    fig, figname = ipmag.core_depthplot(input_dir, meas_file, spc_file, samp_file, age_file, sum_file, wt_file, depth_scale, dmin, dmax, sym, size,
                                        spc_sym, spc_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, pltSus, logit, pltTime, timescale, amin, amax, norm, data_model_num)
    if not pmagplotlib.isServer:
        # ':' is not a valid filename character on some filesystems
        figname = figname.replace(':', '_')

    if fig and save:
        # silent save requested (-sav): write the figure and exit
        print('-I- Created plot: {}'.format(figname))
        plt.savefig(figname)
        return

    # interactive mode: show the plot in a wx window
    app = wx.App(redirect=False)
    if not fig:
        pw.simple_warning(
            'No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
        return False

    dpi = fig.get_dpi()
    pixel_width = dpi * fig.get_figwidth()
    pixel_height = dpi * fig.get_figheight()
    figname = os.path.join(dir_path, figname)
    plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
                                             fig, figname, standalone=True)
    app.MainLoop()
|
NAME
core_depthplot.py
DESCRIPTION
plots various measurements versus core_depth or age. plots data flagged as 'FS-SS-C' as discrete samples.
SYNTAX
core_depthplot.py [command line options]
# or, for Anaconda users:
core_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input measurments format file
-fsum FILE: specify input LIMS database (IODP) core summary csv file
-fwig FILE: specify input depth,wiggle to plot, in magic format with sample_core_depth key for depth
-fsa FILE: specify input er_samples format file from magic for depth
-fa FILE: specify input ages format file from magic for age
NB: must have either -fsa OR -fa (not both)
-fsp FILE sym size: specify input zeq_specimen format file from magic, sym and size
NB: PCAs will have specified color, while fisher means will be white with specified color as the edgecolor
-fres FILE specify input pmag_results file from magic, sym and size
-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
-S do not plot blanket treatment data (if this is set, you don't need the -LP)
-sym SYM SIZE, symbol, size for continuous points (e.g., ro 5, bs 10, g^ 10 for red dot, blue square, green triangle), default is blue dot at 5 pt
-D do not plot declination
-M do not plot magnetization
-log plot magnetization on a log scale
-L do not connect dots with a line
-I do not plot inclination
-d min max [in m] depth range to plot
-n normalize by weight in er_specimen table
-Iex: plot the expected inc at lat - only available for results with lat info in file
-ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
TS: [ck95, gts04, gts12]
-ds [mbsf,mcd] specify depth scale, mbsf default
-fmt [svg, eps, pdf, png] specify output format for plot (default: svg)
-sav save plot silently
DEFAULTS:
Measurements file: measurements.txt
Samples file: samples.txt
NRM step
Summary file: none
|
def load_ns_sequence(eos_name):
    """
    Load the data of an NS non-rotating equilibrium sequence
    generated using the equation of state (EOS) chosen by the
    user. [Only the 2H 2-piecewise polytropic EOS is currently
    supported. This yields NSs with large radii (15-16km).]

    Parameters
    -----------
    eos_name: string
        NS equation of state label ('2H' is the only supported
        choice at the moment)

    Returns
    ----------
    ns_sequence: 2D-array
        contains the sequence data in the form NS gravitational
        mass (in solar masses), NS baryonic mass (in solar
        masses), NS compactness (dimensionless)
    max_ns_g_mass: float
        the maximum NS gravitational mass (in solar masses) in
        the sequence (this is the mass of the most massive stable
        NS)

    Raises
    ----------
    ValueError
        if eos_name is not a supported EOS label
    """
    if eos_name != '2H':
        print('Only the 2H EOS is currently supported!')
        print('If you plan to use a different NS EOS, be sure not to filter')
        print('too many templates!\n')
        # ValueError (a subclass of Exception, so existing `except Exception`
        # callers still work) is more precise than a bare Exception here.
        raise ValueError('Unsupported EOS!')
    ns_sequence_path = os.path.join(pycbc.tmpltbank.NS_SEQUENCE_FILE_DIRECTORY, 'equil_2H.dat')
    ns_sequence = np.loadtxt(ns_sequence_path)
    # column 0 is the gravitational mass; its maximum marks the most
    # massive stable NS in the sequence
    max_ns_g_mass = max(ns_sequence[:, 0])
    return (ns_sequence, max_ns_g_mass)
|
Load the data of an NS non-rotating equilibrium sequence
generated using the equation of state (EOS) chosen by the
user. [Only the 2H 2-piecewise polytropic EOS is currently
supported. This yields NSs with large radii (15-16km).]
Parameters
-----------
eos_name: string
NS equation of state label ('2H' is the only supported
choice at the moment)
Returns
----------
ns_sequence: 3D-array
contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
max_ns_g_mass: float
the maximum NS gravitational mass (in solar masses) in
the sequence (this is the mass of the most massive stable
NS)
|
def _router_address(self, data):
"""only for IPv6 addresses"""
args = data.split()[1:]
try:
self._relay_attrs['ip_v6'].extend(args)
except KeyError:
self._relay_attrs['ip_v6'] = list(args)
|
only for IPv6 addresses
|
def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        seuser=None,
        serole=None,
        setype=None,
        serange=None,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file

    .. versionchanged:: Neon
        selinux attributes added

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,           # pylint: disable=W0633
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        # Ensure that user-provided hash string is lowercase
        if source_sum and ('hsum' in source_sum):
            source_sum['hsum'] = source_sum['hsum'].lower()
        if comments:
            __clean_tmp(sfn)
            raise CommandExecutionError(comments)
        if sfn and source and keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    # Log the path that was actually stat'ed (the source),
                    # not the cached local copy in sfn.
                    log.warning('Unable to stat %s: %s', source, exc)
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents,
                              seuser=seuser, serole=serole, setype=setype, serange=serange)
    __clean_tmp(sfn)
    return changes
|
Return a dictionary of what changes need to be made for a file
.. versionchanged:: Neon
selinux attributes added
CLI Example:
.. code-block:: bash
salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
|
def beatExtraction(st_features, win_len, PLOT=False):
    """
    This function extracts an estimate of the beat rate for a musical signal.
    ARGUMENTS:
    - st_features: a numpy array (n_feats x numOfShortTermWindows)
    - win_len: window size in seconds
    RETURNS:
    - BPM: estimates of beats per minute
    - Ratio: a confidence measure (the winning beat period's share of the
      aggregated histogram mass)
    """
    # Features that are related to the beat tracking task:
    toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
    # Longest inter-beat period considered: 2 seconds, expressed in frames.
    max_beat_time = int(round(2.0 / win_len))
    hist_all = numpy.zeros((max_beat_time,))
    for ii, i in enumerate(toWatch):                                        # for each feature
        DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean()    # dif threshold (2 x Mean of Difs)
        if DifThres<=0:
            # avoid a zero threshold when the feature is constant
            DifThres = 0.0000000000000001
        [pos1, _] = utilities.peakdet(st_features[i, :], DifThres)           # detect local maxima
        posDifs = []                                                        # compute histograms of local maxima changes
        for j in range(len(pos1)-1):
            posDifs.append(pos1[j+1]-pos1[j])
        [hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5))
        hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0
        # Normalize by the number of frames so features contribute comparably.
        hist_times = hist_times.astype(float) / st_features.shape[1]
        hist_all += hist_times
        if PLOT:
            plt.subplot(9, 2, ii + 1)
            plt.plot(st_features[i, :], 'k')
            for k in pos1:
                plt.plot(k, st_features[i, k], 'k*')
            f1 = plt.gca()
            f1.axes.get_xaxis().set_ticks([])
            f1.axes.get_yaxis().set_ticks([])
    if PLOT:
        plt.show(block=False)
        plt.figure()
    # Get beat as the argmax of the aggregated histogram:
    I = numpy.argmax(hist_all)
    # Convert the winning inter-peak period (in frames) to beats per minute.
    bpms = 60 / (hist_centers * win_len)
    BPM = bpms[I]
    # ... and the beat ratio:
    Ratio = hist_all[I] / hist_all.sum()
    if PLOT:
        # filter out >500 beats from plotting:
        hist_all = hist_all[bpms < 500]
        bpms = bpms[bpms < 500]
        plt.plot(bpms, hist_all, 'k')
        plt.xlabel('Beats per minute')
        plt.ylabel('Freq Count')
        plt.show(block=True)
    return BPM, Ratio
|
This function extracts an estimate of the beat rate for a musical signal.
ARGUMENTS:
- st_features: a numpy array (n_feats x numOfShortTermWindows)
- win_len: window size in seconds
RETURNS:
- BPM: estimates of beats per minute
- Ratio: a confidence measure
|
def _get_code_dir(self, code_path):
    """
    Resolve ``code_path`` to a directory holding the Lambda function code,
    suitable for mounting directly inside the Docker container.

    Cases handled:
    - ``code_path`` is an existing zip/jar file: unzip into a temporary
      directory and yield that directory (removed again on exit).
    - anything else (existing directory, or a path that does not exist):
      yield ``code_path`` unchanged.

    This is a generator that yields exactly once; presumably it is wrapped
    as a context manager by a decorator outside this view — confirm before
    calling it directly.

    :param string code_path: Path to the code, file or folder.
    :return string: Directory containing the Lambda function code.
    """
    temp_dir = None
    try:
        is_supported_archive = (
            os.path.isfile(code_path)
            and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS))
        if is_supported_archive:
            temp_dir = _unzip_file(code_path)
            yield temp_dir
        else:
            LOG.debug("Code %s is not a zip/jar file", code_path)
            yield code_path
    finally:
        # Clean up the extraction directory, if one was created.
        if temp_dir:
            shutil.rmtree(temp_dir)
|
Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory
- ``code_path`` is a existent directory: Return this immediately
- ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container
|
def p_variable(self, p):
    """variable : LEFT_BRACE LITERAL EQUALS unbound_segments RIGHT_BRACE
    | LEFT_BRACE LITERAL RIGHT_BRACE"""
    # NOTE: the docstring above is the ply grammar specification for this
    # rule; ply parses it, so it must not be reworded.
    # Open the binding with the literal name captured between the braces.
    p[0] = [_Segment(_BINDING, p[2])]
    if len(p) > 4:
        # Long form {name=segments}: splice in the matched segments.
        p[0].extend(p[4])
    else:
        # Short form {name} is sugar for {name=*}: add a single wildcard.
        p[0].append(_Segment(_TERMINAL, '*'))
        self.segment_count += 1
    # Close the binding.
    p[0].append(_Segment(_END_BINDING, ''))
|
variable : LEFT_BRACE LITERAL EQUALS unbound_segments RIGHT_BRACE
| LEFT_BRACE LITERAL RIGHT_BRACE
|
def _get_blob(self):
    """Lazily fetch and cache the blob, because ``get_object`` is slow."""
    blob = self.__blob
    if not blob:
        blob = self.repo.get_object(self.id)
        self.__blob = blob
    return blob
|
read blob on access only because get_object is slow
|
def _kl_independent(a, b, name="kl_independent"):
  """Batched KL divergence `KL(a || b)` for Independent distributions.
  We can leverage the fact that
  ```
  KL(Independent(a) || Independent(b)) = sum(KL(a || b))
  ```
  where the sum is over the `reinterpreted_batch_ndims`.
  Args:
    a: Instance of `Independent`.
    b: Instance of `Independent`.
    name: (optional) name to use for created ops. Default "kl_independent".
  Returns:
    Batchwise `KL(a || b)`.
  Raises:
    ValueError: If the event space for `a` and `b`, or their underlying
      distributions don't match.
  """
  p = a.distribution
  q = b.distribution
  # The KL between any two (non)-batched distributions is a scalar.
  # Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(q1 || q2), we compute
  # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
  if (tensorshape_util.is_fully_defined(a.event_shape) and
      tensorshape_util.is_fully_defined(b.event_shape)):
    # Static-shape path: shapes are known at graph-build time, so the
    # mismatch checks can raise immediately in Python.
    if a.event_shape == b.event_shape:
      if p.event_shape == q.event_shape:
        # The reinterpreted dims are the trailing dims of `a`'s event shape
        # beyond the underlying distribution's own event rank.
        num_reduce_dims = (tensorshape_util.rank(a.event_shape) -
                           tensorshape_util.rank(p.event_shape))
        reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
        return tf.reduce_sum(
            input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
            axis=reduce_dims)
      else:
        raise NotImplementedError("KL between Independents with different "
                                  "event shapes not supported.")
    else:
      raise ValueError("Event shapes do not match.")
  else:
    # Dynamic-shape path: defer the shape checks to runtime assertions.
    with tf.control_dependencies(
        [
            assert_util.assert_equal(a.event_shape_tensor(),
                                     b.event_shape_tensor()),
            assert_util.assert_equal(p.event_shape_tensor(),
                                     q.event_shape_tensor())
        ]):
      num_reduce_dims = (
          prefer_static.rank_from_shape(
              a.event_shape_tensor, a.event_shape) -
          prefer_static.rank_from_shape(
              p.event_shape_tensor, a.event_shape))
      reduce_dims = prefer_static.range(-num_reduce_dims - 1, -1, 1)
      return tf.reduce_sum(
          input_tensor=kullback_leibler.kl_divergence(p, q, name=name),
          axis=reduce_dims)
|
Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
|
def proxies(self, url):
    """
    Get the transport proxy configuration for ``url``.

    A per-host proxy from ``settings.PROXIES`` (keyed by netloc) takes
    precedence over the global ``settings.PROXY_URL``.

    :param url: string
    :return: Proxy configuration dictionary (empty when no proxy applies)
    :rtype: Dictionary
    """
    netloc = urllib.parse.urlparse(url).netloc
    proxy_url = None
    if settings.PROXIES and settings.PROXIES.get(netloc):
        proxy_url = settings.PROXIES[netloc]
    elif settings.PROXY_URL:
        proxy_url = settings.PROXY_URL
    if proxy_url is None:
        return {}
    return {"http": proxy_url, "https": proxy_url}
|
Get the transport proxy configuration
:param url: string
:return: Proxy configuration dictionary
:rtype: Dictionary
|
def partition_dumps(self):
    """Yield a set of manifest objects that partition the dumps.

    Resources are accumulated into a manifest until either the file
    count or the total size limit is reached, at which point that
    manifest is yielded and a fresh one is started. A final, partially
    filled manifest is yielded if it contains at least one file.
    """
    current = self.manifest_class()
    total_size = 0
    file_count = 0
    for resource in self.resources:
        current.add(resource)
        total_size += resource.length
        file_count += 1
        if (total_size >= self.max_size or
                file_count >= self.max_files):
            yield(current)
            # Start a new manifest
            current = self.manifest_class()
            total_size = 0
            file_count = 0
    if file_count > 0:
        yield(current)
|
Yield a set of manifest objects that partition the dumps.
Simply adds resources/files to a manifest until there are either
the correct number of files or the size limit is exceeded, then yields
that manifest.
|
def model_post_save(sender, instance, created=False, **kwargs):
    """Signal emitted after any model is saved via Django ORM.

    :param sender: Model class that was saved
    :param instance: The actual instance that was saved
    :param created: True if a new row was created
    """
    if sender._meta.app_label == 'rest_framework_reactive':
        # Ignore own events.
        return

    def notify():
        table = sender._meta.db_table
        kind = ORM_NOTIFY_KIND_CREATE if created else ORM_NOTIFY_KIND_UPDATE
        notify_observers(table, kind, instance.pk)

    # Defer the notification until the surrounding transaction commits.
    transaction.on_commit(notify)
|
Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created
|
async def take_control(self, password):
    """Take control of QTM.

    :param password: Password as entered in QTM.
    """
    cmd = "takecontrol %s" % password
    pending = self._protocol.send_command(cmd)
    return await asyncio.wait_for(pending, timeout=self._timeout)
|
Take control of QTM.
:param password: Password as entered in QTM.
|
def findGlyph(self, glyphName):
    """
    Returns a ``list`` of the group or groups associated with
    **glyphName**.

    **glyphName** will be an :ref:`type-string`. If no group is found
    to contain **glyphName** an empty ``list`` will be returned. ::

        >>> font.groups.findGlyph("A")
        ["A_accented"]
    """
    normalized = normalizers.normalizeGlyphName(glyphName)
    normalize_key = self.keyNormalizer.__func__
    return [normalize_key(groupName)
            for groupName in self._findGlyph(normalized)]
|
Returns a ``list`` of the group or groups associated with
**glyphName**.
**glyphName** will be an :ref:`type-string`. If no group is found
to contain **glyphName** an empty ``list`` will be returned. ::
>>> font.groups.findGlyph("A")
["A_accented"]
|
def xy_spectrail_arc_intersections(self, slitlet2d=None):
    """Compute intersection points of spectrum trails with arc lines.
    The member list_arc_lines is updated with new keyword:keyval
    values for each arc line.
    Parameters
    ----------
    slitlet2d : numpy array
        Slitlet image to be displayed with the computed boundaries
        and intersecting points overplotted. This argument is
        optional.
    """
    # protections: both spectrum trails and arc lines must be available
    if self.list_arc_lines is None:
        raise ValueError("Arc lines not sought")
    number_spectrum_trails = len(self.list_spectrails)
    if number_spectrum_trails == 0:
        raise ValueError("Number of available spectrum trails is 0")
    number_arc_lines = len(self.list_arc_lines)
    if number_arc_lines == 0:
        raise ValueError("Number of available arc lines is 0")
    # intersection of the arc lines with the spectrum trails
    # (note: the coordinates are computed using pixel values,
    # ranging from 1 to EMIR_NAXIS1, as given in the original
    # image reference system ---not in the slitlet image reference
    # system---)
    # First pass: rectified coordinates. Each arc line's abscissa in the
    # rectified image comes from its crossing with the middle spectrum
    # trail, and is repeated for every spectrum trail.
    self.x_inter_rect = np.array([])  # rectified image coordinates
    self.y_inter_rect = np.array([])  # rectified image coordinates
    for arcline in self.list_arc_lines:
        # middle spectrum trail
        spectrail = self.list_spectrails[self.i_middle_spectrail]
        xroot, yroot = intersection_spectrail_arcline(
            spectrail=spectrail, arcline=arcline
        )
        arcline.x_rectified = xroot
        self.x_inter_rect = np.append(
            self.x_inter_rect, [xroot] * number_spectrum_trails
        )
        for spectrail in self.list_spectrails:
            # compute expected ordinate y_expected in the rectified
            # image (linear correction defined by corr_yrect_a/b)
            y_expected = self.corr_yrect_a + self.corr_yrect_b * \
                         spectrail.y_rectified
            self.y_inter_rect = np.append(self.y_inter_rect, y_expected)
    if abs(self.debugplot) >= 10:
        print('>>> y0_frontier_lower_expected........: ',
              self.y0_frontier_lower_expected)
        print('>>> y0_frontier_upper_expected........: ',
              self.y0_frontier_upper_expected)
        print('>>> shifted y0_frontier_upper_expected: ',
              self.corr_yrect_a +
              self.corr_yrect_b * self.y0_frontier_lower)
        print('>>> shifted y0_frontier_lower_expected: ',
              self.corr_yrect_a +
              self.corr_yrect_b * self.y0_frontier_upper)
    #
    # Second pass: original (unrectified) coordinates for every
    # (arc line, spectrum trail) pair, stored in the same order as the
    # rectified arrays above.
    self.x_inter_orig = np.array([])  # original image coordinates
    self.y_inter_orig = np.array([])  # original image coordinates
    for arcline in self.list_arc_lines:
        for spectrail in self.list_spectrails:
            xroot, yroot = intersection_spectrail_arcline(
                spectrail=spectrail, arcline=arcline
            )
            self.x_inter_orig = np.append(self.x_inter_orig, xroot)
            self.y_inter_orig = np.append(self.y_inter_orig, yroot)
    # display intersection points
    if abs(self.debugplot % 10) != 0 and slitlet2d is not None:
        # display image with zscale cuts
        title = "Slitlet#" + str(self.islitlet) + \
                " (xy_spectrail_arc_intersections)"
        ax = ximshow(slitlet2d, title=title,
                     first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
                     show=False)
        # spectrum trails
        for spectrail in self.list_spectrails:
            xdum, ydum = spectrail.linspace_pix(start=self.bb_nc1_orig,
                                                stop=self.bb_nc2_orig)
            ax.plot(xdum, ydum, 'g')
        # arc lines
        for arcline in self.list_arc_lines:
            xdum, ydum = arcline.linspace_pix(start=self.bb_ns1_orig,
                                              stop=self.bb_ns2_orig)
            ax.plot(xdum, ydum, 'g')
        # intersection points
        ax.plot(self.x_inter_orig, self.y_inter_orig, 'co')
        ax.plot(self.x_inter_rect, self.y_inter_rect, 'bo')
        # show plot
        pause_debugplot(self.debugplot, pltshow=True)
|
Compute intersection points of spectrum trails with arc lines.
The member list_arc_lines is updated with new keyword:keyval
values for each arc line.
Parameters
----------
slitlet2d : numpy array
Slitlet image to be displayed with the computed boundaries
and intersecting points overplotted. This argument is
optional.
|
def css_property(self) -> str:
    """Generate a random snippet of CSS that assigns value to a property.

    :return: CSS property.

    :Examples:
        'background-color: #f4d3a1'
    """
    name = self.random.choice(list(CSS_PROPERTIES.keys()))
    value = CSS_PROPERTIES[name]
    if isinstance(value, list):
        # A fixed set of allowed keyword values: pick one.
        value = self.random.choice(value)
    elif value == 'color':
        value = self.__text.hex_color()
    elif value == 'size':
        unit = self.random.choice(CSS_SIZE_UNITS)
        value = '{}{}'.format(self.random.randint(1, 99), unit)
    return '{}: {}'.format(name, value)
|
Generate a random snippet of CSS that assigns value to a property.
:return: CSS property.
:Examples:
'background-color: #f4d3a1'
|
def __driver_stub(self, text, state):
    """Display help messages or invoke the proper completer.

    The interface of helper methods and completer methods are documented
    in the helper() decorator method and the completer() decorator
    method, respectively.

    Arguments:
        text: A string, that is the current completion scope.
        state: An integer.

    Returns:
        A string used to replace the given text, if any.
        None if no completion candidates are found.

    Raises:
        This method is called via the readline callback. If this method
        raises an error, it is silently ignored by the readline library.
        This behavior makes debugging very difficult. For this reason,
        non-driver methods are run within try-except blocks. When an
        error occurs, the stack trace is printed to self.stderr.
    """
    line = readline.get_line_buffer().lstrip()
    if line.endswith('?'):
        # Trailing '?' means the user wants help, not completion.
        self.__driver_helper(line)
        return None
    tokens = shlex.split(line)
    return self.__driver_completer(tokens, text, state)
|
Display help messages or invoke the proper completer.
The interface of helper methods and completer methods are documented in
the helper() decorator method and the completer() decorator method,
respectively.
Arguments:
text: A string, that is the current completion scope.
state: An integer.
Returns:
A string used to replace the given text, if any.
None if no completion candidates are found.
Raises:
This method is called via the readline callback. If this method
raises an error, it is silently ignored by the readline library.
This behavior makes debugging very difficult. For this reason,
non-driver methods are run within try-except blocks. When an error
occurs, the stack trace is printed to self.stderr.
|
def get_field_value_from_context(field_name, context_list):
    """
    Helper to get field value from string path.

    String '<context>' is used to go up on context stack. It just
    can be used at the beginning of path: <context>.<context>.field_name_1
    On the other hand, '<root>' is used to start lookup from first item on
    context. Returns None when the path cannot be resolved.
    """
    path = field_name.split('.')
    if path[0] == '<root>':
        # Absolute lookup: start at the bottom of the context stack.
        stack_index = 0
        path.pop(0)
    else:
        # Relative lookup: each leading '<context>' climbs one level up.
        stack_index = -1
        while path[0] == '<context>':
            stack_index -= 1
            path.pop(0)
    try:
        value = context_list[stack_index]
        while path:
            segment = path.pop(0)
            if isinstance(value, (list, tuple, ListModel)):
                if segment.isdigit():
                    segment = int(segment)
                value = value[segment]
            elif isinstance(value, dict):
                try:
                    value = value[segment]
                except KeyError:
                    # Fall back to an integer key for digit segments.
                    if segment.isdigit():
                        value = value[int(segment)]
                    else:
                        value = None
            else:
                value = getattr(value, segment)
        return value
    except (IndexError, AttributeError, KeyError, TypeError):
        return None
|
Helper to get field value from string path.
String '<context>' is used to go up on context stack. It just
can be used at the beginning of path: <context>.<context>.field_name_1
On the other hand, '<root>' is used to start lookup from first item on context.
|
def fillna_value(self, df, left, **concat_args):
    """
    This method gives subclasses the opportunity to define how
    join() fills missing values. Return value must be compatible with
    DataFrame.fillna() value argument. Examples:
    - return 0: replace missing values with zero
    - return df.mean(): replace missing values with column mean

    This default implementation fills counts with zero.
    TODO: identify counts more robustly instead of relying on column name
    Typically fill other fields with mean but can't do that during the join
    because that would leak information across a train/test split
    """
    count_columns = [
        column for column in df.columns
        if column.endswith('_count') and '_per_' not in column
    ]
    return pd.Series(0, index=count_columns)
|
This method gives subclasses the opportunity to define how
join() fills missing values. Return value must be compatible with
DataFrame.fillna() value argument. Examples:
- return 0: replace missing values with zero
- return df.mean(): replace missing values with column mean
This default implementation fills counts with zero.
TODO: identify counts more robustly instead of relying on column name
Typically fill other fields with mean but can't do that during the join
because that would leak information across a train/test split
|
async def get_connection(self, container):
    '''
    Get an exclusive connection, useful for blocked commands and transactions.

    You must call release or shutdown (not recommended) to return the
    connection after use.

    :param container: routine container
    :returns: RedisClientBase object, with some commands same as RedisClient
              like execute_command, batch_execute, register_script etc.
    '''
    # Prefer a pooled connection when one is available.
    if self._connpool:
        return RedisClientBase(self._connpool.pop(), self)
    # Otherwise create a fresh connection and select the configured db.
    conn = self._create_client(container)
    await RedisClientBase._get_connection(self, container, conn)
    await self._protocol.send_command(conn, container, 'SELECT', str(self.db))
    return RedisClientBase(conn, self)
|
Get an exclusive connection, useful for blocked commands and transactions.
You must call release or shutdown (not recommended) to return the connection after use.
:param container: routine container
:returns: RedisClientBase object, with some commands same as RedisClient like execute_command,
batch_execute, register_script etc.
|
def func_on_enter(func):
    """
    Register the `func` as a callback reacting only to ENTER.

    Note:
        This function doesn't bind the key to the element, just creates
        sort of filter, which ignores all other events.
    """
    def enter_only_handler(ev):
        ev.stopPropagation()
        if ev.keyCode == 13:  # 13 is the ENTER key code
            func(ev)

    return enter_only_handler
|
Register the `func` as a callback reacting only to ENTER.
Note:
This function doesn't bind the key to the element, just creates sort of
filter, which ignores all other events.
|
def remove_entry(self, offset, length):
    # type: (int, int) -> None
    '''
    Remove the entry in this block that matches the given offset and
    length.

    Parameters:
     offset - The offset of the entry to look for.
     length - The length of the entry to look for.
    Returns:
     Nothing.
    '''
    match_index = next(
        (index for index, entry in enumerate(self._entries)
         if entry.offset == offset and entry.length == length),
        None)
    if match_index is None:
        raise pycdlibexception.PyCdlibInternalError('Could not find an entry for the RR CE entry in the CE block!')
    del self._entries[match_index]
|
Given an offset and length, find and remove the entry in this block
that corresponds.
Parameters:
offset - The offset of the entry to look for.
length - The length of the entry to look for.
Returns:
Nothing.
|
def check(self, dsm, independence_factor=5, **kwargs):
    """
    Check least common mechanism.

    Args:
        dsm (:class:`DesignStructureMatrix`): the DSM to check.
        independence_factor (int): if the maximum dependencies for one
            module is inferior or equal to the DSM size divided by the
            independence factor, then this criterion is verified.

    Returns:
        bool: True if least common mechanism, else False
    """
    message = ''
    data = dsm.data
    categories = dsm.categories
    dsm_size = dsm.size[0]
    if not categories:
        categories = ['appmodule'] * dsm_size
    # Count, per column, how many non-framework modules depend on it.
    dependent_counts = []
    for j in range(0, dsm_size):
        column_count = 0
        for i in range(0, dsm_size):
            if (categories[i] != 'framework' and
                    categories[j] != 'framework' and
                    data[i][j] > 0):
                column_count += 1
        dependent_counts.append(column_count)
    # Brokers and application libraries are exempt from the threshold,
    # so zero out their counts.
    for index, item in enumerate(dsm.categories):
        if item in ('broker', 'applib'):
            dependent_counts[index] = 0
    maximum = max(dependent_counts)
    if maximum <= dsm_size / independence_factor:
        return True, message
    message = (
        'Dependencies to %s (%s) > matrix size (%s) / '
        'independence factor (%s) = %s' % (
            dsm.entities[dependent_counts.index(maximum)],
            maximum, dsm_size, independence_factor,
            dsm_size / independence_factor))
    return False, message
|
Check least common mechanism.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
independence_factor (int): if the maximum dependencies for one
module is inferior or equal to the DSM size divided by the
independence factor, then this criterion is verified.
Returns:
bool: True if least common mechanism, else False
|
def modifyModlist(
        old_entry: dict, new_entry: dict, ignore_attr_types: Optional[List[str]] = None,
        ignore_oldexistent: bool = False) -> Dict[str, Tuple[str, List[bytes]]]:
    """
    Build differential modify list for calling LDAPObject.modify()/modify_s()

    :param old_entry:
        Dictionary holding the old entry
    :param new_entry:
        Dictionary holding what the new entry should be
    :param ignore_attr_types:
        List of attribute type names to be ignored completely
        (matched case-insensitively)
    :param ignore_oldexistent:
        If true attribute type names which are in old_entry
        but are not found in new_entry at all are not deleted.
        This is handy for situations where your application
        sets attribute value to '' for deleting an attribute.
        In most cases leave zero.
    :return: Dict mapping attribute type name to an
        ``(operation, values)`` tuple suitable for
        :py:meth:`ldap:ldap.LDAPObject.modify`.

    This function is derived from :py:func:`ldap:ldap.modlist.modifyModlist`.
    NOTE(review): an earlier description claimed MOD_ADD/MOD_DELETE are
    used in preference to MOD_REPLACE, but the code below emits
    MODIFY_REPLACE whenever an existing attribute's value set changes —
    confirm which behaviour is intended before relying on it.
    """
    # Lower-cased lookup of attribute types to skip entirely.
    ignore_attr_types = _list_dict(map(str.lower, (ignore_attr_types or [])))
    modlist: Dict[str, Tuple[str, List[bytes]]] = {}
    # Map lower-cased attribute type -> original spelling in old_entry;
    # entries are removed as they are matched against new_entry.
    attrtype_lower_map = {}
    for a in old_entry.keys():
        attrtype_lower_map[a.lower()] = a
    for attrtype in new_entry.keys():
        attrtype_lower = attrtype.lower()
        if attrtype_lower in ignore_attr_types:
            # This attribute type is ignored
            continue
        # Filter away null-strings
        new_value = list(filter(lambda x: x is not None, new_entry[attrtype]))
        if attrtype_lower in attrtype_lower_map:
            old_value = old_entry.get(attrtype_lower_map[attrtype_lower], [])
            old_value = list(filter(lambda x: x is not None, old_value))
            del attrtype_lower_map[attrtype_lower]
        else:
            old_value = []
        if not old_value and new_value:
            # Add a new attribute to entry
            modlist[attrtype] = (ldap3.MODIFY_ADD, escape_list(new_value))
        elif old_value and new_value:
            # Replace existing attribute
            old_value_dict = _list_dict(old_value)
            new_value_dict = _list_dict(new_value)
            delete_values = []
            for v in old_value:
                if v not in new_value_dict:
                    delete_values.append(v)
            add_values = []
            for v in new_value:
                if v not in old_value_dict:
                    add_values.append(v)
            # Only emit an operation when the value sets actually differ.
            if len(delete_values) > 0 or len(add_values) > 0:
                modlist[attrtype] = (
                    ldap3.MODIFY_REPLACE, escape_list(new_value))
        elif old_value and not new_value:
            # Completely delete an existing attribute
            modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
    if not ignore_oldexistent:
        # Remove all attributes of old_entry which are not present
        # in new_entry at all
        for a in attrtype_lower_map.keys():
            if a in ignore_attr_types:
                # This attribute type is ignored
                continue
            attrtype = attrtype_lower_map[a]
            modlist[attrtype] = (ldap3.MODIFY_DELETE, [])
    return modlist
|
Build differential modify list for calling LDAPObject.modify()/modify_s()
:param old_entry:
Dictionary holding the old entry
:param new_entry:
Dictionary holding what the new entry should be
:param ignore_attr_types:
List of attribute type names to be ignored completely
:param ignore_oldexistent:
If true attribute type names which are in old_entry
but are not found in new_entry at all are not deleted.
This is handy for situations where your application
sets attribute value to '' for deleting an attribute.
In most cases leave zero.
:return: List of tuples suitable for
:py:meth:`ldap:ldap.LDAPObject.modify`.
This function is the same as :py:func:`ldap:ldap.modlist.modifyModlist`
except for the following changes:
* MOD_DELETE/MOD_DELETE used in preference to MOD_REPLACE when updating
an existing value.
|
def get_urlhash(self, url, fmt):
    """Return the formatted url and sha256 hash of an internal url's file.

    The file is looked up by the basename of ``url`` via ``self.open``.
    """
    local_name = os.path.basename(url)
    with self.open(local_name) as stream:
        return {'url': fmt(url), 'sha256': filehash(stream, 'sha256')}
|
Returns the hash of the file of an internal url
|
def app_authorize(self, account=None, flush=True, bailout=False):
    """
    Like app_authenticate(), but uses the authorization password
    of the account.

    For the difference between authentication and authorization
    please google for AAA.

    :type account: Account
    :param account: An account object, like login().
    :type flush: bool
    :param flush: Whether to flush the last prompt from the buffer.
    :type bailout: bool
    :param bailout: Whether to wait for a prompt after sending the password.
    """
    with self._get_account(account) as acct:
        user = acct.get_name()
        # Fall back to the login password when no separate
        # authorization password is configured.
        password = acct.get_authorization_password()
        if password is None:
            password = acct.get_password()
        self._dbg(1, "Attempting to app-authorize %s." % user)
        self._app_authenticate(acct, password, flush, bailout)
        self.app_authorized = True
|
Like app_authenticate(), but uses the authorization password
of the account.
For the difference between authentication and authorization
please google for AAA.
:type account: Account
:param account: An account object, like login().
:type flush: bool
:param flush: Whether to flush the last prompt from the buffer.
:type bailout: bool
:param bailout: Whether to wait for a prompt after sending the password.
|
def add_months_to_date(months, date):
    """Add a number of months to a date.

    Always returns a ``datetime.date``. When the target month is shorter
    than ``date.day`` (e.g. Jan 31 + 1 month), the result rolls forward
    to the first day of the following month when adding, and clamps back
    to the last day of the target month when subtracting.

    Fix: the overflow branches previously returned ``datetime.datetime``
    while the normal path returned ``datetime.date``; the return type is
    now consistently ``datetime.date``.
    """
    # Normalize (year, month) arithmetic in one step instead of looping.
    total_months = date.month - 1 + months
    year_delta, month_index = divmod(total_months, 12)
    new_month = month_index + 1
    year = date.year + year_delta
    try:
        return datetime.date(year, new_month, date.day)
    except ValueError:
        # date.day exceeds the number of days in the target month
        # (e.g. 30th February).
        if months > 0:
            # We are adding, so use the first day of the next month.
            new_month += 1
            if new_month > 12:
                new_month = 1
                year += 1
            return datetime.date(year, new_month, 1)
        # We are subtracting - use the last day of the same month.
        last_day = calendar.monthrange(year, new_month)[1]
        return datetime.date(year, new_month, last_day)
|
Add a number of months to a date
|
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    parts = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(parts)
|
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
|
def deploy(self, driver, location_id=config.DEFAULT_LOCATION_ID,
           size=config.DEFAULT_SIZE):
    """Deploy this node definition via the given libcloud driver.

    Resolves the location, size and image from the driver, creates the
    node, waits for it to come up, connects over SSH, runs the configured
    deployment script, and returns a NodeProxy wrapping the result.

    :param driver: a libcloud compute driver instance.
    :param location_id: index into ``driver.list_locations()``
        (default ``config.DEFAULT_LOCATION_ID``).
    :param size: size name resolved through ``size_from_name``
        (default ``config.DEFAULT_SIZE``).
    :return: ``NodeProxy(node, image)`` for the created node.
    """
    logger.debug('deploying node %s using driver %s' % (self.name, driver))
    args = {'name': self.name}
    # Optional config-driven extras: provider SSH key name and userdata.
    if hasattr(config, 'SSH_KEY_NAME'):
        args['ex_keyname'] = config.SSH_KEY_NAME
    if hasattr(config, 'EX_USERDATA'):
        args['ex_userdata'] = config.EX_USERDATA
    args['location'] = driver.list_locations()[location_id]
    logger.debug('location %s' % args['location'])
    args['size'] = size_from_name(size, driver.list_sizes())
    logger.debug('size %s' % args['size'])
    logger.debug('image name %s' % config.IMAGE_NAMES[self.image_name])
    args['image'] = image_from_name(
        config.IMAGE_NAMES[self.image_name], driver.list_images())
    logger.debug('image %s' % args['image'])
    logger.debug('creating node with args: %s' % args)
    node = driver.create_node(**args)
    logger.debug('node created')
    # password must be extracted before _wait_until_running(), where it goes away
    logger.debug('driver.features %s' % driver.features)
    password = node.extra.get('password') \
        if 'generates_password' in driver.features['create_node'] else None
    logger.debug('waiting for node to obtain %s' % config.SSH_INTERFACE)
    node, ip_addresses = driver._wait_until_running(
        node, timeout=1200, ssh_interface=config.SSH_INTERFACE)
    ssh_args = {'hostname': ip_addresses[0], 'port': 22, 'timeout': 10}
    # Password auth when the provider generated one; otherwise key-based.
    if password:
        ssh_args['password'] = password
    else:
        ssh_args['key'] = config.SSH_KEY_PATH if hasattr(config, 'SSH_KEY_PATH') else None
    logger.debug('initializing ssh client with %s' % ssh_args)
    ssh_client = libcloud.compute.ssh.SSHClient(**ssh_args)
    logger.debug('ssh client attempting to connect')
    ssh_client = driver._ssh_client_connect(ssh_client)
    logger.debug('ssh client connected')
    logger.debug('starting node deployment with %s steps' % len(self.deployment.steps))
    driver._run_deployment_script(self.deployment, node, ssh_client)
    node.script_deployments = self.script_deployments # retain exit_status, stdout, stderr
    logger.debug('node.extra["imageId"] %s' % node.extra['imageId'])
    return NodeProxy(node, args['image'])
|
Use driver to deploy node, with optional ability to specify
location id and size id.
First, obtain location object from driver. Next, get the
size. Then, get the image. Finally, deploy node, and return
NodeProxy.
|
def event_stream(self, filters=None):
    """Return an iterable of decoded events from the docker daemon.

    :param filters: filters to apply on messages (see docker api);
        ``None`` means no filtering.
    :return: an iterable of event payloads. See the docker api for content.
    """
    effective_filters = filters if filters is not None else {}
    return self._docker.events(decode=True, filters=effective_filters)
|
:param filters: filters to apply on messages. See docker api.
:return: an iterable that contains events from docker. See the docker api for content.
|
def compute():
    """Compute the polynomial and return the length of the result.

    Evaluates the module-level ``expr`` either with plain numpy
    (``eval``) or with numexpr (``ne.evaluate``) depending on the
    module-level ``what`` flag.
    """
    if what == "numpy":
        result = eval(expr)
    else:
        result = ne.evaluate(expr)
    return len(result)
|
Compute the polynomial.
|
def create_diff_storage(self, target, variant):
    """Starts creating an empty differencing storage unit based on this
    medium in the format and at the location defined by the @a target
    argument.
    The target medium must be in :py:attr:`MediumState.not_created`
    state (i.e. must not have an existing storage unit). Upon successful
    completion, this operation will set the type of the target medium to
    :py:attr:`MediumType.normal` and create a storage unit necessary to
    represent the differencing medium data in the given format (according
    to the storage format of the target object).
    After the returned progress object reports that the operation is
    successfully complete, the target medium gets remembered by this
    VirtualBox installation and may be attached to virtual machines.
    The medium will be set to :py:attr:`MediumState.locked_read`
    state for the duration of this operation.
    in target of type :class:`IMedium`
        Target medium.
    in variant of type :class:`MediumVariant`
        Exact image variant which should be created (as a combination of
        :py:class:`MediumVariant` flags).
    return progress of type :class:`IProgress`
        Progress object to track the operation completion.
    raises :class:`VBoxErrorObjectInUse`
        Medium not in @c NotCreated state.
    """
    if not isinstance(target, IMedium):
        raise TypeError("target can only be an instance of type IMedium")
    if not isinstance(variant, list):
        raise TypeError("variant can only be an instance of type list")
    # BUG FIX: previously only the first 10 elements were validated
    # (``variant[:10]``), silently letting invalid entries through for
    # longer lists; validate every element.
    for a in variant:
        if not isinstance(a, MediumVariant):
            raise TypeError(
                "array can only contain objects of type MediumVariant")
    progress = self._call("createDiffStorage",
                          in_p=[target, variant])
    progress = IProgress(progress)
    return progress
|
Starts creating an empty differencing storage unit based on this
medium in the format and at the location defined by the @a target
argument.
The target medium must be in :py:attr:`MediumState.not_created`
state (i.e. must not have an existing storage unit). Upon successful
completion, this operation will set the type of the target medium to
:py:attr:`MediumType.normal` and create a storage unit necessary to
represent the differencing medium data in the given format (according
to the storage format of the target object).
After the returned progress object reports that the operation is
successfully complete, the target medium gets remembered by this
VirtualBox installation and may be attached to virtual machines.
The medium will be set to :py:attr:`MediumState.locked_read`
state for the duration of this operation.
in target of type :class:`IMedium`
Target medium.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorObjectInUse`
Medium not in @c NotCreated state.
|
def get_headers(self, container):
    """
    Return the headers for the specified container.
    """
    container_name = utils.get_name(container)
    resp, _resp_body = self.api.method_head("/%s" % container_name)
    return resp.headers
|
Return the headers for the specified container.
|
def parse_object(lexer: Lexer, is_const: bool) -> ObjectValueNode:
    """ObjectValue[Const]"""
    start = lexer.token
    parse_field = cast(
        Callable[[Lexer], Node], partial(parse_object_field, is_const=is_const)
    )
    fields = any_nodes(lexer, TokenKind.BRACE_L, parse_field, TokenKind.BRACE_R)
    return ObjectValueNode(fields=fields, loc=loc(lexer, start))
|
ObjectValue[Const]
|
def initialize_socket(self):
    """Open the UDP listening socket and start the worker thread.

    Binds an IPv4/UDP socket to ``self._udp_port`` on all interfaces and
    spawns a daemon thread running ``socket_worker`` that feeds
    ``self.messages``.

    :raises socket.error: if the socket cannot be created or bound.
    """
    try:
        _LOGGER.debug("Trying to open socket.")
        self._socket = socket.socket(
            socket.AF_INET,  # IPv4
            socket.SOCK_DGRAM  # UDP
        )
        self._socket.bind(('', self._udp_port))
    except socket.error:
        # Bare raise preserves the original traceback (was `raise err`,
        # which re-raises from here instead of the failing call).
        raise
    else:
        _LOGGER.debug("Socket open.")
        socket_thread = threading.Thread(
            name="SocketThread", target=socket_worker,
            args=(self._socket, self.messages,))
        # Attribute form replaces the deprecated setDaemon() call.
        socket_thread.daemon = True
        socket_thread.start()
|
initialize the socket
|
def ListTimeZones(self):
    """Lists the timezones with their current UTC offsets.

    Writes a two-column table (timezone name, UTC offset) covering
    ``pytz.all_timezones`` to ``self._output_writer``.
    """
    # Removed dead code: the original computed the maximum timezone-name
    # length into an unused local (max_length), wasting a full pass.
    utc_date_time = datetime.datetime.utcnow()
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Timezone', 'UTC Offset'],
        title='Zones')
    for timezone_name in pytz.all_timezones:
        try:
            local_timezone = pytz.timezone(timezone_name)
        except AssertionError as exception:
            logger.error((
                'Unable to determine information about timezone: {0:s} with '
                'error: {1!s}').format(timezone_name, exception))
            continue
        local_date_string = '{0!s}'.format(
            local_timezone.localize(utc_date_time))
        # The localized string ends in the UTC offset; split on its sign.
        if '+' in local_date_string:
            _, _, diff = local_date_string.rpartition('+')
            diff_string = '+{0:s}'.format(diff)
        else:
            _, _, diff = local_date_string.rpartition('-')
            diff_string = '-{0:s}'.format(diff)
        table_view.AddRow([timezone_name, diff_string])
    table_view.Write(self._output_writer)
|
Lists the timezones.
|
def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
    """
    Same as the derivative function above, but ignores datapoints that trend
    down. Useful for counters that increase for a long time, then wrap or
    reset. (Such as if a network interface is destroyed and recreated by
    unloading and re-loading a kernel module, common with USB / WiFi cards.
    Example::
        &target=nonNegativederivative(
            company.server.application01.ifconfig.TXPackets)
    """
    results = []
    for series in seriesList:
        deltas = []
        previous = None
        for value in series:
            if previous is None or value is None:
                deltas.append(None)
            else:
                step = value - previous
                if step >= 0:
                    deltas.append(step)
                elif maxValue is not None and maxValue >= value:
                    # Counter wrapped past maxValue: reconstruct the delta.
                    deltas.append((maxValue - previous) + value + 1)
                else:
                    deltas.append(None)
            previous = value
        name = "nonNegativeDerivative(%s)" % series.name
        derived = TimeSeries(name, series.start, series.end, series.step,
                             deltas)
        derived.pathExpression = name
        results.append(derived)
    return results
|
Same as the derivative function above, but ignores datapoints that trend
down. Useful for counters that increase for a long time, then wrap or
reset. (Such as if a network interface is destroyed and recreated by
unloading and re-loading a kernel module, common with USB / WiFi cards.
Example::
&target=nonNegativederivative(
company.server.application01.ifconfig.TXPackets)
|
def shuffle(self, x, random=None):
    """Shuffle list ``x`` in place using Fisher-Yates; return None.

    :param x: mutable sequence to shuffle in place.
    :param random: optional 0-argument function returning a random float
        in [0.0, 1.0); by default, ``self.random``.
    """
    if random is None:
        random = self.random
    _int = int  # local alias avoids a global lookup per iteration
    # BUG FIX: `xrange` is Python-2-only (NameError on Python 3);
    # `range` behaves identically here on both versions.
    for i in reversed(range(1, len(x))):
        # pick an element in x[:i+1] with which to exchange x[i]
        j = _int(random() * (i + 1))
        x[i], x[j] = x[j], x[i]
|
x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
|
def database_names(self, session=None):
    """**DEPRECATED**: Get a list of the names of all databases on the
    connected server.
    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
    .. versionchanged:: 3.7
       Deprecated. Use :meth:`list_database_names` instead.
    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    warnings.warn(
        "database_names is deprecated. Use list_database_names instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.list_database_names(session)
|
**DEPRECATED**: Get a list of the names of all databases on the
connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.7
Deprecated. Use :meth:`list_database_names` instead.
.. versionchanged:: 3.6
Added ``session`` parameter.
|
def db_to_specifier(db_string):
    """
    Return the database specifier for a database string.
    This accepts a database name or URL, and returns a database specifier in the
    format accepted by ``specifier_to_db``. It is recommended that you consult
    the documentation for that function for an explanation of the format.
    """
    local_match = PLAIN_RE.match(db_string)
    if local_match:
        # Plain database name: always a local specifier.
        return 'local:' + local_match.groupdict()['database']
    remote_match = URL_RE.match(db_string)
    if remote_match:
        groups = remote_match.groupdict()
        hostname = groups.get('hostname')
        portnum = groups.get('portnum')
        database = groups.get('database')
        local_url = settings._('COUCHDB_SERVER', 'http://127.0.0.1:5984/')
        localhost, localport = urlparse.urlparse(local_url)[1].split(':')
        # A URL that points at the configured local server is still local.
        if localhost == hostname and localport == portnum:
            return 'local:' + database
        return 'remote:%s:%s:%s' % (hostname, portnum, database)
    # Throw a wobbly.
    raise ValueError('Invalid database string: %r' % (db_string,))
|
Return the database specifier for a database string.
This accepts a database name or URL, and returns a database specifier in the
format accepted by ``specifier_to_db``. It is recommended that you consult
the documentation for that function for an explanation of the format.
|
def unitigs(args):
    """
    %prog unitigs best.edges
    Reads Celera Assembler's "best.edges" and extract all unitigs.
    """
    p = OptionParser(unitigs.__doc__)
    p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    bestedges, = args
    # Directed best-edge graph filtered by the error-rate cutoff.
    G = read_graph(bestedges, maxerr=opts.maxerr, directed=True)
    H = nx.Graph()
    # Node ids look like "<int>-..."; keep only the leading integer.
    intconv = lambda x: int(x.split("-")[0])
    # Keep only mutual best edges (k's best is v AND v's best is k).
    for k, v in G.iteritems():
        if k == G.get(v, None):
            H.add_edge(intconv(k), intconv(v))
    nunitigs = nreads = 0
    for h in nx.connected_component_subgraphs(H, copy=False):
        # A unitig must be a simple path: exactly two degree-1 endpoints.
        st = [x for x in h if h.degree(x) == 1]
        if len(st) != 2:
            continue
        src, target = st
        path = list(nx.all_simple_paths(h, src, target))
        assert len(path) == 1
        path, = path
        # Emit the unitig as a "|"-separated list of read ids.
        print("|".join(str(x) for x in path))
        nunitigs += 1
        nreads += len(path)
    logging.debug("A total of {0} unitigs built from {1} reads."\
                  .format(nunitigs, nreads))
|
%prog unitigs best.edges
Reads Celera Assembler's "best.edges" and extract all unitigs.
|
def getAccessURL(self, CorpNum, UserID):
    """Return the Popbill login URL for a member.

    args
        CorpNum : member company registration number
        UserID : member Popbill user id
    return
        URL containing a 30-second security token
    raise
        PopbillException
    """
    response = self._httpget('/?TG=LOGIN', CorpNum, UserID)
    return response.url
|
νλΉ λ‘κ·ΈμΈ URL
args
CorpNum : νμ μ¬μ
μλ²νΈ
UserID : νμ νλΉμμ΄λ
return
30μ΄ λ³΄μ ν ν°μ ν¬ν¨ν url
raise
PopbillException
|
def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True):
    """Compare two RPM strings to determine which is newer.

    Parses version information out of RPM package strings of the form
    returned by the ``rpm -q`` command and compares their versions.
    Strings *do not* require a trailing architecture; when omitted, set
    ``arch_provided`` to False. The two strings need not name the same
    package or architecture.

    :param str rpm_str_a: an rpm package string
    :param str rpm_str_b: an rpm package string
    :param bool arch_provided: whether package strings contain
        architecture information
    :return: 1 (``a`` is newer), 0 (versions are equivalent), or -1
        (``b`` is newer)
    :rtype: int
    """
    logger.debug('resolve_versions(%s, %s)', rpm_str_a, rpm_str_b)
    parsed_a = parse_package(rpm_str_a, arch_provided)
    parsed_b = parse_package(rpm_str_b, arch_provided)
    return labelCompare(parsed_a['EVR'], parsed_b['EVR'])
|
Compare two RPM strings to determine which is newer
Parses version information out of RPM package strings of the form
returned by the ``rpm -q`` command and compares their versions to
determine which is newer. Provided strings *do not* require an
architecture at the end, although if providing strings without
architecture, the ``arch_provided`` parameter should be set to
False.
Note that the packages do not have to be the same package (i.e.
they do not require the same name or architecture).
:param str rpm_str_a: an rpm package string
:param str rpm_str_b: an rpm package string
:param bool arch_provided: whether package strings contain
architecture information
:return: 1 (``a`` is newer), 0 (versions are equivalent), or -1
(``b`` is newer)
:rtype: int
|
def normalize_pts(pts, ymax, scaler=2):
    """
    Scale every coordinate by ``scaler`` and flip the y axis, converting
    between top-left and bottom-left origin conventions.
    """
    normalized = []
    for x, y in pts:
        normalized.append((x * scaler, ymax - y * scaler))
    return normalized
|
scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left)
|
def find(self, name, required):
    """
    Finds all matching dependencies by their name.
    :param name: the dependency name to locate.
    :param required: true to raise an exception when no dependencies are found.
    :return: a list of found dependencies, or None when no locator was
        resolved and ``required`` is false.
    :raises Exception: if ``name`` is None.
    :raises ReferenceException: if no locator was resolved and
        ``required`` is true.
    """
    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    if name is None:
        raise Exception("Name cannot be null")
    locator = self._locate(name)
    if locator is None:
        if required:
            raise ReferenceException(None, name)
        return None
    return self._references.find(locator, required)
|
Finds all matching dependencies by their name.
:param name: the dependency name to locate.
:param required: true to raise an exception when no dependencies are found.
:return: a list of found dependencies
|
def create_variable(self, varname, vtype=None):
    """Create a tk variable.
    If the variable was created previously return that instance.

    ``varname`` may embed a type prefix or suffix ("int:name" or
    "name:int"); recognized types are string, int, boolean and double.
    ``vtype``, when given, overrides the name-derived constructor.
    """
    known_types = ('string', 'int', 'boolean', 'double')
    vname = varname
    type_from_name = 'string'  # default type
    if ':' in varname:
        type_from_name, vname = varname.split(':')
        # Fix incorrect order bug #33
        if type_from_name not in known_types:
            # Swap order
            type_from_name, vname = vname, type_from_name
        if type_from_name not in known_types:
            raise Exception('Undefined variable type in "{0}"'.format(varname))
    if vname in self.tkvariables:
        return self.tkvariables[vname]
    if vtype is not None:
        var = vtype()
    else:
        # Dispatch on the name-derived type; string is the fallback.
        constructors = {
            'int': tkinter.IntVar,
            'boolean': tkinter.BooleanVar,
            'double': tkinter.DoubleVar,
        }
        var = constructors.get(type_from_name, tkinter.StringVar)()
    self.tkvariables[vname] = var
    return var
|
Create a tk variable.
If the variable was created previously return that instance.
|
def subkeys(self, path):
    """
    A generalized form that can return multiple subkeys.

    Yields one subkey per subpath expanded from ``path`` (ranges and
    hardened components handled by ``subpaths_for_path_range``).
    """
    for subpath in subpaths_for_path_range(path, hardening_chars="'pH"):
        yield self.subkey_for_path(subpath)
|
A generalized form that can return multiple subkeys.
|
def __deftype_impls(  # pylint: disable=too-many-branches
    ctx: ParserContext, form: ISeq
) -> Tuple[List[DefTypeBase], List[Method]]:
    """Roll up deftype* declared bases and method implementations.

    Walks the tail of a deftype* form, which alternates interface name
    symbols with the method forms implementing the most recently named
    interface, returning the parsed interfaces and the flat list of all
    parsed methods.

    Raises ParserException if an interface is implemented twice, a
    method appears before any interface, an interface declares no
    methods, or an element is neither a symbol nor a seq.
    """
    current_interface_sym: Optional[sym.Symbol] = None
    current_interface: Optional[DefTypeBase] = None
    interfaces = []
    methods: List[Method] = []
    interface_methods: MutableMapping[sym.Symbol, List[Method]] = {}
    for elem in form:
        if isinstance(elem, sym.Symbol):
            # New interface symbol: first flush the methods collected for
            # the previous interface (rejecting duplicate interfaces).
            if current_interface is not None:
                if current_interface_sym in interface_methods:
                    raise ParserException(
                        f"deftype* forms may only implement an interface once",
                        form=elem,
                    )
                assert (
                    current_interface_sym is not None
                ), "Symbol must be defined with interface"
                interface_methods[current_interface_sym] = methods
            current_interface_sym = elem
            current_interface = _parse_ast(ctx, elem)
            methods = []
            if not isinstance(current_interface, (MaybeClass, MaybeHostForm, VarRef)):
                raise ParserException(
                    f"deftype* interface implementation must be an existing interface",
                    form=elem,
                )
            interfaces.append(current_interface)
        elif isinstance(elem, ISeq):
            # A method implementation for the current interface.
            if current_interface is None:
                raise ParserException(
                    f"deftype* method cannot be declared without interface", form=elem
                )
            methods.append(__deftype_method(ctx, elem, current_interface))
        else:
            raise ParserException(
                f"deftype* must consist of interface or protocol names and methods",
                form=elem,
            )
    # Flush the methods collected for the final interface.
    if current_interface is not None:
        if len(methods) > 0:
            if current_interface_sym in interface_methods:
                raise ParserException(
                    f"deftype* forms may only implement an interface once",
                    form=current_interface_sym,
                )
            assert (
                current_interface_sym is not None
            ), "Symbol must be defined with interface"
            interface_methods[current_interface_sym] = methods
        else:
            raise ParserException(
                f"deftype* may not declare interface without at least one method",
                form=current_interface_sym,
            )
    return interfaces, list(chain.from_iterable(interface_methods.values()))
|
Roll up deftype* declared bases and method implementations.
|
def raw_broadcast(self, destination, message, **kwargs):
    """Broadcast a raw (unmangled) message.

    This may cause errors if the receiver expects a mangled message.

    :param destination: Topic name to send to
    :param message: Either a string or a serializable object to be sent
    :param **kwargs: Further parameters for the transport layer, for
        example ``delay`` (seconds to delay transport), ``headers``
        (dictionary of header entries), ``expiration`` (relative to
        sending time) and ``transaction`` (transaction ID if the message
        should be part of a transaction).
    """
    self._broadcast(destination, message, **kwargs)
|
Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
|
def get_metric_names(self, agent_id, re=None, limit=5000):
    """
    Requires: application ID
    Optional: Regex to filter metric names, limit of results
    Returns: A dictionary,
        key: metric name,
        value: list of fields available for a given metric
    Method: Get
    Restrictions: Rate limit to 1x per minute
    Errors: 403 Invalid API Key, 422 Invalid Parameters
    Endpoint: api.newrelic.com
    """
    # Make sure we play it slow
    self._api_rate_limit_exceeded(self.get_metric_names)
    endpoint = "https://api.newrelic.com"
    uri = "{endpoint}/api/v1/applications/{agent_id}/metrics.xml".format(
        endpoint=endpoint, agent_id=agent_id)
    # A longer timeout is needed due to the amount of data that can be
    # returned without a regex search.
    response = self._make_get_request(
        uri, parameters={'re': re, 'limit': limit},
        timeout=max(self.timeout, 5.0))
    # A dict of metric name -> field names reads clearer than a list of
    # heavyweight metric objects, since each metric type has different
    # attributes.
    return {
        metric.get('name'): [field.get('name')
                             for field in metric.findall('.//field')]
        for metric in response.findall('.//metric')
    }
|
Requires: application ID
Optional: Regex to filter metric names, limit of results
Returns: A dictionary,
key: metric name,
value: list of fields available for a given metric
Method: Get
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API Key, 422 Invalid Parameters
Endpoint: api.newrelic.com
|
def numberofnetworks(self):
    """The number of distinct networks defined by the|Node| and
    |Element| objects currently handled by the |HydPy| object.

    NOTE(review): despite the name, this returns the |Selections|
    object describing the distinct networks (one selection per end
    node after overlap removal), not an integer count — confirm
    against callers.
    """
    sels1 = selectiontools.Selections()
    sels2 = selectiontools.Selections()
    complete = selectiontools.Selection('complete',
                                        self.nodes, self.elements)
    # One upstream selection per end node; sels2 keeps untouched copies
    # that are used below to subtract overlaps out of sels1.
    for node in self.endnodes:
        sel = complete.copy(node.name).select_upstream(node)
        sels1 += sel
        sels2 += sel.copy(node.name)
    # Remove devices shared between different upstream selections.
    for sel1 in sels1:
        for sel2 in sels2:
            if sel1.name != sel2.name:
                sel1 -= sel2
    # Drop selections that lost all their elements.
    for name in list(sels1.names):
        if not sels1[name].elements:
            del sels1[name]
    return sels1
|
The number of distinct networks defined by the|Node| and
|Element| objects currently handled by the |HydPy| object.
|
def lock(self, name, ttl=None, lock_id=None):
    """
    Create a named :py:class:`Lock` instance bound to this client.

    The returned lock exposes an API similar to the standard library's
    ``threading.Lock`` and can also be used as a context manager or
    decorator.

    :param str name: The name of the lock.
    :param int ttl: The time-to-live for the lock in milliseconds
        (optional). If the ttl is ``None`` then the lock will not
        expire.
    :param str lock_id: Optional identifier for the lock instance.
    """
    return Lock(self, name, ttl, lock_id)
|
Create a named :py:class:`Lock` instance. The lock implements
an API similar to the standard library's ``threading.Lock``,
and can also be used as a context manager or decorator.
:param str name: The name of the lock.
:param int ttl: The time-to-live for the lock in milliseconds
(optional). If the ttl is ``None`` then the lock will not
expire.
:param str lock_id: Optional identifier for the lock instance.
|
def get(self, request, bot_id, handler_id, id, format=None):
    """
    Get url parameter by id
    ---
    serializer: AbsParamSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Delegate to the generic detail GET; this override exists mainly to
    # carry the swagger YAML in the docstring above (do not edit it
    # casually — it is machine-read by the API doc generator).
    return super(UrlParameterDetail, self).get(request, bot_id, handler_id, id, format)
|
Get url parameter by id
---
serializer: AbsParamSerializer
responseMessages:
- code: 401
message: Not authenticated
|
def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for a Brocade AAA command rule
    targeting a FortyGigabitEthernet interface leaf and hands the
    ``config`` element to the callback.

    Required kwargs:
        index: text of the rule <index> key element.
        fortygigabitethernet_leaf: text of the interface leaf element.
    Optional kwargs:
        callback: function receiving the built element tree
            (defaults to ``self._callback``).
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    command = ET.SubElement(rule, "command")
    cmdlist = ET.SubElement(command, "cmdlist")
    interface_u = ET.SubElement(cmdlist, "interface-u")
    interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
    interface = ET.SubElement(interface_fe_leaf, "interface")
    fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
    fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def get_remaining_time(program):
    '''
    Get the remaining time in seconds of a program that is currently on.

    :param program: mapping with ``start_time`` and ``end_time`` datetimes.
    :return: seconds until the program ends, 0 if it has already ended,
        or None if the times cannot be determined.
    '''
    now = datetime.datetime.now()
    program_start = program.get('start_time')
    program_end = program.get('end_time')
    if not program_start or not program_end:
        _LOGGER.error('Could not determine program start and/or end times.')
        _LOGGER.debug('Program data: %s', program)
        return
    if now > program_end:
        _LOGGER.error('The provided program has already ended.')
        _LOGGER.debug('Program data: %s', program)
        return 0
    # BUG FIX: previously returned the ELAPSED time (now - program_start),
    # contradicting the function's name and docstring.
    remaining = program_end - now
    # NOTE: .seconds ignores whole days — assumes programs shorter than
    # 24h, matching the original's use of .seconds.
    return remaining.seconds
|
Get the remaining time in seconds of a program that is currently on.
|
def urlopen(self, method, url, redirect=True, **kw):
    """Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.

    For plain-HTTP requests sent through the proxy, injects the proxy
    headers (at minimum ``Host``) before delegating to the parent
    implementation.
    """
    u = parse_url(url)
    if u.scheme == "http":
        # For proxied HTTPS requests, httplib sets the necessary headers
        # on the CONNECT to the proxy. For HTTP, we'll definitely
        # need to set 'Host' at the very least.
        headers = kw.get('headers', self.headers)
        kw['headers'] = self._set_proxy_headers(url, headers)
    return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
|
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.