| body<br>stringlengths 26–98.2k | body_hash<br>int64 -9,222,864,604,528,158,000 to 9,221,803,474B | docstring<br>stringlengths 1–16.8k | path<br>stringlengths 5–230 | name<br>stringlengths 1–96 | repository_name<br>stringlengths 7–89 | lang<br>stringclasses 1 value | body_without_docstring<br>stringlengths 20–98.2k |
|---|---|---|---|---|---|---|---|
def get(path, name):
'\n Args:\n path (string): Directory where the entry point is located.\n name (string): Name of the entry point file.\n\n Returns:\n (_EntryPointType): The type of the entry point.\n '
if name.endswith('.sh'):
return _EntryPointType.COMMAND
elif ('setup.py' in os.listdir(path)):
return _EntryPointType.PYTHON_PACKAGE
elif name.endswith('.py'):
return _EntryPointType.PYTHON_PROGRAM
else:
return _EntryPointType.COMMAND
| -4,104,312,754,512,531,000
|
Args:
path (string): Directory where the entry point is located.
name (string): Name of the entry point file.
Returns:
(_EntryPointType): The type of the entry point.
|
src/sagemaker_training/_entry_point_type.py
|
get
|
ChaiBapchya/sagemaker-training-toolk
|
python
|
def get(path, name):
'\n Args:\n path (string): Directory where the entry point is located.\n name (string): Name of the entry point file.\n\n Returns:\n (_EntryPointType): The type of the entry point.\n '
if name.endswith('.sh'):
return _EntryPointType.COMMAND
elif ('setup.py' in os.listdir(path)):
return _EntryPointType.PYTHON_PACKAGE
elif name.endswith('.py'):
return _EntryPointType.PYTHON_PROGRAM
else:
return _EntryPointType.COMMAND
|
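The resolution order above (shell script, then `setup.py`, then `.py` program, then generic command) can be exercised against a throwaway directory. A minimal sketch, assuming `_EntryPointType` is a three-member enum as the return values suggest; the stub below stands in for the real one:

```python
import enum
import os
import tempfile

class _EntryPointType(enum.Enum):  # stand-in for the module's enum (assumption)
    COMMAND = 'command'
    PYTHON_PACKAGE = 'python_package'
    PYTHON_PROGRAM = 'python_program'

def get(path, name):
    # Same resolution order as above: .sh wins, then setup.py, then .py,
    # falling back to a generic command.
    if name.endswith('.sh'):
        return _EntryPointType.COMMAND
    elif 'setup.py' in os.listdir(path):
        return _EntryPointType.PYTHON_PACKAGE
    elif name.endswith('.py'):
        return _EntryPointType.PYTHON_PROGRAM
    return _EntryPointType.COMMAND

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'train.py'), 'w').close()
    assert get(d, 'train.py') is _EntryPointType.PYTHON_PROGRAM
    open(os.path.join(d, 'setup.py'), 'w').close()
    assert get(d, 'train.py') is _EntryPointType.PYTHON_PACKAGE  # setup.py outranks .py
    assert get(d, 'run.sh') is _EntryPointType.COMMAND           # .sh outranks everything
```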
def test_tf_linear_interp1d_map(self):
'Tests TF linear interpolation mapping to a single number.'
def graph_fn():
tf_x = tf.constant([0.0, 0.5, 1.0])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0.0, 0.25, 0.5, 0.75, 1.0])
tf_map_outputs = calibration_builder._tf_linear_interp1d(new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
| -7,720,452,319,569,558,000
|
Tests TF linear interpolation mapping to a single number.
|
research/object_detection/builders/calibration_builder_test.py
|
test_tf_linear_interp1d_map
|
zhaowt96/models
|
python
|
def test_tf_linear_interp1d_map(self):
def graph_fn():
tf_x = tf.constant([0.0, 0.5, 1.0])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0.0, 0.25, 0.5, 0.75, 1.0])
tf_map_outputs = calibration_builder._tf_linear_interp1d(new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
|
def test_tf_linear_interp1d_interpolate(self):
'Tests TF 1d linear interpolation not mapping to a single number.'
def graph_fn():
tf_x = tf.constant([0.0, 0.5, 1.0])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0.0, 0.25, 0.5, 0.75, 1.0])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.0])
| -1,378,826,018,398,115,600
|
Tests TF 1d linear interpolation not mapping to a single number.
|
research/object_detection/builders/calibration_builder_test.py
|
test_tf_linear_interp1d_interpolate
|
zhaowt96/models
|
python
|
def test_tf_linear_interp1d_interpolate(self):
def graph_fn():
tf_x = tf.constant([0.0, 0.5, 1.0])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0.0, 0.25, 0.5, 0.75, 1.0])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.0])
|
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
'Helper performing 1d linear interpolation using SciPy.'
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
| -4,444,101,741,602,493,400
|
Helper performing 1d linear interpolation using SciPy.
|
research/object_detection/builders/calibration_builder_test.py
|
_get_scipy_interp1d
|
zhaowt96/models
|
python
|
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
|
def _get_tf_interp1d(self, new_x, x, y):
'Helper performing 1d linear interpolation using Tensorflow.'
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(tf.convert_to_tensor(new_x, dtype=tf.float32), tf.convert_to_tensor(x, dtype=tf.float32), tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
| 6,076,830,241,423,907,000
|
Helper performing 1d linear interpolation using Tensorflow.
|
research/object_detection/builders/calibration_builder_test.py
|
_get_tf_interp1d
|
zhaowt96/models
|
python
|
def _get_tf_interp1d(self, new_x, x, y):
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(tf.convert_to_tensor(new_x, dtype=tf.float32), tf.convert_to_tensor(x, dtype=tf.float32), tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
|
def test_tf_linear_interp1d_against_scipy_map(self):
'Tests parity of TF linear interpolation with SciPy for simple mapping.'
length = 10
np_x = np.linspace(0, 1, length)
np_y_map = np.repeat(0.5, length)
test_data_np = np.linspace(0, 1, (length * 10))
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
| 8,143,699,188,412,991,000
|
Tests parity of TF linear interpolation with SciPy for simple mapping.
|
research/object_detection/builders/calibration_builder_test.py
|
test_tf_linear_interp1d_against_scipy_map
|
zhaowt96/models
|
python
|
def test_tf_linear_interp1d_against_scipy_map(self):
length = 10
np_x = np.linspace(0, 1, length)
np_y_map = np.repeat(0.5, length)
test_data_np = np.linspace(0, 1, (length * 10))
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
|
def test_tf_linear_interp1d_against_scipy_interpolate(self):
'Tests parity of TF linear interpolation with SciPy.'
length = 10
np_x = np.linspace(0, 1, length)
np_y_interp = np.linspace(0.5, 1, length)
test_data_np = np.linspace(0, 1, (length * 10))
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
| 5,465,063,855,331,998,000
|
Tests parity of TF linear interpolation with SciPy.
|
research/object_detection/builders/calibration_builder_test.py
|
test_tf_linear_interp1d_against_scipy_interpolate
|
zhaowt96/models
|
python
|
def test_tf_linear_interp1d_against_scipy_interpolate(self):
length = 10
np_x = np.linspace(0, 1, length)
np_y_interp = np.linspace(0.5, 1, length)
test_data_np = np.linspace(0, 1, (length * 10))
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
|
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto, x_array, y_array, class_id):
'Adds a function approximation to calibration proto for a class id.'
if (class_id is not None):
function_approximation = calibration_proto.class_id_function_approximations.class_id_xy_pairs_map[class_id]
else:
function_approximation = calibration_proto.function_approximation.x_y_pairs
for (x, y) in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
| 385,374,581,038,189,440
|
Adds a function approximation to calibration proto for a class id.
|
research/object_detection/builders/calibration_builder_test.py
|
_add_function_approximation_to_calibration_proto
|
zhaowt96/models
|
python
|
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto, x_array, y_array, class_id):
if (class_id is not None):
function_approximation = calibration_proto.class_id_function_approximations.class_id_xy_pairs_map[class_id]
else:
function_approximation = calibration_proto.function_approximation.x_y_pairs
for (x, y) in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
|
def test_class_agnostic_function_approximation(self):
'Tests that calibration produces correct class-agnostic values.'
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], [[0.35, 0.45, 0.55], [0.65, 0.75, 0.75]]])
| 529,330,399,351,468,800
|
Tests that calibration produces correct class-agnostic values.
|
research/object_detection/builders/calibration_builder_test.py
|
test_class_agnostic_function_approximation
|
zhaowt96/models
|
python
|
def test_class_agnostic_function_approximation(self):
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], [[0.35, 0.45, 0.55], [0.65, 0.75, 0.75]]])
|
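The calibration applied in this test is plain 1-D linear interpolation over the configured (x, y) control points, so the expected tensor can be reproduced outside TensorFlow with `numpy.interp`; a quick cross-check on the flattened scores:

```python
import numpy as np

x = np.asarray([0.0, 0.5, 1.0])
y = np.asarray([0.0, 0.25, 0.75])
scores = np.asarray([0.1, 0.2, 0.3, 0.4, 0.5, 0.0, 0.6, 0.7, 0.8, 0.9, 1.0, 1.0])

# Flattened version of the expected calibrated scores in the test above.
expected = np.asarray([0.05, 0.1, 0.15, 0.2, 0.25, 0.0,
                       0.35, 0.45, 0.55, 0.65, 0.75, 0.75])
assert np.allclose(np.interp(scores, x, y), expected)
```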
def test_multiclass_function_approximations(self):
'Tests that calibration produces correct multiclass values.'
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_0_x, class_0_y, class_id=0)
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]], [[0.5, 0.7], [0.5, 0.96]]])
| 9,125,179,593,091,703,000
|
Tests that calibration produces correct multiclass values.
|
research/object_detection/builders/calibration_builder_test.py
|
test_multiclass_function_approximations
|
zhaowt96/models
|
python
|
def test_multiclass_function_approximations(self):
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_0_x, class_0_y, class_id=0)
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]], [[0.5, 0.7], [0.5, 0.96]]])
|
def test_temperature_scaling(self):
'Tests that calibration produces correct temperature scaling values.'
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
| 7,285,490,984,036,249,000
|
Tests that calibration produces correct temperature scaling values.
|
research/object_detection/builders/calibration_builder_test.py
|
test_temperature_scaling
|
zhaowt96/models
|
python
|
def test_temperature_scaling(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]], [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]], [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
|
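Temperature scaling here just divides every score by the configured scaler (2.0), which is why the expected tensor is the input halved; a one-line NumPy check, independent of the builder:

```python
import numpy as np

scores = np.array([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
                   [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]])
assert np.allclose(scores / 2.0,
                   [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
                    [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
```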
def test_skips_class_when_calibration_parameters_not_present(self):
'Tests that graph fails when parameters not present for all classes.'
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]], [[0.5, 0.4], [0.5, 0.92]]])
| 643,980,486,263,068,900
|
Tests that graph fails when parameters not present for all classes.
|
research/object_detection/builders/calibration_builder_test.py
|
test_skips_class_when_calibration_parameters_not_present
|
zhaowt96/models
|
python
|
def test_skips_class_when_calibration_parameters_not_present(self):
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant([[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]], dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]], [[0.5, 0.4], [0.5, 0.92]]])
|
def cluster_and_sort(x, max_clusters, min_cluster_size):
'\n :param x: object representations (X x Features)\n :param max_clusters:\n :param min_cluster_size:\n :return: List[cluster], Hierarchical dendrogram of splits.\n '
    logger.debug(f'Looking for an appropriate number of clusters, min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
if (x.shape[1] == 0):
return (([0] * x.shape[0]), None)
r = (min(int((x.shape[0] / min_cluster_size)), max_clusters) + 1)
l = 1
if (l >= (r - 2)):
return (([0] * x.shape[0]), None)
prev_min_size = None
while (l < (r - 1)):
n_clusters = int(((l + r) / 2))
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
clusters_counter = Counter(model.labels_)
min_size = clusters_counter.most_common()[(- 1)][1]
logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, prev_min_size={prev_min_size}, min_size={min_size}')
if (min_size < min_cluster_size):
if ((prev_min_size is not None) and (min_size <= prev_min_size)):
break
r = (n_clusters + 1)
else:
l = n_clusters
prev_min_size = min_size
logger.debug(f'Number of clusters = {n_clusters}')
logger.debug(f'Min cluster size = {prev_min_size}')
logger.debug('Reorder clusters by size descending')
reorder_map = {c: i for (i, (c, _)) in enumerate(clusters_counter.most_common())}
return ([reorder_map[c] for c in model.labels_], model.children_)
| -1,275,711,226,123,318,000
|
:param x: object representations (X x Features)
:param max_clusters:
:param min_cluster_size:
:return: List[cluster], Hierarchical dendrogram of splits.
|
pysrc/papers/analysis/topics.py
|
cluster_and_sort
|
JetBrains-Research/pubtrends
|
python
|
def cluster_and_sort(x, max_clusters, min_cluster_size):
'\n :param x: object representations (X x Features)\n :param max_clusters:\n :param min_cluster_size:\n :return: List[cluster], Hierarchical dendrogram of splits.\n '
    logger.debug(f'Looking for an appropriate number of clusters, min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
if (x.shape[1] == 0):
return (([0] * x.shape[0]), None)
r = (min(int((x.shape[0] / min_cluster_size)), max_clusters) + 1)
l = 1
if (l >= (r - 2)):
return (([0] * x.shape[0]), None)
prev_min_size = None
while (l < (r - 1)):
n_clusters = int(((l + r) / 2))
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
clusters_counter = Counter(model.labels_)
min_size = clusters_counter.most_common()[(- 1)][1]
logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, prev_min_size={prev_min_size}, min_size={min_size}')
if (min_size < min_cluster_size):
if ((prev_min_size is not None) and (min_size <= prev_min_size)):
break
r = (n_clusters + 1)
else:
l = n_clusters
prev_min_size = min_size
logger.debug(f'Number of clusters = {n_clusters}')
logger.debug(f'Min cluster size = {prev_min_size}')
logger.debug('Reorder clusters by size descending')
reorder_map = {c: i for (i, (c, _)) in enumerate(clusters_counter.most_common())}
return ([reorder_map[c] for c in model.labels_], model.children_)
|
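The final relabeling step of `cluster_and_sort` (cluster 0 becomes the largest cluster, and so on down) is easy to check in isolation. A sketch with synthetic blobs standing in for the paper feature matrix; only `scikit-learn` and the standard library are assumed:

```python
from collections import Counter

import numpy as np
from sklearn.cluster import AgglomerativeClustering

rng = np.random.default_rng(0)
sizes = (40, 30, 20)
x = np.vstack([rng.normal(loc, 0.1, size=(n, 5))
               for loc, n in zip((0.0, 1.0, 2.0), sizes)])

model = AgglomerativeClustering(n_clusters=3, linkage='ward').fit(x)
counter = Counter(model.labels_)
# Relabel so clusters are numbered by size, descending, as the function does.
reorder = {c: i for i, (c, _) in enumerate(counter.most_common())}
labels = [reorder[c] for c in model.labels_]
assert [labels.count(i) for i in range(3)] == [40, 30, 20]
```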
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"\n Get words from abstracts that describe the components the best way\n using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance\n "
logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
comp_idx = {c: i for (i, c) in enumerate((c for c in comps if (c != ignore_comp)))}
if (len(comp_idx) < 2):
comp = list(comp_idx.keys())[0]
if (ignore_comp is None):
most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
return {comp: list(sorted(most_frequent.items(), key=(lambda kv: kv[1]), reverse=True))[:n_words]}
else:
most_frequent = get_frequent_tokens(chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))])))
return {comp: list(sorted(most_frequent.items(), key=(lambda kv: kv[1]), reverse=True))[:n_words], ignore_comp: []}
comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for (comp, comp_pids) in comps.items()}
result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for (t, v) in vs])) for (comp, vs) in result.items()]
logger.debug(('Description\n' + '\n'.join((f'{comp}: {kwd}' for (comp, kwd) in kwds))))
return result
| 8,841,790,934,862,806,000
|
Get words from abstracts that describe the components the best way
using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in terms of cosine distance
|
pysrc/papers/analysis/topics.py
|
get_topics_description
|
JetBrains-Research/pubtrends
|
python
|
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"\n Get words from abstracts that describe the components the best way\n using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance\n "
logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
comp_idx = {c: i for (i, c) in enumerate((c for c in comps if (c != ignore_comp)))}
if (len(comp_idx) < 2):
comp = list(comp_idx.keys())[0]
if (ignore_comp is None):
most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
return {comp: list(sorted(most_frequent.items(), key=(lambda kv: kv[1]), reverse=True))[:n_words]}
else:
most_frequent = get_frequent_tokens(chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))])))
return {comp: list(sorted(most_frequent.items(), key=(lambda kv: kv[1]), reverse=True))[:n_words], ignore_comp: []}
comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for (comp, comp_pids) in comps.items()}
result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for (t, v) in vs])) for (comp, vs) in result.items()]
logger.debug(('Description\n' + '\n'.join((f'{comp}: {kwd}' for (comp, kwd) in kwds))))
return result
|
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"\n Select words with the frequency vector that is the closest to the 'ideal' frequency vector\n ([0, ..., 0, 1, 0, ..., 0]) in tokens of cosine distance\n "
logger.debug('Compute average tokens counts per components')
comp_idx = {c: i for (i, c) in enumerate((c for c in comps if (c != ignore_comp)))}
    tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=float)
for (comp, comp_ids) in comps.items():
if (comp != ignore_comp):
tokens_freqs_per_comp[comp_idx[comp], :] = np.sum(corpus_counts[comp_ids, :], axis=0)
tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
tokens_freqs_norm = np.sqrt(np.diag((tokens_freqs_per_comp.T @ tokens_freqs_per_comp)))
tokens_freqs_per_comp = (tokens_freqs_per_comp / tokens_freqs_norm)
logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
cluster_mask = np.eye(len(comp_idx))
distance = (tokens_freqs_per_comp.T @ cluster_mask)
adjusted_distance = (distance.T * np.log(tokens_freqs_total))
result = {}
for comp in comps.keys():
if (comp == ignore_comp):
result[comp] = []
continue
c = comp_idx[comp]
cluster_tokens_idx = np.argsort((- adjusted_distance[c, :]))[:n_words].tolist()
result[comp] = [(corpus_tokens[i], adjusted_distance[(c, i)]) for i in cluster_tokens_idx]
return result
| 3,370,905,416,881,422,000
|
Select words with the frequency vector that is the closest to the 'ideal' frequency vector
([0, ..., 0, 1, 0, ..., 0]) in terms of cosine distance
|
pysrc/papers/analysis/topics.py
|
_get_topics_description_cosine
|
JetBrains-Research/pubtrends
|
python
|
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
"\n Select words with the frequency vector that is the closest to the 'ideal' frequency vector\n ([0, ..., 0, 1, 0, ..., 0]) in tokens of cosine distance\n "
logger.debug('Compute average tokens counts per components')
comp_idx = {c: i for (i, c) in enumerate((c for c in comps if (c != ignore_comp)))}
    tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]), dtype=float)
for (comp, comp_ids) in comps.items():
if (comp != ignore_comp):
tokens_freqs_per_comp[comp_idx[comp], :] = np.sum(corpus_counts[comp_ids, :], axis=0)
tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
tokens_freqs_norm = np.sqrt(np.diag((tokens_freqs_per_comp.T @ tokens_freqs_per_comp)))
tokens_freqs_per_comp = (tokens_freqs_per_comp / tokens_freqs_norm)
logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
cluster_mask = np.eye(len(comp_idx))
distance = (tokens_freqs_per_comp.T @ cluster_mask)
adjusted_distance = (distance.T * np.log(tokens_freqs_total))
result = {}
for comp in comps.keys():
if (comp == ignore_comp):
result[comp] = []
continue
c = comp_idx[comp]
cluster_tokens_idx = np.argsort((- adjusted_distance[c, :]))[:n_words].tolist()
result[comp] = [(corpus_tokens[i], adjusted_distance[(c, i)]) for i in cluster_tokens_idx]
return result
|
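The scoring in `_get_topics_description_cosine` reduces to: L2-normalize each token's per-component count column (so the dot product with a one-hot "ideal" vector is the cosine similarity), then weight by the log of the token's total count. A compact NumPy restatement of the same math on toy counts:

```python
import numpy as np

# rows = components, cols = tokens; toy per-component token counts
counts = np.array([[9.0, 1.0, 4.0],
                   [1.0, 9.0, 4.0]])
totals = counts.sum(axis=0)
# Column-normalize: cosine of each token column vs. a one-hot component vector.
norm = counts / np.sqrt((counts * counts).sum(axis=0))
score = norm * np.log(totals)  # corresponds to adjusted_distance above
# Token 0 is concentrated in component 0, so it ranks first there.
assert list(np.argsort(-score[0])) == [0, 2, 1]
```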
def test_params_deprecation_view_markers():
    ' Tests whether use of deprecated keyword parameters of view_markers\n raises correct warnings.\n '
deprecated_params = {'coords': 'marker_coords', 'colors': 'marker_color'}
deprecation_msg = 'The parameter "{}" will be removed in 0.6.0 release of Nilearn. Please use the parameter "{}" instead.'
warning_msgs = {old_: deprecation_msg.format(old_, new_) for (old_, new_) in deprecated_params.items()}
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_markers(coords=coords, marker_color=colors)
html_connectome.view_markers(marker_coords=coords, colors=colors)
html_connectome.view_markers(marker_coords=coords, marker_color=colors)
html_connectome.view_markers(coords, colors)
old_params = ['coords', 'colors']
assert (len(raised_warnings) == 2)
for (old_param_, raised_warning_) in zip(old_params, raised_warnings):
assert (warning_msgs[old_param_] == str(raised_warning_.message))
assert (raised_warning_.category is DeprecationWarning)
| -5,101,782,481,197,769,000
|
Tests whether use of deprecated keyword parameters of view_markers
raises correct warnings.
|
nilearn/plotting/tests/test_html_connectome.py
|
test_params_deprecation_view_markers
|
JohannesWiesner/nilearn
|
python
|
def test_params_deprecation_view_markers():
    ' Tests whether use of deprecated keyword parameters of view_markers\n raises correct warnings.\n '
deprecated_params = {'coords': 'marker_coords', 'colors': 'marker_color'}
deprecation_msg = 'The parameter "{}" will be removed in 0.6.0 release of Nilearn. Please use the parameter "{}" instead.'
warning_msgs = {old_: deprecation_msg.format(old_, new_) for (old_, new_) in deprecated_params.items()}
coords = np.arange(12).reshape((4, 3))
colors = ['r', 'g', 'black', 'white']
with warnings.catch_warnings(record=True) as raised_warnings:
html_connectome.view_markers(coords=coords, marker_color=colors)
html_connectome.view_markers(marker_coords=coords, colors=colors)
html_connectome.view_markers(marker_coords=coords, marker_color=colors)
html_connectome.view_markers(coords, colors)
old_params = ['coords', 'colors']
assert (len(raised_warnings) == 2)
for (old_param_, raised_warning_) in zip(old_params, raised_warnings):
assert (warning_msgs[old_param_] == str(raised_warning_.message))
assert (raised_warning_.category is DeprecationWarning)
|
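The behaviour under test — only calls that actually pass a legacy keyword should warn, once per legacy keyword — can be reproduced standalone. A hedged sketch of the same mechanism (nilearn's real deprecation machinery is a decorator and differs in detail):

```python
import warnings

def view_markers(marker_coords=None, marker_color=None, coords=None, colors=None):
    # Map legacy keywords onto their replacements, warning on each use.
    if coords is not None:
        warnings.warn('The parameter "coords" will be removed. '
                      'Please use "marker_coords" instead.', DeprecationWarning)
        marker_coords = coords
    if colors is not None:
        warnings.warn('The parameter "colors" will be removed. '
                      'Please use "marker_color" instead.', DeprecationWarning)
        marker_color = colors
    return marker_coords, marker_color

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    view_markers(coords=[1], marker_color=['r'])          # warns about coords
    view_markers(marker_coords=[1], colors=['r'])         # warns about colors
    view_markers(marker_coords=[1], marker_color=['r'])   # clean call, no warning
assert len(caught) == 2
assert all(w.category is DeprecationWarning for w in caught)
```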
def L2NormLoss_test(gt, out, frame_ids):
'\n gt: B, 66, 25\n '
t_3d = np.zeros(len(frame_ids))
(batch_size, features, seq_len) = gt.shape
gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, (- 1), 3)
out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, (- 1), 3)
for k in np.arange(0, len(frame_ids)):
j = frame_ids[k]
t_3d[k] = (torch.mean(torch.norm((gt[:, j, :, :].contiguous().view((- 1), 3) - out[:, j, :, :].contiguous().view((- 1), 3)), 2, 1)).cpu().data.numpy() * batch_size)
return t_3d
| -2,572,087,974,684,275,000
|
gt: B, 66, 25
|
run/cmu_runner.py
|
L2NormLoss_test
|
Droliven/MSRGCN
|
python
|
def L2NormLoss_test(gt, out, frame_ids):
'\n \n '
t_3d = np.zeros(len(frame_ids))
(batch_size, features, seq_len) = gt.shape
gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, (- 1), 3)
out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, (- 1), 3)
for k in np.arange(0, len(frame_ids)):
j = frame_ids[k]
t_3d[k] = (torch.mean(torch.norm((gt[:, j, :, :].contiguous().view((- 1), 3) - out[:, j, :, :].contiguous().view((- 1), 3)), 2, 1)).cpu().data.numpy() * batch_size)
return t_3d
|
def L2NormLoss_train(gt, out):
    '\n # (batch size, feature dim, seq len)\n Equivalent to mpjpe_error_p3d()\n '
(batch_size, _, seq_len) = gt.shape
gt = gt.view(batch_size, (- 1), 3, seq_len).permute(0, 3, 1, 2).contiguous()
out = out.view(batch_size, (- 1), 3, seq_len).permute(0, 3, 1, 2).contiguous()
loss = torch.mean(torch.norm((gt - out), 2, dim=(- 1)))
return loss
| 3,562,557,728,237,097,000
|
# (batch size, feature dim, seq len)
Equivalent to mpjpe_error_p3d()
|
run/cmu_runner.py
|
L2NormLoss_train
|
Droliven/MSRGCN
|
python
|
def L2NormLoss_train(gt, out):
    '\n # (batch size, feature dim, seq len)\n Equivalent to mpjpe_error_p3d()\n '
(batch_size, _, seq_len) = gt.shape
gt = gt.view(batch_size, (- 1), 3, seq_len).permute(0, 3, 1, 2).contiguous()
out = out.view(batch_size, (- 1), 3, seq_len).permute(0, 3, 1, 2).contiguous()
loss = torch.mean(torch.norm((gt - out), 2, dim=(- 1)))
return loss
|
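As the docstring notes, `L2NormLoss_train` is the mean per-joint position error (MPJPE): reshape to (batch, seq, joints, 3) and average the L2 norms of the per-joint error vectors. A self-contained check, restating the function so only `torch` is assumed:

```python
import torch

def l2_norm_loss_train(gt, out):
    batch_size, _, seq_len = gt.shape
    gt = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
    out = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
    return torch.mean(torch.norm(gt - out, 2, dim=-1))

gt = torch.zeros(2, 6, 4)         # batch=2, 2 joints x 3 coords, seq_len=4
out = torch.full((2, 6, 4), 3.0)  # every coordinate off by 3
# Each per-joint error vector is (3, 3, 3), so every norm is sqrt(27).
assert torch.isclose(l2_norm_loss_train(gt, out), torch.tensor(27.0).sqrt())
```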
def parse_command_args(args):
'\n This parses the arguments and returns a tuple containing:\n\n (args, command, command_args)\n\n For example, "--config=bar start --with=baz" would return:\n\n ([\'--config=bar\'], \'start\', [\'--with=baz\'])\n '
index = None
for (arg_i, arg) in enumerate(args):
if (not arg.startswith('-')):
index = arg_i
break
if (index is None):
return (args, None, [])
return (args[:index], args[index], args[(index + 1):])
| 987,570,457,215,449,000
|
This parses the arguments and returns a tuple containing:
(args, command, command_args)
For example, "--config=bar start --with=baz" would return:
(['--config=bar'], 'start', ['--with=baz'])
|
nautobot/core/runner/runner.py
|
parse_command_args
|
Joezeppe/nautobot
|
python
|
def parse_command_args(args):
'\n This parses the arguments and returns a tuple containing:\n\n (args, command, command_args)\n\n For example, "--config=bar start --with=baz" would return:\n\n ([\'--config=bar\'], \'start\', [\'--with=baz\'])\n '
index = None
for (arg_i, arg) in enumerate(args):
if (not arg.startswith('-')):
index = arg_i
break
if (index is None):
return (args, None, [])
return (args[:index], args[index], args[(index + 1):])
|
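Since the split is "everything before the first token not starting with `-`, that token as the command, the rest as its arguments", the docstring's example can be verified directly against the function body:

```python
def parse_command_args(args):
    index = None
    for arg_i, arg in enumerate(args):
        if not arg.startswith('-'):
            index = arg_i
            break
    if index is None:
        return (args, None, [])
    return (args[:index], args[index], args[index + 1:])

assert parse_command_args(['--config=bar', 'start', '--with=baz']) == \
    (['--config=bar'], 'start', ['--with=baz'])
assert parse_command_args(['--help']) == (['--help'], None, [])  # no command given
```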
def configure_app(config_path=None, project=None, default_config_path=None, default_settings=None, settings_initializer=None, settings_envvar=None, initializer=None, allow_extras=True, config_module_name=None, runner_name=None, on_configure=None):
    '\n :param project: should represent the canonical name for the project, generally\n the same name it is assigned in distutils.\n :param default_config_path: the default location for the configuration file.\n :param default_settings: default settings to load (think inheritance).\n :param settings_initializer: a callback function which should return a string\n representing the default settings template to generate.\n :param initializer: a callback function which will be executed before the command\n is executed. It is passed a dictionary of various configuration attributes.\n '
global __configured
project_filename = sanitize_name(project)
if (default_config_path is None):
default_config_path = ('~/%s/%s.conf.py' % (project_filename, project_filename))
if (settings_envvar is None):
settings_envvar = (project_filename.upper() + '_CONF')
if (config_module_name is None):
config_module_name = (project_filename + '_config')
if (settings_envvar in os.environ):
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if (not config_path):
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if (not os.path.exists(config_path)):
if runner_name:
raise ValueError(("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)))
raise ValueError(('Configuration file does not exist at %r' % (config_path,)))
os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name
def settings_callback(settings):
if (initializer is None):
return
try:
initializer({'project': project, 'config_path': config_path, 'settings': settings})
except Exception:
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(config_module_name, config_path, default_settings, allow_extras=allow_extras, callback=settings_callback)
__configured = True
from django.conf import settings
hasattr(settings, 'INSTALLED_APPS')
if on_configure:
on_configure({'project': project, 'config_path': config_path, 'settings': settings})
| 9,219,377,544,592,713,000
|
:param project: should represent the canonical name for the project, generally
the same name it is assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritance).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
|
nautobot/core/runner/runner.py
|
configure_app
|
Joezeppe/nautobot
|
python
|
def configure_app(config_path=None, project=None, default_config_path=None, default_settings=None, settings_initializer=None, settings_envvar=None, initializer=None, allow_extras=True, config_module_name=None, runner_name=None, on_configure=None):
    '\n :param project: should represent the canonical name for the project, generally\n the same name it is assigned in distutils.\n :param default_config_path: the default location for the configuration file.\n :param default_settings: default settings to load (think inheritance).\n :param settings_initializer: a callback function which should return a string\n representing the default settings template to generate.\n :param initializer: a callback function which will be executed before the command\n is executed. It is passed a dictionary of various configuration attributes.\n '
global __configured
project_filename = sanitize_name(project)
if (default_config_path is None):
default_config_path = ('~/%s/%s.conf.py' % (project_filename, project_filename))
if (settings_envvar is None):
settings_envvar = (project_filename.upper() + '_CONF')
if (config_module_name is None):
config_module_name = (project_filename + '_config')
if (settings_envvar in os.environ):
default_config_path = os.environ.get(settings_envvar)
else:
default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))
if (not config_path):
config_path = default_config_path
config_path = os.path.expanduser(config_path)
if (not os.path.exists(config_path)):
if runner_name:
raise ValueError(("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)))
raise ValueError(('Configuration file does not exist at %r' % (config_path,)))
os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name
def settings_callback(settings):
if (initializer is None):
return
try:
initializer({'project': project, 'config_path': config_path, 'settings': settings})
except Exception:
import sys
import traceback
traceback.print_exc()
sys.exit(1)
importer.install(config_module_name, config_path, default_settings, allow_extras=allow_extras, callback=settings_callback)
__configured = True
from django.conf import settings
hasattr(settings, 'INSTALLED_APPS')
if on_configure:
on_configure({'project': project, 'config_path': config_path, 'settings': settings})
|
def read_data_file(fp):
' Reading the raw data from a file of NeMo format\n For more info about the data format, refer to the\n `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.\n '
(insts, w_words, s_words, classes) = ([], [], [], [])
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if (es[0] == '<eos>'):
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
(w_words, s_words, classes) = ([], [], [])
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
| 720,673,658,514,461,300
|
Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
|
nemo/collections/nlp/data/text_normalization/utils.py
|
read_data_file
|
JMichaelStringer/NeMo
|
python
|
def read_data_file(fp):
' Reading the raw data from a file of NeMo format\n For more info about the data format, refer to the\n `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.\n '
(insts, w_words, s_words, classes) = ([], [], [], [])
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if (es[0] == '<eos>'):
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
(w_words, s_words, classes) = ([], [], [])
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
|
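A hedged illustration of the tab-separated format `read_data_file` consumes: one `class<TAB>written<TAB>spoken` triple per line, with an `<eos>` line closing each sentence (the concrete tokens below, including `<self>`, are illustrative). Running the same parsing loop over this snippet yields one instance:

```python
import io
from copy import deepcopy

sample = 'CARDINAL\t23\ttwenty three\nPLAIN\tdogs\t<self>\n<eos>\t<eos>\n'

insts, w_words, s_words, classes = [], [], [], []
for line in io.StringIO(sample):
    es = [e.strip() for e in line.strip().split('\t')]
    if es[0] == '<eos>':
        insts.append((deepcopy(classes), deepcopy(w_words), deepcopy(s_words)))
        w_words, s_words, classes = [], [], []
    else:
        classes.append(es[0])
        w_words.append(es[1])
        s_words.append(es[2])

assert insts == [(['CARDINAL', 'PLAIN'], ['23', 'dogs'],
                  ['twenty three', '<self>'])]
```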
def normalize_str(input_str, lang):
' Normalize an input string '
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
    input_str = input_str.replace('  ', ' ')
return input_str
| -1,371,477,686,936,655,400
|
Normalize an input string
|
nemo/collections/nlp/data/text_normalization/utils.py
|
normalize_str
|
JMichaelStringer/NeMo
|
python
|
def normalize_str(input_str, lang):
' '
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
    input_str = input_str.replace('  ', ' ')
return input_str
|
def remove_puncts(input_str):
' Remove punctuations from an input string '
return input_str.translate(str.maketrans('', '', string.punctuation))
| 8,084,838,030,692,354,000
|
Remove punctuations from an input string
|
nemo/collections/nlp/data/text_normalization/utils.py
|
remove_puncts
|
JMichaelStringer/NeMo
|
python
|
def remove_puncts(input_str):
' '
    return input_str.translate(str.maketrans('', '', string.punctuation))
|
def basic_tokenize(input_str, lang):
'\n The function is used to do some basic tokenization\n\n Args:\n input_str: The input string\n lang: Language of the input string\n Return: a list of tokens of the input string\n '
if (lang == constants.ENGLISH):
return word_tokenize(input_str)
return input_str.strip().split(' ')
| 7,466,873,734,542,841,000
|
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
|
nemo/collections/nlp/data/text_normalization/utils.py
|
basic_tokenize
|
JMichaelStringer/NeMo
|
python
|
def basic_tokenize(input_str, lang):
'\n The function is used to do some basic tokenization\n\n Args:\n input_str: The input string\n lang: Language of the input string\n Return: a list of tokens of the input string\n '
if (lang == constants.ENGLISH):
return word_tokenize(input_str)
return input_str.strip().split(' ')
|
def _hexify(data, chunksize=_hex_chunksize):
'Convert a binary string into its hex encoding, broken up into chunks\n of I{chunksize} characters separated by a space.\n\n @param data: the binary string\n @type data: string\n @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}\n @rtype: string\n '
line = binascii.hexlify(data)
return b' '.join([line[i:(i + chunksize)] for i in range(0, len(line), chunksize)]).decode()
| 789,127,965,654,570,200
|
Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
_hexify
|
bopopescu/JobSniperRails
|
python
|
def _hexify(data, chunksize=_hex_chunksize):
'Convert a binary string into its hex encoding, broken up into chunks\n of I{chunksize} characters separated by a space.\n\n @param data: the binary string\n @type data: string\n @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}\n @rtype: string\n '
line = binascii.hexlify(data)
return b' '.join([line[i:(i + chunksize)] for i in range(0, len(line), chunksize)]).decode()
|
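The chunking is straightforward to check standalone. A sketch assuming a chunk size of 32 for `dns.rdata._hex_chunksize` (treat that default as an assumption):

```python
import binascii

def hexify(data, chunksize=32):  # 32 mirrors dns.rdata._hex_chunksize (assumption)
    line = binascii.hexlify(data)
    return b' '.join(line[i:i + chunksize]
                     for i in range(0, len(line), chunksize)).decode()

# 24 zero bytes -> 48 hex digits -> one 32-char chunk plus one 16-char chunk.
assert hexify(b'\x00' * 24) == '0' * 32 + ' ' + '0' * 16
```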
def _base64ify(data, chunksize=_base64_chunksize):
'Convert a binary string into its base64 encoding, broken up into chunks\n of I{chunksize} characters separated by a space.\n\n @param data: the binary string\n @type data: string\n @param chunksize: the chunk size. Default is\n L{dns.rdata._base64_chunksize}\n @rtype: string\n '
line = base64.b64encode(data)
return b' '.join([line[i:(i + chunksize)] for i in range(0, len(line), chunksize)]).decode()
| 5,784,675,050,316,418,000
|
Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
_base64ify
|
bopopescu/JobSniperRails
|
python
|
def _base64ify(data, chunksize=_base64_chunksize):
'Convert a binary string into its base64 encoding, broken up into chunks\n of I{chunksize} characters separated by a space.\n\n @param data: the binary string\n @type data: string\n @param chunksize: the chunk size. Default is\n L{dns.rdata._base64_chunksize}\n @rtype: string\n '
line = base64.b64encode(data)
return b' '.join([line[i:(i + chunksize)] for i in range(0, len(line), chunksize)]).decode()
|
def _escapify(qstring):
'Escape the characters in a quoted string which need it.\n\n @param qstring: the string\n @type qstring: string\n @returns: the escaped string\n @rtype: string\n '
if isinstance(qstring, text_type):
qstring = qstring.encode()
if (not isinstance(qstring, bytearray)):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if (c in __escaped):
text += ('\\' + chr(c))
elif ((c >= 32) and (c < 127)):
text += chr(c)
else:
text += ('\\%03d' % c)
return text
| -5,175,706,632,374,009,000
|
Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
_escapify
|
bopopescu/JobSniperRails
|
python
|
def _escapify(qstring):
'Escape the characters in a quoted string which need it.\n\n @param qstring: the string\n @type qstring: string\n @returns: the escaped string\n @rtype: string\n '
if isinstance(qstring, text_type):
qstring = qstring.encode()
if (not isinstance(qstring, bytearray)):
qstring = bytearray(qstring)
    text = ''
for c in qstring:
if (c in __escaped):
text += ('\\' + chr(c))
elif ((c >= 32) and (c < 127)):
text += chr(c)
else:
text += ('\\%03d' % c)
return text
|
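The escaping rule is: backslash-escape characters in a small escape set, pass printable ASCII (32–126) through, and render everything else as a `\DDD` decimal escape. A sketch with a stand-in escape set, since the module-private `__escaped` contents are not shown here:

```python
ESCAPED = frozenset(b'"\\')  # stand-in for the module-private __escaped set

def escapify(qstring):
    if isinstance(qstring, str):
        qstring = qstring.encode()
    text = ''
    for c in bytearray(qstring):
        if c in ESCAPED:
            text += '\\' + chr(c)
        elif 32 <= c < 127:
            text += chr(c)          # printable ASCII passes through
        else:
            text += '\\%03d' % c    # e.g. newline -> \010
    return text

assert escapify('a"b\nc') == 'a\\"b\\010c'
```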
def _truncate_bitmap(what):
"Determine the index of greatest byte that isn't all zeros, and\n return the bitmap that contains all the bytes less than that index.\n\n @param what: a string of octets representing a bitmap.\n @type what: string\n @rtype: string\n "
for i in xrange((len(what) - 1), (- 1), (- 1)):
if (what[i] != 0):
return what[0:(i + 1)]
return what[0:1]
| -8,228,799,384,945,972,000
|
Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
_truncate_bitmap
|
bopopescu/JobSniperRails
|
python
|
def _truncate_bitmap(what):
"Determine the index of greatest byte that isn't all zeros, and\n return the bitmap that contains all the bytes less than that index.\n\n @param what: a string of octets representing a bitmap.\n @type what: string\n @rtype: string\n "
for i in xrange((len(what) - 1), (- 1), (- 1)):
if (what[i] != 0):
return what[0:(i + 1)]
return what[0:1]
|
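The truncation just drops trailing all-zero bytes while always keeping at least one; a Python 3 restatement (the original iterates with the py2/3 `xrange` shim):

```python
def truncate_bitmap(what):
    # Scan backwards for the last non-zero byte and keep everything up to it.
    for i in range(len(what) - 1, -1, -1):
        if what[i] != 0:
            return what[:i + 1]
    return what[:1]  # all zeros: keep a single byte

assert truncate_bitmap(b'\x80\x01\x00\x00') == b'\x80\x01'
assert truncate_bitmap(b'\x00\x00') == b'\x00'
```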
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
'Build an rdata object from text format.\n\n This function attempts to dynamically load a class which\n implements the specified rdata class and type. If there is no\n class-and-type-specific implementation, the GenericRdata class\n is used.\n\n Once a class is chosen, its from_text() class method is called\n with the parameters to this function.\n\n If I{tok} is a string, then a tokenizer is created and the string\n is used as its input.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param tok: The tokenizer or input text\n @type tok: dns.tokenizer.Tokenizer or string\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @param relativize: Should names be relativized?\n @type relativize: bool\n @rtype: dns.rdata.Rdata instance'
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if (cls != GenericRdata):
token = tok.get()
tok.unget(token)
if (token.is_identifier() and (token.value == '\\#')):
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin, relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data), origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
| 8,269,539,008,425,469,000
|
Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
from_text
|
bopopescu/JobSniperRails
|
python
|
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
'Build an rdata object from text format.\n\n This function attempts to dynamically load a class which\n implements the specified rdata class and type. If there is no\n class-and-type-specific implementation, the GenericRdata class\n is used.\n\n Once a class is chosen, its from_text() class method is called\n with the parameters to this function.\n\n If I{tok} is a string, then a tokenizer is created and the string\n is used as its input.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param tok: The tokenizer or input text\n @type tok: dns.tokenizer.Tokenizer or string\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @param relativize: Should names be relativized?\n @type relativize: bool\n @rtype: dns.rdata.Rdata instance'
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if (cls != GenericRdata):
token = tok.get()
tok.unget(token)
if (token.is_identifier() and (token.value == '\\#')):
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin, relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data), origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
|
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
'Build an rdata object from wire format\n\n This function attempts to dynamically load a class which\n implements the specified rdata class and type. If there is no\n class-and-type-specific implementation, the GenericRdata class\n is used.\n\n Once a class is chosen, its from_wire() class method is called\n with the parameters to this function.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param wire: The wire-format message\n @type wire: string\n @param current: The offset in wire of the beginning of the rdata.\n @type current: int\n @param rdlen: The length of the wire-format rdata\n @type rdlen: int\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @rtype: dns.rdata.Rdata instance'
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
| -6,306,272,264,640,259,000
|
Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
from_wire
|
bopopescu/JobSniperRails
|
python
|
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
'Build an rdata object from wire format\n\n This function attempts to dynamically load a class which\n implements the specified rdata class and type. If there is no\n class-and-type-specific implementation, the GenericRdata class\n is used.\n\n Once a class is chosen, its from_wire() class method is called\n with the parameters to this function.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param wire: The wire-format message\n @type wire: string\n @param current: The offset in wire of the beginning of the rdata.\n @type current: int\n @param rdlen: The length of the wire-format rdata\n @type rdlen: int\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @rtype: dns.rdata.Rdata instance'
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
|
def __init__(self, rdclass, rdtype):
'Initialize an rdata.\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n '
self.rdclass = rdclass
self.rdtype = rdtype
| 5,392,004,270,510,241,000
|
Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
__init__
|
bopopescu/JobSniperRails
|
python
|
def __init__(self, rdclass, rdtype):
'Initialize an rdata.\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n '
self.rdclass = rdclass
self.rdtype = rdtype
|
def covers(self):
'DNS SIG/RRSIG rdatas apply to a specific type; this type is\n returned by the covers() function. If the rdata type is not\n SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when\n creating rdatasets, allowing the rdataset to contain only RRSIGs\n of a particular type, e.g. RRSIG(NS).\n @rtype: int\n '
return dns.rdatatype.NONE
| -3,506,249,151,304,646,000
|
DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
covers
|
bopopescu/JobSniperRails
|
python
|
def covers(self):
'DNS SIG/RRSIG rdatas apply to a specific type; this type is\n returned by the covers() function. If the rdata type is not\n SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when\n creating rdatasets, allowing the rdataset to contain only RRSIGs\n of a particular type, e.g. RRSIG(NS).\n @rtype: int\n '
return dns.rdatatype.NONE
|
def extended_rdatatype(self):
'Return a 32-bit type value, the least significant 16 bits of\n which are the ordinary DNS type, and the upper 16 bits of which are\n the "covered" type, if any.\n @rtype: int\n '
return ((self.covers() << 16) | self.rdtype)
| 5,964,719,601,966,584,000
|
Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
extended_rdatatype
|
bopopescu/JobSniperRails
|
python
|
def extended_rdatatype(self):
'Return a 32-bit type value, the least significant 16 bits of\n which are the ordinary DNS type, and the upper 16 bits of which are\n the "covered" type, if any.\n @rtype: int\n '
return ((self.covers() << 16) | self.rdtype)
|
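The packing is plain bit arithmetic: the covered type sits in the high 16 bits and the ordinary type in the low 16. For example, an RRSIG(NS) rdata (rdtype 46, covering type 2):

```python
covers, rdtype = 2, 46  # NS covered by RRSIG
extended = (covers << 16) | rdtype
assert extended == 0x0002002E
# The two halves unpack cleanly.
assert (extended >> 16, extended & 0xFFFF) == (covers, rdtype)
```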
def to_text(self, origin=None, relativize=True, **kw):
'Convert an rdata to text format.\n @rtype: string\n '
raise NotImplementedError
| -1,293,614,360,225,144,300
|
Convert an rdata to text format.
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
to_text
|
bopopescu/JobSniperRails
|
python
|
def to_text(self, origin=None, relativize=True, **kw):
'Convert an rdata to text format.\n @rtype: string\n '
raise NotImplementedError
|
def to_wire(self, file, compress=None, origin=None):
'Convert an rdata to wire format.\n @rtype: string\n '
raise NotImplementedError
| -891,095,099,515,168,300
|
Convert an rdata to wire format.
@rtype: string
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
to_wire
|
bopopescu/JobSniperRails
|
python
|
def to_wire(self, file, compress=None, origin=None):
'Convert an rdata to wire format.\n @rtype: string\n '
raise NotImplementedError
|
def to_digestable(self, origin=None):
'Convert rdata to a format suitable for digesting in hashes. This\n is also the DNSSEC canonical form.'
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
| 8,274,505,152,368,702,000
|
Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form.
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
to_digestable
|
bopopescu/JobSniperRails
|
python
|
def to_digestable(self, origin=None):
'Convert rdata to a format suitable for digesting in hashes. This\n is also the DNSSEC canonical form.'
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
|
def validate(self):
"Check that the current contents of the rdata's fields are\n valid. If you change an rdata by assigning to its fields,\n it is a good idea to call validate() when you are done making\n changes.\n "
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
| 6,729,846,158,027,398,000
|
Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
validate
|
bopopescu/JobSniperRails
|
python
|
def validate(self):
"Check that the current contents of the rdata's fields are\n valid. If you change an rdata by assigning to its fields,\n it is a good idea to call validate() when you are done making\n changes.\n "
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
|
def _cmp(self, other):
'Compare an rdata with another rdata of the same rdtype and\n rdclass. Return < 0 if self < other in the DNSSEC ordering,\n 0 if self == other, and > 0 if self > other.\n '
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if (our == their):
return 0
if (our > their):
return 1
return (- 1)
| -7,287,323,378,498,873,000
|
Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
_cmp
|
bopopescu/JobSniperRails
|
python
|
def _cmp(self, other):
'Compare an rdata with another rdata of the same rdtype and\n rdclass. Return < 0 if self < other in the DNSSEC ordering,\n 0 if self == other, and > 0 if self > other.\n '
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if (our == their):
return 0
if (our > their):
return 1
return (- 1)
|
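_cmp() anchors the rich comparison operators: converting both rdatas to canonical wire form relative to the root makes the ordering independent of name relativization. A hedged sketch, again assuming the dnspython 1.x API and made-up addresses:

# Hypothetical sketch; assumes dnspython 1.x is importable.
import dns.rdata
import dns.rdataclass
import dns.rdatatype

a = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1')
b = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.2')
assert a._cmp(b) < 0  # DNSSEC canonical ordering of the wire forms
assert a < b          # the rich comparisons are built on _cmp()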
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
'Build an rdata object from text format.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param tok: The tokenizer\n @type tok: dns.tokenizer.Tokenizer\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @param relativize: should names be relativized?\n @type relativize: bool\n @rtype: dns.rdata.Rdata instance\n '
raise NotImplementedError
| 7,968,069,574,541,789,000
|
Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
from_text
|
bopopescu/JobSniperRails
|
python
|
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
'Build an rdata object from text format.\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param tok: The tokenizer\n @type tok: dns.tokenizer.Tokenizer\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @param relativize: should names be relativized?\n @type relativize: bool\n @rtype: dns.rdata.Rdata instance\n '
raise NotImplementedError
|
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
'Build an rdata object from wire format\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param wire: The wire-format message\n @type wire: string\n @param current: The offset in wire of the beginning of the rdata.\n @type current: int\n @param rdlen: The length of the wire-format rdata\n @type rdlen: int\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @rtype: dns.rdata.Rdata instance\n '
raise NotImplementedError
| 6,276,165,160,507,597,000
|
Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
from_wire
|
bopopescu/JobSniperRails
|
python
|
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
'Build an rdata object from wire format\n\n @param rdclass: The rdata class\n @type rdclass: int\n @param rdtype: The rdata type\n @type rdtype: int\n @param wire: The wire-format message\n @type wire: string\n @param current: The offset in wire of the beginning of the rdata.\n @type current: int\n @param rdlen: The length of the wire-format rdata\n @type rdlen: int\n @param origin: The origin to use for relative names\n @type origin: dns.name.Name\n @rtype: dns.rdata.Rdata instance\n '
raise NotImplementedError
|
def choose_relativity(self, origin=None, relativize=True):
'Convert any domain names in the rdata to the specified\n relativization.\n '
pass
| -780,963,153,621,007,400
|
Convert any domain names in the rdata to the specified
relativization.
|
gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py
|
choose_relativity
|
bopopescu/JobSniperRails
|
python
|
def choose_relativity(self, origin=None, relativize=True):
'Convert any domain names in the rdata to the specified\n relativization.\n '
pass
|
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name='UNKNOWN'):
'\n Attempts to return some information from a DICOM\n This is typically used for naming converted NIFTI files\n\n Args:\n dicom_object (pydicom.dataset.FileDataset): The DICOM object\n return_extra (bool, optional): return information that is usually not required\n\n Returns:\n info (str): Some extracted information\n '
try:
dicom_sop_class_name = dicom_object.SOPClassUID.name
except AttributeError:
logger.warning(f'Could not find DICOM SOP Class UID, using {sop_class_name}.')
dicom_sop_class_name = sop_class_name
if ('Image' in dicom_sop_class_name):
image_modality = dicom_object.Modality
logger.info(f' Image modality: {image_modality}')
if (image_modality == 'CT'):
if return_extra:
try:
protocol_name = dicom_object.ProtocolName
if (protocol_name != ''):
return re.sub('[^\\w]', '_', protocol_name).upper()
except AttributeError:
logger.warning(' Could not find ProtocolName')
return ''
elif (image_modality == 'MR'):
try:
protocol_name = re.sub('[^\\w]', '_', dicom_object.ProtocolName).upper()
except AttributeError:
logger.warning(' Could not find ProtocolName')
protocol_name = ''
try:
sequence_name = re.sub('[^\\w]', '_', dicom_object.SequenceName).upper()
except AttributeError:
logger.warning(' Could not find SequenceName')
sequence_name = ''
try:
series_description = re.sub('[^\\w]', '_', dicom_object.SeriesDescription).upper()
except AttributeError:
logger.warning(' Could not find SeriesDescription')
series_description = ''
combined_name = '_'.join([protocol_name, sequence_name, series_description])
while ('__' in combined_name):
combined_name = combined_name.replace('__', '_')
if ((protocol_name != '') and (not return_extra)):
return protocol_name
else:
return combined_name
elif (image_modality == 'PT'):
try:
corrections = dicom_object.CorrectedImage
except AttributeError:
corrections = 'NONE'
if ('ATTN' in corrections):
return 'AC'
else:
return 'NAC'
| -8,754,313,118,472,001,000
|
Attempts to return some information from a DICOM
This is typically used for naming converted NIFTI files
Args:
dicom_object (pydicom.dataset.FileDataset): The DICOM object
return_extra (bool, optional): return information that is usually not required
Returns:
info (str): Some extracted information
|
platipy/dicom/io/crawl.py
|
get_dicom_info_from_description
|
RadiotherapyAI/platipy
|
python
|
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name='UNKNOWN'):
'\n Attempts to return some information from a DICOM\n This is typically used for naming converted NIFTI files\n\n Args:\n dicom_object (pydicom.dataset.FileDataset): The DICOM object\n return_extra (bool, optional): return information that is usually not required\n\n Returns:\n info (str): Some extracted information\n '
try:
dicom_sop_class_name = dicom_object.SOPClassUID.name
except AttributeError:
logger.warning(f'Could not find DICOM SOP Class UID, using {sop_class_name}.')
dicom_sop_class_name = sop_class_name
if ('Image' in dicom_sop_class_name):
image_modality = dicom_object.Modality
logger.info(f' Image modality: {image_modality}')
if (image_modality == 'CT'):
if return_extra:
try:
protocol_name = dicom_object.ProtocolName
if (protocol_name != ''):
return re.sub('[^\\w]', '_', protocol_name).upper()
except AttributeError:
logger.warning(' Could not find ProtocolName')
return ''
elif (image_modality == 'MR'):
try:
protocol_name = re.sub('[^\\w]', '_', dicom_object.ProtocolName).upper()
except AttributeError:
logger.warning(' Could not find ProtocolName')
protocol_name = ''
try:
sequence_name = re.sub('[^\\w]', '_', dicom_object.SequenceName).upper()
except AttributeError:
logger.warning(' Could not find SequenceName')
sequence_name = ''
try:
series_description = re.sub('[^\\w]', '_', dicom_object.SeriesDescription).upper()
except AttributeError:
logger.warning(' Could not find SeriesDescription')
series_description = ''
combined_name = '_'.join([protocol_name, sequence_name, series_description])
while ('__' in combined_name):
combined_name = combined_name.replace('__', '_')
if ((protocol_name != '') and (not return_extra)):
return protocol_name
else:
return combined_name
elif (image_modality == 'PT'):
try:
corrections = dicom_object.CorrectedImage
except AttributeError:
corrections = 'NONE'
if ('ATTN' in corrections):
return 'AC'
else:
return 'NAC'
|
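A hedged usage sketch: read a file with pydicom and use the extracted information to name a converted NIFTI file. The path is a placeholder and the naming scheme is illustrative only:

# Hypothetical usage sketch; assumes pydicom is installed and the path exists.
import pydicom

dicom_object = pydicom.dcmread('/data/ct/slice_0001.dcm')  # placeholder path
info = get_dicom_info_from_description(dicom_object, return_extra=True)
output_name = f'CT_{info}.nii.gz' if info else 'CT.nii.gz'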
def safe_sort_dicom_image_list(dicom_image_list):
'\n Sorts a list of DICOM image files based on a DICOM tag value.\n This is a much safer method than reading SliceLocation.\n It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).\n The list of DICOM files is sorted by projecting the image position onto the axis normal to the\n plane defined by the image orientation.\n\n This accounts for differences in patient position (e.g. HFS/FFS).\n\n Args:\n dicom_image_list (list): Paths of the DICOM image files to sort.\n '
sorted_dict = {}
for dicom_file in dicom_image_list:
dcm = pydicom.read_file(dicom_file, force=True)
image_position = np.array(dcm.ImagePositionPatient, dtype=float)
image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
slice_location = (image_position * image_plane_normal)[2]
sorted_dict[dicom_file] = slice_location
sorter_safe = (lambda dcm_file: sorted_dict[dcm_file])
return sorted(dicom_image_list, key=sorter_safe)
| -7,740,010,041,485,456,000
|
Sorts a list of DICOM image files based on a DICOM tag value.
This is a much safer method than reading SliceLocation.
It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
The list of DICOM files is sorted by projecting the image position onto the axis normal to the
plane defined by the image orientation.
This accounts for differences in patient position (e.g. HFS/FFS).
Args:
dicom_image_list (list): Paths of the DICOM image files to sort.
|
platipy/dicom/io/crawl.py
|
safe_sort_dicom_image_list
|
RadiotherapyAI/platipy
|
python
|
def safe_sort_dicom_image_list(dicom_image_list):
'\n Sorts a list of DICOM image files based on a DICOM tag value.\n This is a much safer method than reading SliceLocation.\n It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).\n The list of DICOM files is sorted by projecting the image position onto the axis normal to the\n plane defined by the image orientation.\n\n This accounts for differences in patient position (e.g. HFS/FFS).\n\n Args:\n dicom_image_list (list): Paths of the DICOM image files to sort.\n '
sorted_dict = {}
for dicom_file in dicom_image_list:
dcm = pydicom.read_file(dicom_file, force=True)
image_position = np.array(dcm.ImagePositionPatient, dtype=float)
image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
slice_location = (image_position * image_plane_normal)[2]
sorted_dict[dicom_file] = slice_location
sorter_safe = (lambda dcm_file: sorted_dict[dcm_file])
return sorted(dicom_image_list, key=sorter_safe)
|
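The key step is the projection: the slice normal is the cross product of the row and column direction cosines, and projecting ImagePositionPatient onto it gives a sort key that is monotonic along the stack. Note the function keeps only element [2] of the element-wise product; for purely axial orientations that equals the full dot product, as the standalone sketch below shows with made-up values (np.dot would be the generalization for oblique stacks):

# Standalone illustration with made-up axial values.
import numpy as np

image_orientation = np.array([1, 0, 0, 0, 1, 0], dtype=float)   # axial
image_position = np.array([-250.0, -250.0, 12.5])                # mm
normal = np.cross(image_orientation[:3], image_orientation[3:])  # (0, 0, 1)
assert np.dot(image_position, normal) == (image_position * normal)[2] == 12.5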
def fix_missing_data(contour_data_list):
'\n Fixes missing points in contouring using simple linear interpolation\n\n\n Args:\n contour_data_list (list): The contour data for each slice\n\n Returns:\n contour_data (numpy array): Interpolated contour data\n '
contour_data = np.array(contour_data_list)
if (contour_data.any() == ''):
logger.warning(' Missing values detected.')
missing_values = np.where((contour_data == ''))[0]
if (missing_values.shape[0] > 1):
logger.warning(" More than one value missing, fixing this isn't implemented yet...")
else:
logger.warning(' Only one value missing.')
missing_index = missing_values[0]
missing_axis = (missing_index % 3)
if (missing_axis == 0):
logger.warning(' Missing value in x axis: interpolating.')
if (missing_index > (len(contour_data) - 3)):
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[0]
elif (missing_index == 0):
lower_val = contour_data[(- 3)]
upper_val = contour_data[3]
else:
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[(missing_index + 3)]
contour_data[missing_index] = (0.5 * (lower_val + upper_val))
elif (missing_axis == 1):
logger.warning(' Missing value in y axis: interpolating.')
if (missing_index > (len(contour_data) - 2)):
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[1]
elif (missing_index == 0):
lower_val = contour_data[(- 2)]
upper_val = contour_data[4]
else:
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[(missing_index + 3)]
contour_data[missing_index] = (0.5 * (lower_val + upper_val))
else:
logger.warning(' Missing value in z axis: taking slice value')
temp = contour_data[2::3].tolist()
temp.remove('')
contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
return contour_data
| -7,673,489,679,004,548,000
|
Fixes missing points in contouring using simple linear interpolation
Args:
contour_data_list (list): The contour data for each slice
Returns:
contour_data (numpy array): Interpolated contour data
|
platipy/dicom/io/crawl.py
|
fix_missing_data
|
RadiotherapyAI/platipy
|
python
|
def fix_missing_data(contour_data_list):
'\n Fixes missing points in contouring using simple linear interpolation\n\n\n Args:\n contour_data_list (list): The contour data for each slice\n\n Returns:\n contour_data (numpy array): Interpolated contour data\n '
contour_data = np.array(contour_data_list)
if (contour_data.any() == ''):
logger.warning(' Missing values detected.')
missing_values = np.where((contour_data == ''))[0]
if (missing_values.shape[0] > 1):
logger.warning(" More than one value missing, fixing this isn't implemented yet...")
else:
logger.warning(' Only one value missing.')
missing_index = missing_values[0]
missing_axis = (missing_index % 3)
if (missing_axis == 0):
logger.warning(' Missing value in x axis: interpolating.')
if (missing_index > (len(contour_data) - 3)):
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[0]
elif (missing_index == 0):
lower_val = contour_data[(- 3)]
upper_val = contour_data[3]
else:
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[(missing_index + 3)]
contour_data[missing_index] = (0.5 * (lower_val + upper_val))
elif (missing_axis == 1):
logger.warning(' Missing value in y axis: interpolating.')
if (missing_index > (len(contour_data) - 2)):
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[1]
elif (missing_index == 0):
lower_val = contour_data[(- 2)]
upper_val = contour_data[4]
else:
lower_val = contour_data[(missing_index - 3)]
upper_val = contour_data[(missing_index + 3)]
contour_data[missing_index] = (0.5 * (lower_val + upper_val))
else:
logger.warning(' Missing value in z axis: taking slice value')
temp = contour_data[2::3].tolist()
temp.remove('')
contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
return contour_data
|
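The interpolation itself is simple to state: the contour data arrive as a flat [x0, y0, z0, x1, y1, z1, ...] sequence, so the neighbours of a missing x or y sit three positions away on either side, and the repaired value is their mean. A standalone sketch of that arithmetic with made-up values (it does not call the function above, whose missing-value detection depends on how pydicom represents the empty entry):

# Standalone illustration of the neighbour arithmetic; values are made up.
import numpy as np

contour_data = np.array([0.0, 10.0, 2.5,
                         np.nan, 11.0, 2.5,   # stand-in for a missing x
                         2.0, 10.0, 2.5])
missing_index = 3
missing_axis = missing_index % 3             # 0 -> x, 1 -> y, 2 -> z
lower_val = contour_data[missing_index - 3]  # x of the previous point
upper_val = contour_data[missing_index + 3]  # x of the next point
contour_data[missing_index] = 0.5 * (lower_val + upper_val)
assert contour_data[3] == 1.0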
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
'\n This function is used to generate a binary mask from a set of vertices.\n This allows us to convert from DICOM-RTStruct format to any imaging format.\n\n Args:\n image ([SimpleITK.Image]): The image, used to copy imaging information\n (e.g. resolution, spacing)\n dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file\n spacing_override (bool | tuple, optional): Overwrite the spacing.\n Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.\n\n Returns:\n list, list : final_struct_name_sequence, structure_list\n '
if spacing_override:
current_spacing = list(image.GetSpacing())
new_spacing = tuple([(current_spacing[k] if (spacing_override[k] == 0) else spacing_override[k]) for k in range(3)])
image.SetSpacing(new_spacing)
struct_point_sequence = dicom_struct.ROIContourSequence
struct_name_sequence = ['_'.join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence]
structure_list = []
final_struct_name_sequence = []
for (structIndex, structure_name) in enumerate(struct_name_sequence):
image_blank = np.zeros(image.GetSize()[::(- 1)], dtype=np.uint8)
logger.info(' Converting structure {0} with name: {1}'.format(structIndex, structure_name))
if (structIndex >= len(struct_point_sequence)):
logger.warning(' Contour sequence is missing, skipping.')
continue
if (not hasattr(struct_point_sequence[structIndex], 'ContourSequence')):
logger.warning(' No contour sequence found for this structure, skipping.')
continue
if (len(struct_point_sequence[structIndex].ContourSequence) == 0):
logger.warning(' Contour sequence is empty, skipping.')
continue
if (not (struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType == 'CLOSED_PLANAR')):
logger.warning(' This is not a closed planar structure, skipping.')
continue
for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
contour_data = fix_missing_data(struct_point_sequence[structIndex].ContourSequence[sl].ContourData)
struct_slice_contour_data = np.array(contour_data, dtype=np.double)
vertexArr_physical = struct_slice_contour_data.reshape((struct_slice_contour_data.shape[0] // 3), 3)
point_arr = np.array([image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]).T
[xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
zIndex = point_arr[2][0]
if np.any((point_arr[2] != zIndex)):
logger.error(' Axial slice index varies in contour. Quitting now.')
logger.error(' Structure: {0}'.format(structure_name))
logger.error(' Slice index: {0}'.format(zIndex))
quit()
if (zIndex >= image.GetSize()[2]):
logger.warning(' Slice index greater than image size. Skipping slice.')
logger.warning(' Structure: {0}'.format(structure_name))
logger.warning(' Slice index: {0}'.format(zIndex))
continue
sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
(filledIndicesX, filledIndicesY) = polygon(xVertexArr_image, yVertexArr_image, shape=sliceArr.shape)
sliceArr[(filledIndicesX, filledIndicesY)] = 1
image_blank[zIndex] += sliceArr.T
struct_image = sitk.GetImageFromArray((1 * (image_blank > 0)))
struct_image.CopyInformation(image)
structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
structure_name_clean = re.sub('[^\\w]', '_', structure_name).upper()
while ('__' in structure_name_clean):
structure_name_clean = structure_name_clean.replace('__', '_')
final_struct_name_sequence.append(structure_name_clean)
return (final_struct_name_sequence, structure_list)
| 2,426,919,697,974,402,600
|
This function is used to generate a binary mask from a set of vertices.
This allows us to convert from DICOM-RTStruct format to any imaging format.
Args:
image ([SimpleITK.Image]): The image, used to copy imaging information
(e.g. resolution, spacing)
dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
spacing_override (bool | tuple, optional): Overwrite the spacing.
Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
Returns:
list, list : final_struct_name_sequence, structure_list
|
platipy/dicom/io/crawl.py
|
transform_point_set_from_dicom_struct
|
RadiotherapyAI/platipy
|
python
|
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
'\n This function is used to generate a binary mask from a set of vertices.\n This allows us to convert from DICOM-RTStruct format to any imaging format.\n\n Args:\n image ([SimpleITK.Image]): The image, used to copy imaging information\n (e.g. resolution, spacing)\n dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file\n spacing_override (bool | tuple, optional): Overwrite the spacing.\n Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.\n\n Returns:\n list, list : final_struct_name_sequence, structure_list\n '
if spacing_override:
current_spacing = list(image.GetSpacing())
new_spacing = tuple([(current_spacing[k] if (spacing_override[k] == 0) else spacing_override[k]) for k in range(3)])
image.SetSpacing(new_spacing)
struct_point_sequence = dicom_struct.ROIContourSequence
struct_name_sequence = ['_'.join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence]
structure_list = []
final_struct_name_sequence = []
for (structIndex, structure_name) in enumerate(struct_name_sequence):
image_blank = np.zeros(image.GetSize()[::(- 1)], dtype=np.uint8)
logger.info(' Converting structure {0} with name: {1}'.format(structIndex, structure_name))
if (structIndex >= len(struct_point_sequence)):
logger.warning(' Contour sequence is missing, skipping.')
continue
if (not hasattr(struct_point_sequence[structIndex], 'ContourSequence')):
logger.warning(' No contour sequence found for this structure, skipping.')
continue
if (len(struct_point_sequence[structIndex].ContourSequence) == 0):
logger.warning(' Contour sequence is empty, skipping.')
continue
if (not (struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType == 'CLOSED_PLANAR')):
logger.warning(' This is not a closed planar structure, skipping.')
continue
for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
contour_data = fix_missing_data(struct_point_sequence[structIndex].ContourSequence[sl].ContourData)
struct_slice_contour_data = np.array(contour_data, dtype=np.double)
vertexArr_physical = struct_slice_contour_data.reshape((struct_slice_contour_data.shape[0] // 3), 3)
point_arr = np.array([image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]).T
[xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
zIndex = point_arr[2][0]
if np.any((point_arr[2] != zIndex)):
logger.error(' Axial slice index varies in contour. Quitting now.')
logger.error(' Structure: {0}'.format(structure_name))
logger.error(' Slice index: {0}'.format(zIndex))
quit()
if (zIndex >= image.GetSize()[2]):
logger.warning(' Slice index greater than image size. Skipping slice.')
logger.warning(' Structure: {0}'.format(structure_name))
logger.warning(' Slice index: {0}'.format(zIndex))
continue
sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
(filledIndicesX, filledIndicesY) = polygon(xVertexArr_image, yVertexArr_image, shape=sliceArr.shape)
sliceArr[(filledIndicesX, filledIndicesY)] = 1
image_blank[zIndex] += sliceArr.T
struct_image = sitk.GetImageFromArray((1 * (image_blank > 0)))
struct_image.CopyInformation(image)
structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
structure_name_clean = re.sub('[^\\w]', '_', structure_name).upper()
while ('__' in structure_name_clean):
structure_name_clean = structure_name_clean.replace('__', '_')
final_struct_name_sequence.append(structure_name_clean)
return (final_struct_name_sequence, structure_list)
|
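A hedged end-to-end usage sketch, pairing the function with SimpleITK I/O; all paths are placeholders and the image is assumed to be the series the RTStruct references:

# Hypothetical usage sketch; assumes pydicom and SimpleITK are installed.
import pydicom
import SimpleITK as sitk

image = sitk.ReadImage('/data/patient/ct.nii.gz')         # placeholder path
rtstruct = pydicom.dcmread('/data/patient/rtstruct.dcm')  # placeholder path
names, masks = transform_point_set_from_dicom_struct(image, rtstruct)
for name, mask in zip(names, masks):
    sitk.WriteImage(mask, f'STRUCT_{name}.nii.gz')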
def process_dicom_file_list(dicom_file_list, parent_sorting_field='PatientName', verbose=False):
'\n Organise the DICOM files by the series UID\n '
dicom_series_dict_parent = {}
for (i, dicom_file) in enumerate(sorted(dicom_file_list)):
if (verbose is True):
logger.debug(f' Sorting file {i}')
dicom_file = dicom_file.as_posix()
if ('dicomdir' in dicom_file.lower()):
logger.warning('DICOMDIR is not supported in this tool, images are read directly. Skipping.')
continue
dicom_object = pydicom.read_file(dicom_file, force=True)
parent_sorting_field_data = dicom_object[parent_sorting_field].value
if (parent_sorting_field_data not in dicom_series_dict_parent.keys()):
dicom_series_dict_parent[parent_sorting_field_data] = {}
series_uid = dicom_object.SeriesInstanceUID
if (series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys()):
dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]
else:
dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)
return dicom_series_dict_parent
| 1,907,774,043,911,735,000
|
Organise the DICOM files by the series UID
|
platipy/dicom/io/crawl.py
|
process_dicom_file_list
|
RadiotherapyAI/platipy
|
python
|
def process_dicom_file_list(dicom_file_list, parent_sorting_field='PatientName', verbose=False):
'\n \n '
dicom_series_dict_parent = {}
for (i, dicom_file) in enumerate(sorted(dicom_file_list)):
if (verbose is True):
logger.debug(f' Sorting file {i}')
dicom_file = dicom_file.as_posix()
if ('dicomdir' in dicom_file.lower()):
logger.warning('DICOMDIR is not supported in this tool, images are read directly. Skipping.')
continue
dicom_object = pydicom.read_file(dicom_file, force=True)
parent_sorting_field_data = dicom_object[parent_sorting_field].value
if (parent_sorting_field_data not in dicom_series_dict_parent.keys()):
dicom_series_dict_parent[parent_sorting_field_data] = {}
series_uid = dicom_object.SeriesInstanceUID
if (series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys()):
dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]
else:
dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)
return dicom_series_dict_parent
|
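A hedged usage sketch; note the function calls .as_posix() on each entry, so it expects pathlib.Path objects rather than plain strings. The directory is a placeholder:

# Hypothetical usage sketch; the directory is a placeholder.
import pathlib

dicom_file_list = list(pathlib.Path('/data/dicom').glob('**/*.dcm'))
series_dict = process_dicom_file_list(dicom_file_list, parent_sorting_field='PatientID')
for patient_id, series in series_dict.items():
    for series_uid, files in series.items():
        print(patient_id, series_uid, len(files))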
def write_output_data_to_disk(output_data_dict, output_directory='./', output_file_suffix='.nii.gz', overwrite_existing_files=False):
'\n Write output to disk\n '
if (output_data_dict is None):
return
filename_fields = [i for i in output_data_dict.keys() if (i != 'parent_sorting_data')]
parent_sorting_data = output_data_dict['parent_sorting_data']
files_written = {}
'\n Write the converted images to disk\n\n ! CONSIDER\n We could simply write as we go?\n Pro: save memory, important if processing very large files\n Con: Reading as we go allows proper indexing\n\n '
for field in filename_fields:
logger.info(f' Writing files for field: {field}')
p = ((pathlib.Path(output_directory) / parent_sorting_data) / field)
p.mkdir(parents=True, exist_ok=True)
files_written[field] = []
for (field_filename_base, field_list) in output_data_dict[field].items():
if isinstance(field_list, (tuple, list)):
field_list_flat = list(flatten(field_list))
for (suffix, file_to_write) in enumerate(field_list_flat):
field_filename = (field_filename_base + f'_{suffix}')
while ('__' in field_filename):
field_filename = field_filename.replace('__', '_')
while (field_filename[(- 1)] == '_'):
field_filename = field_filename[:(- 1)]
output_name = (((pathlib.Path(output_directory) / parent_sorting_data) / field) / (field_filename + output_file_suffix))
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f' File exists: {output_name}')
if overwrite_existing_files:
logger.warning(' You have selected to overwrite existing files.')
else:
logger.info(' You have selected to NOT overwrite existing files. Continuing.')
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
else:
field_filename = field_filename_base
file_to_write = field_list
while ('__' in field_filename):
field_filename = field_filename.replace('__', '_')
while (field_filename[(- 1)] == '_'):
field_filename = field_filename[:(- 1)]
'\n ! TO DO\n Use pathlib, and perform some checks so we don"t overwrite anything!\n '
output_name = (((pathlib.Path(output_directory) / parent_sorting_data) / field) / (field_filename + output_file_suffix))
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f' File exists: {output_name}')
if overwrite_existing_files:
logger.warning(' You have selected to overwrite existing files.')
else:
logger.info(' You have selected to NOT overwrite existing files. Continuing.')
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
return files_written
| 7,902,782,233,313,389,000
|
Write output to disk
|
platipy/dicom/io/crawl.py
|
write_output_data_to_disk
|
RadiotherapyAI/platipy
|
python
|
def write_output_data_to_disk(output_data_dict, output_directory='./', output_file_suffix='.nii.gz', overwrite_existing_files=False):
'\n \n '
if (output_data_dict is None):
return
filename_fields = [i for i in output_data_dict.keys() if (i != 'parent_sorting_data')]
parent_sorting_data = output_data_dict['parent_sorting_data']
files_written = {}
'\n Write the converted images to disk\n\n ! CONSIDER\n We could simply write as we go?\n Pro: save memory, important if processing very large files\n Con: Reading as we go allows proper indexing\n\n '
for field in filename_fields:
logger.info(f' Writing files for field: {field}')
p = ((pathlib.Path(output_directory) / parent_sorting_data) / field)
p.mkdir(parents=True, exist_ok=True)
files_written[field] = []
for (field_filename_base, field_list) in output_data_dict[field].items():
if isinstance(field_list, (tuple, list)):
field_list_flat = list(flatten(field_list))
for (suffix, file_to_write) in enumerate(field_list_flat):
field_filename = (field_filename_base + f'_{suffix}')
while ('__' in field_filename):
field_filename = field_filename.replace('__', '_')
while (field_filename[(- 1)] == '_'):
field_filename = field_filename[:(- 1)]
output_name = (((pathlib.Path(output_directory) / parent_sorting_data) / field) / (field_filename + output_file_suffix))
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f' File exists: {output_name}')
if overwrite_existing_files:
logger.warning(' You have selected to overwrite existing files.')
else:
logger.info(' You have selected to NOT overwrite existing files. Continuing.')
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
else:
field_filename = field_filename_base
file_to_write = field_list
while ('__' in field_filename):
field_filename = field_filename.replace('__', '_')
while (field_filename[(- 1)] == '_'):
field_filename = field_filename[:(- 1)]
'\n ! TO DO\n Use pathlib, and perform some checks so we don"t overwrite anything!\n '
output_name = (((pathlib.Path(output_directory) / parent_sorting_data) / field) / (field_filename + output_file_suffix))
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f' File exists: {output_name}')
if overwrite_existing_files:
logger.warning(' You have selected to overwrite existing files.')
else:
logger.info(' You have selected to NOT overwrite existing files. Continuing.')
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
return files_written
|
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
'\n Add an authorized key to all nodes in the given cluster.\n '
nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
for node in nodes:
node.run(args=['echo', '', '>>', '/root/.ssh/authorized_keys'], shell=True)
node.run(args=['echo', public_key_path.read_text(), '>>', '/root/.ssh/authorized_keys'], shell=True)
| -8,120,650,113,289,150,000
|
Add an authorized key to all nodes in the given cluster.
|
src/dcos_e2e_cli/common/credentials.py
|
add_authorized_key
|
dcos/dcos-e2e
|
python
|
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
'\n \n '
nodes = {*cluster.masters, *cluster.agents, *cluster.public_agents}
for node in nodes:
node.run(args=['echo', '', '>>', '/root/.ssh/authorized_keys'], shell=True)
node.run(args=['echo', public_key_path.read_text(), '>>', '/root/.ssh/authorized_keys'], shell=True)
|
def _convert_auto_ivc_to_conn_name(conns_dict, name):
'\n Convert name of auto_ivc val to promoted input name.\n\n Parameters\n ----------\n conns_dict : dict\n Dictionary of global connections.\n name : str\n Name of auto_ivc to be found.\n\n Returns\n -------\n str\n Promoted input name.\n '
for (key, val) in conns_dict.items():
if (val == name):
return key
| -3,850,278,917,985,354,000
|
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
|
openmdao/utils/general_utils.py
|
_convert_auto_ivc_to_conn_name
|
DKilkenny/OpenMDAO
|
python
|
def _convert_auto_ivc_to_conn_name(conns_dict, name):
'\n Convert name of auto_ivc val to promoted input name.\n\n Parameters\n ----------\n conns_dict : dict\n Dictionary of global connections.\n name : str\n Name of auto_ivc to be found.\n\n Returns\n -------\n str\n Promoted input name.\n '
for (key, val) in conns_dict.items():
if (val == name):
return key
|
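The body is just a reverse lookup over the connections dict, returning None implicitly when the name is not found. A tiny illustration with made-up OpenMDAO-style names:

# Illustration with made-up connection names.
conns_dict = {'comp.x': '_auto_ivc.v0', 'comp.y': '_auto_ivc.v1'}
assert _convert_auto_ivc_to_conn_name(conns_dict, '_auto_ivc.v1') == 'comp.y'
assert _convert_auto_ivc_to_conn_name(conns_dict, '_auto_ivc.v9') is None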
def ignore_errors(flag=None):
'\n Disable certain errors that will prevent setup from completing.\n\n Parameters\n ----------\n flag : bool or None\n If not None, set the value of _ignore_errors to this value.\n\n Returns\n -------\n bool\n The current value of _ignore_errors.\n '
global _ignore_errors
if (flag is not None):
_ignore_errors = flag
return _ignore_errors
| -2,966,108,365,804,464,000
|
Disable certain errors that will prevent setup from completing.
Parameters
----------
flag : bool or None
If not None, set the value of _ignore_errors to this value.
Returns
-------
bool
The current value of _ignore_errors.
|
openmdao/utils/general_utils.py
|
ignore_errors
|
DKilkenny/OpenMDAO
|
python
|
def ignore_errors(flag=None):
'\n Disable certain errors that will prevent setup from completing.\n\n Parameters\n ----------\n flag : bool or None\n If not None, set the value of _ignore_errors to this value.\n\n Returns\n -------\n bool\n The current value of _ignore_errors.\n '
global _ignore_errors
if (flag is not None):
_ignore_errors = flag
return _ignore_errors
|
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
'\n Raise an exception or issue a warning, depending on the value of _ignore_errors.\n\n Parameters\n ----------\n msg : str\n The error/warning message.\n exc : Exception class\n This exception class is used to create the exception to be raised.\n category : warning class\n This category is the class of warning to be issued.\n err : bool\n If None, use ignore_errors(), otherwise use value of err to determine whether to\n raise an exception (err=True) or issue a warning (err=False).\n '
if (((err is None) and ignore_errors()) or (err is False)):
issue_warning(msg, category=category)
else:
raise exc(msg)
| 4,533,055,769,744,363,500
|
Raise an exception or issue a warning, depending on the value of _ignore_errors.
Parameters
----------
msg : str
The error/warning message.
exc : Exception class
This exception class is used to create the exception to be raised.
category : warning class
This category is the class of warning to be issued.
err : bool
If None, use ignore_errors(), otherwise use value of err to determine whether to
raise an exception (err=True) or issue a warning (err=False).
|
openmdao/utils/general_utils.py
|
conditional_error
|
DKilkenny/OpenMDAO
|
python
|
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
'\n Raise an exception or issue a warning, depending on the value of _ignore_errors.\n\n Parameters\n ----------\n msg : str\n The error/warning message.\n exc : Exception class\n This exception class is used to create the exception to be raised.\n category : warning class\n This category is the class of warning to be issued.\n err : bool\n If None, use ignore_errors(), otherwise use value of err to determine whether to\n raise an exception (err=True) or issue a warning (err=False).\n '
if (((err is None) and ignore_errors()) or (err is False)):
issue_warning(msg, category=category)
else:
raise exc(msg)
|
@contextmanager
def ignore_errors_context(flag=True):
'\n Set ignore_errors to the given flag in this context.\n\n Parameters\n ----------\n flag : bool\n If not None, set ignore_errors to this value.\n\n Yields\n ------\n None\n '
save = ignore_errors()
ignore_errors(flag)
try:
(yield)
finally:
ignore_errors(save)
| 3,398,623,984,247,056,000
|
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
|
openmdao/utils/general_utils.py
|
ignore_errors_context
|
DKilkenny/OpenMDAO
|
python
|
@contextmanager
def ignore_errors_context(flag=True):
'\n Set ignore_errors to the given flag in this context.\n\n Parameters\n ----------\n flag : bool\n If not None, set ignore_errors to this value.\n\n Yields\n ------\n None\n '
save = ignore_errors()
ignore_errors(flag)
try:
(yield)
finally:
ignore_errors(save)
|
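ignore_errors(), conditional_error(), and ignore_errors_context() are designed to be used together; a hedged sketch of the intended pattern:

# Hypothetical usage sketch of the three helpers together.
with ignore_errors_context(True):
    # Inside the context ignore_errors() returns True, so this call
    # falls back to issuing a UserWarning instead of raising.
    conditional_error('setup problem detected', exc=RuntimeError)
# Outside the context the saved flag is restored; with ignore_errors()
# False, the same call would raise RuntimeError.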
def simple_warning(msg, category=UserWarning, stacklevel=2):
'\n Display a simple warning message without the annoying extra line showing the warning call.\n\n Parameters\n ----------\n msg : str\n The warning message.\n category : class\n The warning class.\n stacklevel : int\n Number of levels up the stack to identify as the warning location.\n '
warn_deprecation('simple_warning is deprecated. Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
| -5,676,018,800,505,285,000
|
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
|
openmdao/utils/general_utils.py
|
simple_warning
|
DKilkenny/OpenMDAO
|
python
|
def simple_warning(msg, category=UserWarning, stacklevel=2):
'\n Display a simple warning message without the annoying extra line showing the warning call.\n\n Parameters\n ----------\n msg : str\n The warning message.\n category : class\n The warning class.\n stacklevel : int\n Number of levels up the stack to identify as the warning location.\n '
warn_deprecation('simple_warning is deprecated. Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
|
def ensure_compatible(name, value, shape=None, indices=None):
'\n Make value compatible with the specified shape or the shape of indices.\n\n Parameters\n ----------\n name : str\n The name of the value.\n value : float or list or tuple or ndarray or Iterable\n The value of a variable.\n shape : int or tuple or list or None\n The expected or desired shape of the value.\n indices : Indexer or None\n The indices into a source variable.\n\n Returns\n -------\n ndarray\n The value in a shape compatible with the specified shape and/or indices.\n tuple\n The resulting shape of the value.\n\n Raises\n ------\n ValueError\n If value cannot be made to conform to shape or if shape and indices\n are incompatible.\n '
if isinstance(value, Iterable):
value = np.asarray(value)
if (shape is not None):
if isinstance(shape, numbers.Integral):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif (not np.isscalar(value)):
shape = np.atleast_1d(value).shape
if (indices is not None):
if ((not indices._flat_src) and (shape is None)):
raise RuntimeError(("src_indices for '%s' is not flat, so its input shape must be provided." % name))
try:
indshape = indices.indexed_src_shape
except (RuntimeError, ValueError, TypeError):
pass
else:
if ((shape is not None) and (np.product(indshape) != np.product(shape))):
raise ValueError(("Shape of indices %s does not match shape of %s for '%s'." % (indshape, shape, name)))
if (shape is None):
shape = indshape
if (shape is None):
value = np.atleast_1d(value)
shape = value.shape
elif (np.isscalar(value) or (value.shape == (1,))):
value = (np.ones(shape) * value)
else:
value = np.atleast_1d(value).astype(np.float64)
if (value.shape != shape):
raise ValueError(("Incompatible shape for '%s': Expected %s but got %s." % (name, shape, value.shape)))
return (value, shape)
| -7,353,129,919,173,986,000
|
Make value compatible with the specified shape or the shape of indices.
Parameters
----------
name : str
The name of the value.
value : float or list or tuple or ndarray or Iterable
The value of a variable.
shape : int or tuple or list or None
The expected or desired shape of the value.
indices : Indexer or None
The indices into a source variable.
Returns
-------
ndarray
The value in a shape compatible with the specified shape and/or indices.
tuple
The resulting shape of the value.
Raises
------
ValueError
If value cannot be made to conform to shape or if shape and indices
are incompatible.
|
openmdao/utils/general_utils.py
|
ensure_compatible
|
DKilkenny/OpenMDAO
|
python
|
def ensure_compatible(name, value, shape=None, indices=None):
'\n Make value compatible with the specified shape or the shape of indices.\n\n Parameters\n ----------\n name : str\n The name of the value.\n value : float or list or tuple or ndarray or Iterable\n The value of a variable.\n shape : int or tuple or list or None\n The expected or desired shape of the value.\n indices : Indexer or None\n The indices into a source variable.\n\n Returns\n -------\n ndarray\n The value in a shape compatible with the specified shape and/or indices.\n tuple\n The resulting shape of the value.\n\n Raises\n ------\n ValueError\n If value cannot be made to conform to shape or if shape and indices\n are incompatible.\n '
if isinstance(value, Iterable):
value = np.asarray(value)
if (shape is not None):
if isinstance(shape, numbers.Integral):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif (not np.isscalar(value)):
shape = np.atleast_1d(value).shape
if (indices is not None):
if ((not indices._flat_src) and (shape is None)):
raise RuntimeError(("src_indices for '%s' is not flat, so its input shape must be provided." % name))
try:
indshape = indices.indexed_src_shape
except (RuntimeError, ValueError, TypeError):
pass
else:
if ((shape is not None) and (np.product(indshape) != np.product(shape))):
raise ValueError(("Shape of indices %s does not match shape of %s for '%s'." % (indshape, shape, name)))
if (shape is None):
shape = indshape
if (shape is None):
value = np.atleast_1d(value)
shape = value.shape
elif (np.isscalar(value) or (value.shape == (1,))):
value = (np.ones(shape) * value)
else:
value = np.atleast_1d(value).astype(np.float64)
if (value.shape != shape):
raise ValueError(("Incompatible shape for '%s': Expected %s but got %s." % (name, shape, value.shape)))
return (value, shape)
|
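A few hedged examples of the reconciliation rules: scalars broadcast to the requested shape, shapes are inferred from array values, and mismatches raise. The variable names are made up:

# Illustration of the value/shape reconciliation; names are made up.
import numpy as np

val, shape = ensure_compatible('x', 1.0, shape=(3,))
assert shape == (3,) and np.all(val == np.ones(3))  # scalar broadcast

val, shape = ensure_compatible('y', [1.0, 2.0])
assert shape == (2,)                                # shape inferred from value

try:
    ensure_compatible('z', [1.0, 2.0], shape=(3,))
except ValueError as err:
    print(err)  # Incompatible shape for 'z': Expected (3,) but got (2,).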
def determine_adder_scaler(ref0, ref, adder, scaler):
'\n Determine proper values of adder and scaler based on user arguments.\n\n Adder and Scaler are used internally because the transformation is\n slightly more efficient.\n\n Parameters\n ----------\n ref0 : float or ndarray, optional\n Value of response variable that scales to 0.0 in the driver.\n ref : float or ndarray, optional\n Value of response variable that scales to 1.0 in the driver.\n adder : float or ndarray, optional\n Value to add to the model value to get the scaled value. Adder\n is first in precedence.\n scaler : float or ndarray, optional\n Value to multiply the model value to get the scaled value. Scaler\n is second in precedence.\n\n Returns\n -------\n tuple\n Adder and scaler, properly formatted and based on ref/ref0 if provided.\n\n Raises\n ------\n ValueError\n If both ref/ref0 and adder/scaler were provided.\n\n Notes\n -----\n The response can be scaled using ref and ref0.\n The argument :code:`ref0` represents the physical value when the scaled value is 0.\n The argument :code:`ref` represents the physical value when the scaled value is 1.\n '
if ((ref0 is not None) or (ref is not None)):
if ((scaler is not None) or (adder is not None)):
raise ValueError('Inputs ref/ref0 are mutually exclusive with scaler/adder')
if (ref is None):
ref = 1.0
if (ref0 is None):
ref0 = 0.0
adder = (- ref0)
scaler = (1.0 / (ref + adder))
else:
if (scaler is None):
scaler = 1.0
if (adder is None):
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return (adder, scaler)
| -8,816,729,246,448,999,000
|
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
|
openmdao/utils/general_utils.py
|
determine_adder_scaler
|
DKilkenny/OpenMDAO
|
python
|
def determine_adder_scaler(ref0, ref, adder, scaler):
'\n Determine proper values of adder and scaler based on user arguments.\n\n Adder and Scaler are used internally because the transformation is\n slightly more efficient.\n\n Parameters\n ----------\n ref0 : float or ndarray, optional\n Value of response variable that scales to 0.0 in the driver.\n ref : float or ndarray, optional\n Value of response variable that scales to 1.0 in the driver.\n adder : float or ndarray, optional\n Value to add to the model value to get the scaled value. Adder\n is first in precedence.\n scaler : float or ndarray, optional\n Value to multiply the model value to get the scaled value. Scaler\n is second in precedence.\n\n Returns\n -------\n tuple\n Adder and scaler, properly formatted and based on ref/ref0 if provided.\n\n Raises\n ------\n ValueError\n If both ref/ref0 and adder/scaler were provided.\n\n Notes\n -----\n The response can be scaled using ref and ref0.\n The argument :code:`ref0` represents the physical value when the scaled value is 0.\n The argument :code:`ref` represents the physical value when the scaled value is 1.\n '
if ((ref0 is not None) or (ref is not None)):
if ((scaler is not None) or (adder is not None)):
raise ValueError('Inputs ref/ref0 are mutually exclusive with scaler/adder')
if (ref is None):
ref = 1.0
if (ref0 is None):
ref0 = 0.0
adder = (- ref0)
scaler = (1.0 / (ref + adder))
else:
if (scaler is None):
scaler = 1.0
if (adder is None):
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return (adder, scaler)
|
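The ref/ref0 arithmetic is worth seeing once: with adder = -ref0 and scaler = 1/(ref - ref0), the physical value ref0 maps to 0 and ref maps to 1 under scaled = (physical + adder) * scaler. A small worked check:

# Worked example of the ref/ref0 -> adder/scaler transformation.
adder, scaler = determine_adder_scaler(ref0=2.0, ref=6.0, adder=None, scaler=None)
assert adder == -2.0 and scaler == 0.25  # scaler = 1 / (6.0 - 2.0)
assert (2.0 + adder) * scaler == 0.0     # ref0 scales to 0
assert (6.0 + adder) * scaler == 1.0     # ref scales to 1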
def set_pyoptsparse_opt(optname, fallback=True):
"\n For testing, sets the pyoptsparse optimizer using the given optimizer name.\n\n This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.\n This can be used on systems that have SNOPT installed to force them to use\n SLSQP in order to mimic our test machines on travis and appveyor.\n\n Parameters\n ----------\n optname : str\n Name of pyoptsparse optimizer that is requested by the test.\n fallback : bool\n If True, fall back to SLSQP if optname can't be found.\n\n Returns\n -------\n object\n Pyoptsparse optimizer instance.\n str\n Pyoptsparse optimizer string.\n "
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if (fallback and (optname != 'SLSQP')):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if (fallback and isinstance(opt, Mock)):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if ((not fallback) and (OPTIMIZER != optname)):
raise unittest.SkipTest(('pyoptsparse is not providing %s' % optname))
return (OPT, OPTIMIZER)
| -5,513,538,858,391,290,000
|
For testing, sets the pyoptsparse optimizer using the given optimizer name.
This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
This can be used on systems that have SNOPT installed to force them to use
SLSQP in order to mimic our test machines on travis and appveyor.
Parameters
----------
optname : str
Name of pyoptsparse optimizer that is requested by the test.
fallback : bool
If True, fall back to SLSQP if optname can't be found.
Returns
-------
object
Pyoptsparse optimizer instance.
str
Pyoptsparse optimizer string.
|
openmdao/utils/general_utils.py
|
set_pyoptsparse_opt
|
DKilkenny/OpenMDAO
|
python
|
def set_pyoptsparse_opt(optname, fallback=True):
"\n For testing, sets the pyoptsparse optimizer using the given optimizer name.\n\n This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.\n This can be used on systems that have SNOPT installed to force them to use\n SLSQP in order to mimic our test machines on travis and appveyor.\n\n Parameters\n ----------\n optname : str\n Name of pyoptsparse optimizer that is requested by the test.\n fallback : bool\n If True, fall back to SLSQP if optname can't be found.\n\n Returns\n -------\n object\n Pyoptsparse optimizer instance.\n str\n Pyoptsparse optimizer string.\n "
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if (fallback and (optname != 'SLSQP')):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if (fallback and isinstance(opt, Mock)):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if ((not fallback) and (OPTIMIZER != optname)):
raise unittest.SkipTest(('pyoptsparse is not providing %s' % optname))
return (OPT, OPTIMIZER)
|
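A hedged usage sketch in a test module, mirroring how tests typically guard on the returned optimizer name; the class and test names are made up:

# Hypothetical test-module usage sketch.
import unittest

OPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT', fallback=True)

@unittest.skipUnless(OPTIMIZER, 'pyoptsparse is not installed')
class TestDriver(unittest.TestCase):
    def test_opt(self):
        pass  # configure the pyoptsparse driver with optimizer=OPTIMIZER here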
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
'\n Format array option values.\n\n Checks that the given array values are either None, float, or an iterable\n of numeric values. On output all iterables of numeric values are\n converted to a flat np.ndarray. If values is scalar, it is converted\n to float.\n\n Parameters\n ----------\n name : str\n The path of the variable relative to the current system.\n values : float or numpy ndarray or Iterable\n Values of the array option to be formatted to the expected form.\n val_if_none : float or numpy ndarray\n The default value for the option if values is None.\n flatten : bool\n Set to True to flatten any ndarray return.\n\n Returns\n -------\n float or np.ndarray\n Values transformed to the expected form.\n\n Raises\n ------\n ValueError\n If values is Iterable but cannot be converted to a numpy ndarray\n TypeError\n If values is scalar, not None, and not a Number.\n '
if isinstance(values, np.ndarray):
if flatten:
values = values.flatten()
elif ((not isinstance(values, str)) and isinstance(values, Iterable)):
values = np.asarray(values, dtype=float)
if flatten:
values = values.flatten()
elif (values is None):
values = val_if_none
elif (values == float('inf')):
values = INF_BOUND
elif (values == (- float('inf'))):
values = (- INF_BOUND)
elif isinstance(values, numbers.Number):
values = float(values)
else:
raise TypeError('Expected values of {0} to be an Iterable of numeric values, or a scalar numeric value. Got {1} instead.'.format(name, values))
return values
| -1,012,974,045,651,745,500
|
Format array option values.
Checks that the given array values are either None, float, or an iterable
of numeric values. On output all iterables of numeric values are
converted to a flat np.ndarray. If values is scalar, it is converted
to float.
Parameters
----------
name : str
The path of the variable relative to the current system.
values : float or numpy ndarray or Iterable
Values of the array option to be formatted to the expected form.
val_if_none : float or numpy ndarray
The default value for the option if values is None.
flatten : bool
Set to True to flatten any ndarray return.
Returns
-------
float or np.ndarray
Values transformed to the expected form.
Raises
------
ValueError
If values is Iterable but cannot be converted to a numpy ndarray
TypeError
If values is scalar, not None, and not a Number.
|
openmdao/utils/general_utils.py
|
format_as_float_or_array
|
DKilkenny/OpenMDAO
|
python
|
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
'\n Format array option values.\n\n Checks that the given array values are either None, float, or an iterable\n of numeric values. On output all iterables of numeric values are\n converted to a flat np.ndarray. If values is scalar, it is converted\n to float.\n\n Parameters\n ----------\n name : str\n The path of the variable relative to the current system.\n values : float or numpy ndarray or Iterable\n Values of the array option to be formatted to the expected form.\n val_if_none : float or numpy ndarray\n The default value for the option if values is None.\n flatten : bool\n Set to True to flatten any ndarray return.\n\n Returns\n -------\n float or np.ndarray\n Values transformed to the expected form.\n\n Raises\n ------\n ValueError\n If values is Iterable but cannot be converted to a numpy ndarray\n TypeError\n If values is scalar, not None, and not a Number.\n '
if isinstance(values, np.ndarray):
if flatten:
values = values.flatten()
elif ((not isinstance(values, str)) and isinstance(values, Iterable)):
values = np.asarray(values, dtype=float)
if flatten:
values = values.flatten()
elif (values is None):
values = val_if_none
elif (values == float('inf')):
values = INF_BOUND
elif (values == (- float('inf'))):
values = (- INF_BOUND)
elif isinstance(values, numbers.Number):
values = float(values)
else:
raise TypeError('Expected values of {0} to be an Iterable of numeric values, or a scalar numeric value. Got {1} instead.'.format(name, values))
return values
|
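A few hedged examples of the normalisation rules, including the INF_BOUND substitution for infinite bounds (INF_BOUND is assumed importable alongside the function):

# Illustration of the normalisation rules; the names are made up.
import numpy as np

assert format_as_float_or_array('a', 3) == 3.0  # scalar -> float
assert format_as_float_or_array('b', None, val_if_none=1.5) == 1.5
arr = format_as_float_or_array('c', [[1, 2], [3, 4]], flatten=True)
assert isinstance(arr, np.ndarray) and arr.shape == (4,)
assert format_as_float_or_array('d', float('inf')) == INF_BOUND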
def all_ancestors(pathname, delim='.'):
'\n Return a generator of pathnames of the starting object and all of its parents.\n\n Pathnames are ordered from longest to shortest.\n\n Parameters\n ----------\n pathname : str\n Pathname of starting object.\n delim : str\n Delimiter used to split the name.\n\n Yields\n ------\n str\n '
parts = pathname.split(delim)
for i in range(len(parts), 0, (- 1)):
(yield delim.join(parts[:i]))
| -3,061,827,664,611,178,000
|
Return a generator of pathnames of the starting object and all of its parents.
Pathnames are ordered from longest to shortest.
Parameters
----------
pathname : str
Pathname of starting object.
delim : str
Delimiter used to split the name.
Yields
------
str
|
openmdao/utils/general_utils.py
|
all_ancestors
|
DKilkenny/OpenMDAO
|
python
|
def all_ancestors(pathname, delim='.'):
'\n Return a generator of pathnames of the starting object and all of its parents.\n\n Pathnames are ordered from longest to shortest.\n\n Parameters\n ----------\n pathname : str\n Pathname of starting object.\n delim : str\n Delimiter used to split the name.\n\n Yields\n ------\n str\n '
parts = pathname.split(delim)
for i in range(len(parts), 0, (- 1)):
(yield delim.join(parts[:i]))
|
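A quick illustration of the longest-to-shortest ordering, including a custom delimiter:

# Illustration of the longest-to-shortest pathname ordering.
assert list(all_ancestors('sub.comp.x')) == ['sub.comp.x', 'sub.comp', 'sub']
assert list(all_ancestors('a/b', delim='/')) == ['a/b', 'a']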
def find_matches(pattern, var_list):
'\n Return list of variable names that match given pattern.\n\n Parameters\n ----------\n pattern : str\n Glob pattern or variable name.\n var_list : list of str\n List of variable names to search for pattern.\n\n Returns\n -------\n list\n Variable names that match pattern.\n '
if (pattern == '*'):
return var_list
elif (pattern in var_list):
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
| 7,818,583,003,261,877,000
|
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
|
openmdao/utils/general_utils.py
|
find_matches
|
DKilkenny/OpenMDAO
|
python
|
def find_matches(pattern, var_list):
'\n Return list of variable names that match given pattern.\n\n Parameters\n ----------\n pattern : str\n Glob pattern or variable name.\n var_list : list of str\n List of variable names to search for pattern.\n\n Returns\n -------\n list\n Variable names that match pattern.\n '
if (pattern == '*'):
return var_list
elif (pattern in var_list):
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
|
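A minimal usage sketch for the record above (import path taken from the record; assumes OpenMDAO is installed):

from openmdao.utils.general_utils import find_matches

names = ['comp1.x', 'comp2.x', 'other.y']
print(find_matches('comp?.x', names))  # ['comp1.x', 'comp2.x']
print(find_matches('*', names))        # '*' short-circuits: the whole list is returned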
def pad_name(name, pad_num=10, quotes=False):
'\n Pad a string so that they all line up when stacked.\n\n Parameters\n ----------\n name : str\n The string to pad.\n pad_num : int\n The number of total spaces the string should take up.\n quotes : bool\n If name should be quoted.\n\n Returns\n -------\n str\n Padded string.\n '
l_name = len(name)
quotes_len = (2 if quotes else 0)
if ((l_name + quotes_len) < pad_num):
pad = (pad_num - (l_name + quotes_len))
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = '{name}{sep:<{pad}}'
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
elif quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
| -1,679,614,277,903,369,500
|
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
|
openmdao/utils/general_utils.py
|
pad_name
|
DKilkenny/OpenMDAO
|
python
|
def pad_name(name, pad_num=10, quotes=False):
'\n Pad a string so that they all line up when stacked.\n\n Parameters\n ----------\n name : str\n The string to pad.\n pad_num : int\n The number of total spaces the string should take up.\n quotes : bool\n If name should be quoted.\n\n Returns\n -------\n str\n Padded string.\n '
l_name = len(name)
quotes_len = (2 if quotes else 0)
if ((l_name + quotes_len) < pad_num):
pad = (pad_num - (l_name + quotes_len))
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = '{name}{sep:<{pad}}'
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
elif quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
|
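A minimal usage sketch for the record above (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import pad_name

print(repr(pad_name('x', pad_num=8)))               # 'x       ' (padded to 8 characters)
print(repr(pad_name('x', pad_num=8, quotes=True)))  # "'x'     " (quotes count toward the width)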
def run_model(prob, ignore_exception=False):
'\n Call `run_model` on problem and capture output.\n\n Parameters\n ----------\n prob : Problem\n An instance of Problem.\n ignore_exception : bool\n Set to True to ignore an exception of any kind.\n\n Returns\n -------\n string\n Output from calling `run_model` on the Problem, captured from stdout.\n '
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if (not ignore_exception):
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
| 1,922,682,566,468,383,700
|
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
Output from calling `run_model` on the Problem, captured from stdout.
|
openmdao/utils/general_utils.py
|
run_model
|
DKilkenny/OpenMDAO
|
python
|
def run_model(prob, ignore_exception=False):
'\n Call `run_model` on problem and capture output.\n\n Parameters\n ----------\n prob : Problem\n An instance of Problem.\n ignore_exception : bool\n Set to True to ignore an exception of any kind.\n\n Returns\n -------\n string\n Output from calling `run_model` on the Problem, captured from stdout.\n '
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if (not ignore_exception):
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
|
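A minimal sketch of capturing a model's stdout with the helper above; it assumes a recent OpenMDAO install, and the one-component ExecComp model is illustrative only:

import openmdao.api as om
from openmdao.utils.general_utils import run_model

prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y = 2.0 * x'))
prob.setup()
output = run_model(prob)  # any text printed during run_model, returned as one string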
def run_driver(prob):
'\n Call `run_driver` on problem and capture output.\n\n Parameters\n ----------\n prob : Problem\n An instance of Problem.\n\n Returns\n -------\n bool\n Failure flag; True if failed to converge, False if successful.\n string\n Output from calling `run_driver` on the Problem, captured from stdout.\n '
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return (failed, strout.getvalue())
| -7,239,618,793,923,645,000
|
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
Returns
-------
bool
Failure flag; True if failed to converge, False if successful.
string
Output from calling `run_driver` on the Problem, captured from stdout.
|
openmdao/utils/general_utils.py
|
run_driver
|
DKilkenny/OpenMDAO
|
python
|
def run_driver(prob):
'\n Call `run_driver` on problem and capture output.\n\n Parameters\n ----------\n prob : Problem\n An instance of Problem.\n\n Returns\n -------\n bool\n Failure flag; True if failed to converge, False if successful.\n string\n Output from calling `run_driver` on the Problem, captured from stdout.\n '
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return (failed, strout.getvalue())
|
@contextmanager
def printoptions(*args, **kwds):
'\n Context manager for setting numpy print options.\n\n Set print options for the scope of the `with` block, and restore the old\n options at the end. See `numpy.set_printoptions` for the full description of\n available options. If any invalid options are specified, they will be ignored.\n\n >>> with printoptions(precision=2):\n ... print(np.array([2.0]) / 3)\n [0.67]\n The `as`-clause of the `with`-statement gives the current print options:\n >>> with printoptions(precision=2) as opts:\n ... assert_equal(opts, np.get_printoptions())\n\n Parameters\n ----------\n *args : list\n Variable-length argument list.\n **kwds : dict\n Arbitrary keyword arguments.\n\n Yields\n ------\n str or int\n\n See Also\n --------\n set_printoptions, get_printoptions\n '
opts = np.get_printoptions()
kw_opts = dict(((key, val) for (key, val) in kwds.items() if (key in opts)))
try:
np.set_printoptions(*args, **kw_opts)
(yield np.get_printoptions())
finally:
np.set_printoptions(**opts)
| 6,457,766,634,299,743,000
|
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
... print(np.array([2.0]) / 3)
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
|
openmdao/utils/general_utils.py
|
printoptions
|
DKilkenny/OpenMDAO
|
python
|
@contextmanager
def printoptions(*args, **kwds):
'\n Context manager for setting numpy print options.\n\n Set print options for the scope of the `with` block, and restore the old\n options at the end. See `numpy.set_printoptions` for the full description of\n available options. If any invalid options are specified, they will be ignored.\n\n >>> with printoptions(precision=2):\n ... print(np.array([2.0]) / 3)\n [0.67]\n The `as`-clause of the `with`-statement gives the current print options:\n >>> with printoptions(precision=2) as opts:\n ... assert_equal(opts, np.get_printoptions())\n\n Parameters\n ----------\n *args : list\n Variable-length argument list.\n **kwds : dict\n Arbitrary keyword arguments.\n\n Yields\n ------\n str or int\n\n See Also\n --------\n set_printoptions, get_printoptions\n '
opts = np.get_printoptions()
kw_opts = dict(((key, val) for (key, val) in kwds.items() if (key in opts)))
try:
np.set_printoptions(*args, **kw_opts)
(yield np.get_printoptions())
finally:
np.set_printoptions(**opts)
|
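A minimal usage sketch for the record above (assumes numpy and OpenMDAO are installed):

import numpy as np
from openmdao.utils.general_utils import printoptions

with printoptions(precision=2):
    print(np.array([1.0]) / 3)  # [0.33]
print(np.array([1.0]) / 3)      # original precision restored, e.g. [0.33333333]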
def do_nothing_context():
"\n Do nothing.\n\n Useful when you have a block of code that only requires a context manager sometimes,\n and you don't want to repeat the context managed block.\n\n Returns\n -------\n contextmanager\n A do nothing context manager.\n "
return contextmanager(_nothing)()
| 7,486,286,516,754,432,000
|
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
|
openmdao/utils/general_utils.py
|
do_nothing_context
|
DKilkenny/OpenMDAO
|
python
|
def do_nothing_context():
"\n Do nothing.\n\n Useful when you have a block of code that only requires a context manager sometimes,\n and you don't want to repeat the context managed block.\n\n Returns\n -------\n contextmanager\n A do nothing context manager.\n "
return contextmanager(_nothing)()
|
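A minimal sketch of the intended pattern: choose between a real context manager and the no-op one without duplicating the managed block (printoptions is reused here purely for illustration):

import numpy as np
from openmdao.utils.general_utils import do_nothing_context, printoptions

compact = False
ctx = printoptions(precision=2) if compact else do_nothing_context()
with ctx:
    print(np.array([1.0]) / 3)  # same code path whether or not options are set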
def remove_whitespace(s, right=False, left=False):
'\n Remove white-space characters from the given string.\n\n If neither right nor left is specified (the default),\n then all white-space is removed.\n\n Parameters\n ----------\n s : str\n The string to be modified.\n right : bool\n If True, remove white-space from the end of the string.\n left : bool\n If True, remove white-space from the beginning of the string.\n\n Returns\n -------\n str\n The string with white-space removed.\n '
if ((not left) and (not right)):
return re.sub('\\s+', '', s, flags=re.UNICODE)
elif (right and left):
return re.sub('^\\s+|\\s+$', '', s, flags=re.UNICODE)
elif right:
return re.sub('\\s+$', '', s, flags=re.UNICODE)
else:
return re.sub('^\\s+', '', s, flags=re.UNICODE)
| 6,533,136,798,250,963,000
|
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
|
openmdao/utils/general_utils.py
|
remove_whitespace
|
DKilkenny/OpenMDAO
|
python
|
def remove_whitespace(s, right=False, left=False):
'\n Remove white-space characters from the given string.\n\n If neither right nor left is specified (the default),\n then all white-space is removed.\n\n Parameters\n ----------\n s : str\n The string to be modified.\n right : bool\n If True, remove white-space from the end of the string.\n left : bool\n If True, remove white-space from the beginning of the string.\n\n Returns\n -------\n str\n The string with white-space removed.\n '
if ((not left) and (not right)):
return re.sub('\\s+', '', s, flags=re.UNICODE)
elif (right and left):
return re.sub('^\\s+|\\s+$', '', s, flags=re.UNICODE)
elif right:
return re.sub('\\s+$', '', s, flags=re.UNICODE)
else:
return re.sub('^\\s+', '', s, flags=re.UNICODE)
|
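A minimal usage sketch for the record above (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import remove_whitespace

s = '  a b  '
print(repr(remove_whitespace(s)))              # 'ab' (all whitespace removed)
print(repr(remove_whitespace(s, right=True)))  # '  a b'
print(repr(remove_whitespace(s, left=True)))   # 'a b  '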
def str2valid_python_name(s):
'\n Translate a given string into a valid python variable name.\n\n Parameters\n ----------\n s : str\n The string to be translated.\n\n Returns\n -------\n str\n The valid python name string.\n '
return s.translate(_transtab)
| 1,932,803,673,183,064,000
|
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
|
openmdao/utils/general_utils.py
|
str2valid_python_name
|
DKilkenny/OpenMDAO
|
python
|
def str2valid_python_name(s):
'\n Translate a given string into a valid python variable name.\n\n Parameters\n ----------\n s : str\n The string to be translated.\n\n Returns\n -------\n str\n The valid python name string.\n '
return s.translate(_transtab)
|
def make_serializable(o):
"\n Recursively convert numpy types to native types for JSON serialization.\n\n This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif (isinstance(o, bool) or isinstance(o, complex)):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
| -2,465,878,391,897,661,400
|
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
|
openmdao/utils/general_utils.py
|
make_serializable
|
DKilkenny/OpenMDAO
|
python
|
def make_serializable(o):
"\n Recursively convert numpy types to native types for JSON serialization.\n\n This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif (isinstance(o, bool) or isinstance(o, complex)):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
|
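A minimal usage sketch for the record above (assumes numpy and OpenMDAO are installed):

import numpy as np
from openmdao.utils.general_utils import make_serializable

data = {'x': np.array([1.0, 2.0]), 'n': np.int64(3)}
print(make_serializable(data))  # {'x': [1.0, 2.0], 'n': 3}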
def make_serializable_key(o):
"\n Recursively convert numpy types to native types for JSON serialization.\n\n This function is for making serizializable dictionary keys, so no containers.\n This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
| -4,248,340,428,172,972,500
|
Recursively convert numpy types to native types for JSON serialization.
This function is for making serializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
|
openmdao/utils/general_utils.py
|
make_serializable_key
|
DKilkenny/OpenMDAO
|
python
|
def make_serializable_key(o):
"\n Recursively convert numpy types to native types for JSON serialization.\n\n This function is for making serizializable dictionary keys, so no containers.\n This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
|
def default_noraise(o):
"\n Try to convert some extra types during JSON serialization.\n\n This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will\n attempt to convert values if possible, but if no conversion works, will return\n 'unserializable object (<type>)' instead of raising a TypeError.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif (isinstance(o, bool) or isinstance(o, complex)):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif (o is None):
return None
else:
return f'unserializable object ({type(o).__name__})'
| 1,492,094,519,533,654,000
|
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
|
openmdao/utils/general_utils.py
|
default_noraise
|
DKilkenny/OpenMDAO
|
python
|
def default_noraise(o):
"\n Try to convert some extra types during JSON serialization.\n\n This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will\n attempt to convert values if possible, but if no conversion works, will return\n 'unserializable object (<type>)' instead of raising a TypeError.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n "
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif (isinstance(o, bool) or isinstance(o, complex)):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif (o is None):
return None
else:
return f'unserializable object ({type(o).__name__})'
|
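A minimal usage sketch for the record above, passing the function as json's default hook (assumes numpy and OpenMDAO are installed):

import json
import numpy as np
from openmdao.utils.general_utils import default_noraise

print(json.dumps({'arr': np.arange(3)}, default=default_noraise))
# {"arr": [0, 1, 2]}
print(json.dumps(object(), default=default_noraise))
# "unserializable object (object)" rather than a TypeError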
def make_set(str_data, name=None):
'\n Construct a set containing the specified character strings.\n\n Parameters\n ----------\n str_data : None, str, or list of strs\n Character string(s) to be included in the set.\n\n name : str, optional\n A name to be used in error messages.\n\n Returns\n -------\n set\n A set of character strings.\n '
if (not str_data):
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if (not isinstance(item, str)):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError('The {} argument should be str, set, or list: {}'.format(name, str_data))
else:
raise TypeError('The argument should be str, set, or list: {}'.format(str_data))
| 6,344,895,469,572,138,000
|
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
|
openmdao/utils/general_utils.py
|
make_set
|
DKilkenny/OpenMDAO
|
python
|
def make_set(str_data, name=None):
'\n Construct a set containing the specified character strings.\n\n Parameters\n ----------\n str_data : None, str, or list of strs\n Character string(s) to be included in the set.\n\n name : str, optional\n A name to be used in error messages.\n\n Returns\n -------\n set\n A set of character strings.\n '
if (not str_data):
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if (not isinstance(item, str)):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError('The {} argument should be str, set, or list: {}'.format(name, str_data))
else:
raise TypeError('The argument should be str, set, or list: {}'.format(str_data))
|
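A minimal usage sketch for the record above (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import make_set

print(make_set(None))             # set()
print(make_set('a'))              # {'a'}
print(make_set(['a', 'b', 'a']))  # {'a', 'b'}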
def match_includes_excludes(name, includes=None, excludes=None):
'\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. None, the default, means\n include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n '
if (excludes is not None):
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
if (includes is None):
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
| 2,588,734,518,395,102,000
|
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
|
openmdao/utils/general_utils.py
|
match_includes_excludes
|
DKilkenny/OpenMDAO
|
python
|
def match_includes_excludes(name, includes=None, excludes=None):
'\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. None, the default, means\n include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n '
if (excludes is not None):
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
if (includes is None):
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
|
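A minimal usage sketch for the record above; note that excludes win over includes because they are checked first (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import match_includes_excludes

print(match_includes_excludes('sub.comp.x', includes=['sub.*']))                     # True
print(match_includes_excludes('sub.comp.x', includes=['sub.*'], excludes=['*.x']))   # False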
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
'\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Unpromoted variable name to be checked for match.\n prom_name : str\n Promoted variable name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. None, the default, means\n to include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n '
diff = (name != prom_name)
if (excludes is not None):
for pattern in excludes:
if (fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))):
return False
if (includes is None):
return True
else:
for pattern in includes:
if (fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))):
return True
return False
| 1,778,470,870,226,834,700
|
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
|
openmdao/utils/general_utils.py
|
match_prom_or_abs
|
DKilkenny/OpenMDAO
|
python
|
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
'\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Unpromoted variable name to be checked for match.\n prom_name : str\n Promoted variable name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. None, the default, means\n to include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n '
diff = (name != prom_name)
if (excludes is not None):
for pattern in excludes:
if (fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))):
return False
if (includes is None):
return True
else:
for pattern in includes:
if (fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern))):
return True
return False
|
def env_truthy(env_var):
"\n Return True if the given environment variable is 'truthy'.\n\n Parameters\n ----------\n env_var : str\n The name of the environment variable.\n\n Returns\n -------\n bool\n True if the specified environment variable is 'truthy'.\n "
return (os.environ.get(env_var, '0').lower() not in _falsey)
| 8,997,511,053,205,589,000
|
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
|
openmdao/utils/general_utils.py
|
env_truthy
|
DKilkenny/OpenMDAO
|
python
|
def env_truthy(env_var):
"\n Return True if the given environment variable is 'truthy'.\n\n Parameters\n ----------\n env_var : str\n The name of the environment variable.\n\n Returns\n -------\n bool\n True if the specified environment variable is 'truthy'.\n "
return (os.environ.get(env_var, '0').lower() not in _falsey)
|
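A minimal usage sketch for the record above. The exact contents of the module-level _falsey set are an assumption here; the code shown only implies that unset variables default to '0':

import os
from openmdao.utils.general_utils import env_truthy

os.environ['MY_FLAG'] = '1'
print(env_truthy('MY_FLAG'))    # True
print(env_truthy('UNSET_VAR'))  # False, assuming '0' (the default) is in _falsey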
def common_subpath(pathnames):
"\n Return the common dotted subpath found in all of the given dotted pathnames.\n\n Parameters\n ----------\n pathnames : iter of str\n Dotted pathnames of systems.\n\n Returns\n -------\n str\n Common dotted subpath. Returns '' if no common subpath is found.\n "
if (len(pathnames) == 1):
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if (p0 != splits[i][common_loc]):
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
| -4,609,442,889,970,753,000
|
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
|
openmdao/utils/general_utils.py
|
common_subpath
|
DKilkenny/OpenMDAO
|
python
|
def common_subpath(pathnames):
"\n Return the common dotted subpath found in all of the given dotted pathnames.\n\n Parameters\n ----------\n pathnames : iter of str\n Dotted pathnames of systems.\n\n Returns\n -------\n str\n Common dotted subpath. Returns if no common subpath is found.\n "
if (len(pathnames) == 1):
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if (p0 != splits[i][common_loc]):
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
|
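A minimal usage sketch for the record above (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import common_subpath

print(common_subpath(['a.b.c', 'a.b.d.e']))  # 'a.b'
print(common_subpath(['a.b', 'x.y']))        # '' (no common subpath)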
def _is_slicer_op(indices):
'\n Check if an indexer contains a slice or ellipsis operator.\n\n Parameters\n ----------\n indices : ndarray\n Indices to check.\n\n Returns\n -------\n bool\n Returns True if indices contains a colon or ellipsis operator.\n '
if isinstance(indices, tuple):
return any(((isinstance(i, slice) or (i is ...)) for i in indices))
return isinstance(indices, slice)
| 5,967,391,470,871,776,000
|
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
|
openmdao/utils/general_utils.py
|
_is_slicer_op
|
DKilkenny/OpenMDAO
|
python
|
def _is_slicer_op(indices):
'\n Check if an indexer contains a slice or ellipsis operator.\n\n Parameters\n ----------\n indices : ndarray\n Indices to check.\n\n Returns\n -------\n bool\n Returns True if indices contains a colon or ellipsis operator.\n '
if isinstance(indices, tuple):
return any(((isinstance(i, slice) or (i is ...)) for i in indices))
return isinstance(indices, slice)
|
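A minimal usage sketch for the record above; the leading underscore marks this as a private helper, so importing it directly is for illustration only:

from openmdao.utils.general_utils import _is_slicer_op

print(_is_slicer_op(slice(0, 5)))       # True
print(_is_slicer_op((slice(None), 2)))  # True -- a tuple containing a slice
print(_is_slicer_op([0, 1, 2]))         # False -- plain index lists don't count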
def _slice_indices(slicer, arr_size, arr_shape):
'\n Return an index array based on a slice or slice tuple and the array size and shape.\n\n Parameters\n ----------\n slicer : slice or tuple containing slices\n Slice object to slice array\n arr_size : int\n Size of output array\n arr_shape : tuple\n Tuple of output array shape\n\n Returns\n -------\n array\n Returns the sliced indices.\n '
if isinstance(slicer, slice):
(start, stop, step) = (slicer.start, slicer.stop, slicer.step)
if (start is None):
start = 0
if (stop is None):
stop = arr_size
if (step is None):
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
| 5,302,177,245,538,873,000
|
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
|
openmdao/utils/general_utils.py
|
_slice_indices
|
DKilkenny/OpenMDAO
|
python
|
def _slice_indices(slicer, arr_size, arr_shape):
'\n Return an index array based on a slice or slice tuple and the array size and shape.\n\n Parameters\n ----------\n slicer : slice or tuple containing slices\n Slice object to slice array\n arr_size : int\n Size of output array\n arr_shape : tuple\n Tuple of output array shape\n\n Returns\n -------\n array\n Returns the sliced indices.\n '
if isinstance(slicer, slice):
(start, stop, step) = (slicer.start, slicer.stop, slicer.step)
if (start is None):
start = 0
if (stop is None):
stop = arr_size
if (step is None):
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
|
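A minimal sketch exercising both branches of the private helper above (assumes numpy and OpenMDAO are installed, and that the module-level INT_DTYPE is a plain integer dtype):

from openmdao.utils.general_utils import _slice_indices

print(_slice_indices(slice(None), 6, (2, 3)))
# [[0 1 2]
#  [3 4 5]]
print(_slice_indices((slice(None), slice(1, 3)), 6, (2, 3)))
# [[1 2]
#  [4 5]]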
def _prom2ivc_src_name_iter(prom_dict):
'\n Yield keys from prom_dict with promoted input names converted to ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n str\n name\n '
for (name, meta) in prom_dict.items():
if (meta['ivc_source'] is not None):
(yield meta['ivc_source'])
else:
(yield name)
| 690,393,987,370,168,600
|
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
|
openmdao/utils/general_utils.py
|
_prom2ivc_src_name_iter
|
DKilkenny/OpenMDAO
|
python
|
def _prom2ivc_src_name_iter(prom_dict):
'\n Yield keys from prom_dict with promoted input names converted to ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n str\n name\n '
for (name, meta) in prom_dict.items():
if (meta['ivc_source'] is not None):
(yield meta['ivc_source'])
else:
(yield name)
|
def _prom2ivc_src_item_iter(prom_dict):
'\n Yield items from prom_dict with promoted input names converted to ivc source names.\n\n The result is that all names are absolute.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n tuple\n name, metadata\n '
for (name, meta) in prom_dict.items():
if (meta['ivc_source'] is not None):
(yield (meta['ivc_source'], meta))
else:
(yield (name, meta))
| 6,250,075,840,540,254,000
|
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
|
openmdao/utils/general_utils.py
|
_prom2ivc_src_item_iter
|
DKilkenny/OpenMDAO
|
python
|
def _prom2ivc_src_item_iter(prom_dict):
'\n Yield items from prom_dict with promoted input names converted to ivc source names.\n\n The result is that all names are absolute.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n tuple\n name, metadata\n '
for (name, meta) in prom_dict.items():
if (meta['ivc_source'] is not None):
(yield (meta['ivc_source'], meta))
else:
(yield (name, meta))
|
def _prom2ivc_src_dict(prom_dict):
'\n Convert a dictionary with promoted input names into one with ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Returns\n -------\n dict\n New dict with ivc source pathnames.\n '
return {name: meta for (name, meta) in _prom2ivc_src_item_iter(prom_dict)}
| 1,931,912,990,526,470,100
|
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
|
openmdao/utils/general_utils.py
|
_prom2ivc_src_dict
|
DKilkenny/OpenMDAO
|
python
|
def _prom2ivc_src_dict(prom_dict):
'\n Convert a dictionary with promoted input names into one with ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Returns\n -------\n dict\n New dict with ivc source pathnames.\n '
return {name: meta for (name, meta) in _prom2ivc_src_item_iter(prom_dict)}
|
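A minimal sketch of the remapping this family of private helpers performs; the metadata dicts below are stand-ins containing only the 'ivc_source' key that the iterators inspect, and the '_auto_ivc.v0' name is illustrative:

from openmdao.utils.general_utils import _prom2ivc_src_dict

prom = {
    'x': {'ivc_source': '_auto_ivc.v0'},  # promoted input, remapped to its ivc source
    'comp.y': {'ivc_source': None},       # already a source name, kept as-is
}
print(list(_prom2ivc_src_dict(prom)))  # ['_auto_ivc.v0', 'comp.y']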
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
'\n Compute lower level src_indices based on parent src_indices.\n\n Parameters\n ----------\n parent_src_inds : ndarray\n Parent src_indices.\n parent_src_shape : tuple\n Shape of source expected by parent.\n my_src_inds : ndarray or fancy index\n Src_indices at the current system level, before conversion.\n my_src_shape : tuple\n Expected source shape at the current system level.\n\n Returns\n -------\n ndarray\n Final src_indices based on those of the parent.\n '
if (parent_src_inds is None):
return my_src_inds
elif (my_src_inds is None):
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
| 4,043,396,470,340,805,000
|
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
|
openmdao/utils/general_utils.py
|
convert_src_inds
|
DKilkenny/OpenMDAO
|
python
|
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
'\n Compute lower level src_indices based on parent src_indices.\n\n Parameters\n ----------\n parent_src_inds : ndarray\n Parent src_indices.\n parent_src_shape : tuple\n Shape of source expected by parent.\n my_src_inds : ndarray or fancy index\n Src_indices at the current system level, before conversion.\n my_src_shape : tuple\n Expected source shape at the current system level.\n\n Returns\n -------\n ndarray\n Final src_indices based on those of the parent.\n '
if (parent_src_inds is None):
return my_src_inds
elif (my_src_inds is None):
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
|
def shape2tuple(shape):
'\n Return shape as a tuple.\n\n Parameters\n ----------\n shape : int or tuple\n The given shape.\n\n Returns\n -------\n tuple\n The shape as a tuple.\n '
if isinstance(shape, Number):
return (shape,)
elif (shape is None):
return shape
return tuple(shape)
| -5,092,143,027,922,796,000
|
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
|
openmdao/utils/general_utils.py
|
shape2tuple
|
DKilkenny/OpenMDAO
|
python
|
def shape2tuple(shape):
'\n Return shape as a tuple.\n\n Parameters\n ----------\n shape : int or tuple\n The given shape.\n\n Returns\n -------\n tuple\n The shape as a tuple.\n '
if isinstance(shape, Number):
return (shape,)
elif (shape is None):
return shape
return tuple(shape)
|
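A minimal usage sketch for the record above (assumes OpenMDAO is installed):

from openmdao.utils.general_utils import shape2tuple

print(shape2tuple(3))       # (3,)
print(shape2tuple([2, 3]))  # (2, 3)
print(shape2tuple(None))    # None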
def get_connection_owner(system, tgt):
"\n Return (owner, promoted_src, promoted_tgt) for the given connected target.\n\n Note : this is not speedy. It's intended for use only in error messages.\n\n Parameters\n ----------\n system : System\n Any System. The search always goes from the model level down.\n tgt : str\n Absolute pathname of the target variable.\n\n Returns\n -------\n tuple\n (wning group, promoted source name, promoted target name).\n "
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
if ((src in abs2prom['output']) and (tgt in abs2prom['input'])):
if (abs2prom['input'][tgt] != abs2prom['output'][src]):
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if (tprom in g._manual_connections):
return (g.pathname, g._var_allprocs_abs2prom['output'][src], tprom)
return (None, None, None)
| 1,633,914,159,028,749,300
|
Return (owner, promoted_src, promoted_tgt) for the given connected target.
Note: this is not speedy. It's intended for use only in error messages.
Parameters
----------
system : System
Any System. The search always goes from the model level down.
tgt : str
Absolute pathname of the target variable.
Returns
-------
tuple
(owning group, promoted source name, promoted target name).
|
openmdao/utils/general_utils.py
|
get_connection_owner
|
DKilkenny/OpenMDAO
|
python
|
def get_connection_owner(system, tgt):
"\n Return (owner, promoted_src, promoted_tgt) for the given connected target.\n\n Note : this is not speedy. It's intended for use only in error messages.\n\n Parameters\n ----------\n system : System\n Any System. The search always goes from the model level down.\n tgt : str\n Absolute pathname of the target variable.\n\n Returns\n -------\n tuple\n (wning group, promoted source name, promoted target name).\n "
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
if ((src in abs2prom['output']) and (tgt in abs2prom['input'])):
if (abs2prom['input'][tgt] != abs2prom['output'][src]):
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if (tprom in g._manual_connections):
return (g.pathname, g._var_allprocs_abs2prom['output'][src], tprom)
return (None, None, None)
|
def wing_dbg():
'\n Make import of wingdbstub contingent on value of WING_DBG environment variable.\n\n Also will import wingdbstub from the WINGHOME directory.\n '
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = (sys.path[:] + [os.environ['WINGHOME']])
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
| 8,914,793,370,689,681,000
|
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
|
openmdao/utils/general_utils.py
|
wing_dbg
|
DKilkenny/OpenMDAO
|
python
|
def wing_dbg():
'\n Make import of wingdbstub contingent on value of WING_DBG environment variable.\n\n Also will import wingdbstub from the WINGHOME directory.\n '
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = (sys.path[:] + [os.environ['WINGHOME']])
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
|
def __contains__(self, name):
'\n Return if the named object is contained.\n\n Parameters\n ----------\n name : str\n Name of the object being looked up.\n\n Returns\n -------\n bool\n Always returns True.\n '
return True
| -8,732,378,914,084,561,000
|
Return if the named object is contained.
Parameters
----------
name : str
Name of the object being looked up.
Returns
-------
bool
Always returns True.
|
openmdao/utils/general_utils.py
|
__contains__
|
DKilkenny/OpenMDAO
|
python
|
def __contains__(self, name):
'\n Return if the named object is contained.\n\n Parameters\n ----------\n name : str\n Name of the object being looked up.\n\n Returns\n -------\n bool\n Always returns True.\n '
return True
|
def __init__(self, system, vname, use_vec_offset=True):
'\n Initialize the iterator.\n '
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if (vname in abs2meta):
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = (np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0)
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = (self._start + sizes[(rank, var_idx)])
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range((slices[vname].stop - slices[vname].start))
| -7,698,128,074,785,812,000
|
Initialize the iterator.
|
openmdao/utils/general_utils.py
|
__init__
|
DKilkenny/OpenMDAO
|
python
|
def __init__(self, system, vname, use_vec_offset=True):
'\n \n '
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if (vname in abs2meta):
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = (np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0)
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = (self._start + sizes[(rank, var_idx)])
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range((slices[vname].stop - slices[vname].start))
|
def _serial_iter(self):
'\n Iterate over a local non-distributed variable.\n\n Yields\n ------\n int\n Variable index.\n '
(yield from self._inds)
| 3,925,686,889,734,001,700
|
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
|
openmdao/utils/general_utils.py
|
_serial_iter
|
DKilkenny/OpenMDAO
|
python
|
def _serial_iter(self):
'\n Iterate over a local non-distributed variable.\n\n Yields\n ------\n int\n Variable index.\n '
(yield from self._inds)
|
def _dist_iter(self):
'\n Iterate over a distributed variable.\n\n Yields\n ------\n int or None\n Variable index or None if index is not local to this rank.\n '
start = self._start
end = self._end
for i in range(self._dist_size):
if ((i >= start) and (i < end)):
(yield ((i - start) + self._offset))
else:
(yield None)
| 3,273,171,553,087,816,700
|
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
|
openmdao/utils/general_utils.py
|
_dist_iter
|
DKilkenny/OpenMDAO
|
python
|
def _dist_iter(self):
'\n Iterate over a distributed variable.\n\n Yields\n ------\n int or None\n Variable index or None if index is not local to this rank.\n '
start = self._start
end = self._end
for i in range(self._dist_size):
if ((i >= start) and (i < end)):
(yield ((i - start) + self._offset))
else:
(yield None)
|
def __iter__(self):
'\n Return an iterator.\n\n Returns\n -------\n iterator\n An iterator over our indices.\n '
return self._iter()
| 3,586,504,963,431,038,500
|
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
|
openmdao/utils/general_utils.py
|
__iter__
|
DKilkenny/OpenMDAO
|
python
|
def __iter__(self):
'\n Return an iterator.\n\n Returns\n -------\n iterator\n An iterator over our indices.\n '
return self._iter()
|
def create_network_interfaces(self, **kwargs):
'\n Create a new network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.create_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: The attribute map used to create the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
data = self.create_network_interfaces_with_http_info(**kwargs)
return data
| -8,308,409,485,413,751,000
|
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
create_network_interfaces
|
asun-ps/purity_fb_python_client
|
python
|
def create_network_interfaces(self, **kwargs):
'\n Create a new network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.create_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: The attribute map used to create the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
data = self.create_network_interfaces_with_http_info(**kwargs)
return data
|
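A hedged sketch of the synchronous call pattern these generated client methods share. The host, token, and NetworkInterface field values below are illustrative assumptions, not values from the record:

from purity_fb import PurityFb, NetworkInterface

fb = PurityFb('flashblade.example.com')  # hypothetical array address
fb.login('T-xxxx')                       # hypothetical API token
res = fb.network_interfaces.create_network_interfaces(
    names=['fm1.data0'],                 # hypothetical interface name
    network_interface=NetworkInterface(services=['data'], type='vip'))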
def create_network_interfaces_with_http_info(self, **kwargs):
'\n Create a new network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: The attribute map used to create the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method create_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('network_interface' in params):
body_params = params['network_interface']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| 6,628,856,100,416,965,000
|
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
create_network_interfaces_with_http_info
|
asun-ps/purity_fb_python_client
|
python
|
def create_network_interfaces_with_http_info(self, **kwargs):
'\n Create a new network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: The attribute map used to create the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method create_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('network_interface' in params):
body_params = params['network_interface']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
def delete_network_interfaces(self, **kwargs):
'\n Delete a network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.delete_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
data = self.delete_network_interfaces_with_http_info(**kwargs)
return data
| -1,217,760,738,808,310,800
|
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
delete_network_interfaces
|
asun-ps/purity_fb_python_client
|
python
|
def delete_network_interfaces(self, **kwargs):
'\n Delete a network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.delete_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
data = self.delete_network_interfaces_with_http_info(**kwargs)
return data
|
def delete_network_interfaces_with_http_info(self, **kwargs):
'\n Delete a network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method delete_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| 6,066,101,161,732,652,000
|
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
delete_network_interfaces_with_http_info
|
asun-ps/purity_fb_python_client
|
python
|
def delete_network_interfaces_with_http_info(self, **kwargs):
'\n Delete a network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method delete_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
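Note that the plain wrapper forces _return_http_data_only to True, so callers who want the HTTP status code or headers can call the _with_http_info variant directly. A sketch, assuming the generated api_client returns a (data, status, headers) tuple when _return_http_data_only is unset, as is conventional for swagger-codegen clients:
# Sketch only -- the tuple shape is an assumption about api_client.call_api.
api = fb.network_interfaces   # client instance from the earlier sketch

data, status, headers = api.delete_network_interfaces_with_http_info(
    names=['fm1.admin0'])
# response_type is None for this endpoint, so data is None on success.
assert status == 200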
def list_network_interfaces(self, **kwargs):
"\n List network interfaces\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param str filter: The filter to be used for query.\n :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).\n :param int start: The offset of the first resource to return from a collection.\n :param int limit: limit, should be >= 0\n :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
data = self.list_network_interfaces_with_http_info(**kwargs)
return data
| -5,871,237,290,454,569,000
|
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
list_network_interfaces
|
asun-ps/purity_fb_python_client
|
python
|
def list_network_interfaces(self, **kwargs):
"\n List network interfaces\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param str filter: The filter to be used for query.\n :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).\n :param int start: The offset of the first resource to return from a collection.\n :param int limit: limit, should be >= 0\n :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
data = self.list_network_interfaces_with_http_info(**kwargs)
return data
|
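The token parameter supports iterating over large collections: each response carries a continuation_token that is passed back as token on the next request. A paging sketch; the items and continuation_token attribute names are assumptions based on the docstring, since the NetworkInterfaceResponse model is not shown in this file:
# Paging sketch -- response attribute names are assumed, not confirmed.
token = None
while True:
    kwargs = {'limit': 100, 'sort': 'name'}
    if token:
        kwargs['token'] = token
    resp = fb.network_interfaces.list_network_interfaces(**kwargs)
    for item in resp.items:
        print(item.name)
    token = getattr(resp, 'continuation_token', None)
    if not token:
        break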
def list_network_interfaces_with_http_info(self, **kwargs):
"\n List network interfaces\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param str filter: The filter to be used for query.\n :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).\n :param int start: The offset of the first resource to return from a collection.\n :param int limit: limit, should be >= 0\n :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n "
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method list_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if ('filter' in params):
query_params.append(('filter', params['filter']))
if ('sort' in params):
query_params.append(('sort', params['sort']))
if ('start' in params):
query_params.append(('start', params['start']))
if ('limit' in params):
query_params.append(('limit', params['limit']))
if ('token' in params):
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| -8,858,434,167,035,889,000
|
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
list_network_interfaces_with_http_info
|
asun-ps/purity_fb_python_client
|
python
|
def list_network_interfaces_with_http_info(self, **kwargs):
"\n List network interfaces\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param str filter: The filter to be used for query.\n :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).\n :param int start: The offset of the first resource to return from a collection.\n :param int limit: limit, should be >= 0\n :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n "
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method list_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if ('filter' in params):
query_params.append(('filter', params['filter']))
if ('sort' in params):
query_params.append(('sort', params['sort']))
if ('start' in params):
query_params.append(('start', params['start']))
if ('limit' in params):
query_params.append(('limit', params['limit']))
if ('token' in params):
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
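The collection_formats entry above means a names list is serialized as a single comma-separated query parameter rather than repeated names keys. A standalone sketch that mimics the resulting URL, independent of the client:
from urllib.parse import urlencode

# csv collection format: ['fm1.admin0', 'fm2.admin0'] -> one 'names' parameter.
names = ['fm1.admin0', 'fm2.admin0']
query = urlencode({'names': ','.join(names), 'limit': 10})
print('/1.3/network-interfaces?' + query)
# -> /1.3/network-interfaces?names=fm1.admin0%2Cfm2.admin0&limit=10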
def update_network_interfaces(self, **kwargs):
'\n Update an existing network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.update_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: the attribute map used to update the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
data = self.update_network_interfaces_with_http_info(**kwargs)
return data
| -8,657,946,211,867,635,000
|
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
update_network_interfaces
|
asun-ps/purity_fb_python_client
|
python
|
def update_network_interfaces(self, **kwargs):
'\n Update an existing network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.update_network_interfaces(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: the attribute map used to update the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
data = self.update_network_interfaces_with_http_info(**kwargs)
return data
|
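An update sketch for the PATCH wrapper above. The NetworkInterface model is not defined in this file, so both the import location and the services field below are assumptions; substitute whatever attributes the model actually exposes.
from purity_fb import NetworkInterface   # assumed export location

# Hypothetical attribute map -- 'services' is an assumed field name.
patch = NetworkInterface(services=['data'])
resp = fb.network_interfaces.update_network_interfaces(
    names=['fm1.data0'], network_interface=patch)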
def update_network_interfaces_with_http_info(self, **kwargs):
'\n Update an existing network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: the attribute map used to update the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method update_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('network_interface' in params):
body_params = params['network_interface']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| -4,722,062,144,713,662,000
|
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
update_network_interfaces_with_http_info
|
asun-ps/purity_fb_python_client
|
python
|
def update_network_interfaces_with_http_info(self, **kwargs):
'\n Update an existing network interface\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.\n :param NetworkInterface network_interface: the attribute map used to update the network interface\n :return: NetworkInterfaceResponse\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method update_network_interfaces" % key))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if ('names' in params):
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('network_interface' in params):
body_params = params['network_interface']
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NetworkInterfaceResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
def main():
' Calls the other functions to test them. '
run_test_first_is_elsewhere_too()
| -7,653,343,239,728,754,000
|
Calls the other functions to test them.
|
src/m3_more_nested_loops_in_sequences.py
|
main
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def main():
' '
run_test_first_is_elsewhere_too()
|
def run_test_largest_number():
' Tests the largest_number function. '
print()
print('-------------------------------------')
print('Testing the LARGEST_NUMBER function:')
print('-------------------------------------')
expected = 13
answer = largest_number([(3, 1, 4), (13, 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
expected = (- 1111111111111111)
answer = largest_number(([], [(- 1111111111111111)], []))
print('Expected and actual are:', expected, answer)
expected = None
answer = largest_number(([], [], []))
print('Expected and actual are:', expected, answer)
expected = 13
answer = largest_number([(3, 1, 4), (13, 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
| 7,014,046,524,202,184,000
|
Tests the largest_number function.
|
src/m3_more_nested_loops_in_sequences.py
|
run_test_largest_number
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def run_test_largest_number():
' '
print()
print('-------------------------------------')
print('Testing the LARGEST_NUMBER function:')
print('-------------------------------------')
expected = 13
answer = largest_number([(3, 1, 4), (13, 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
expected = (- 1111111111111111)
answer = largest_number(([], [(- 1111111111111111)], []))
print('Expected and actual are:', expected, answer)
expected = None
answer = largest_number(([], [], []))
print('Expected and actual are:', expected, answer)
expected = 13
answer = largest_number([(3, 1, 4), (13, 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
|
def largest_number(seq_seq):
'\n Returns the largest number in the subsequences of the given\n sequence of sequences. Returns None if there are NO numbers\n in the subsequences.\n\n For example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [1, 2, 3, 4]]\n then this function returns 13.\n\n As another example, if the given argument is:\n ([], [-1111111111111111], [])\n then this function returns -1111111111111111.\n\n As yet another example, if the given argument is:\n ([], [], [])\n then this function returns None.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences,\n where each subsequence contains only numbers.\n '
x = None
for j in range(len(seq_seq)):
for k in range(len(seq_seq[j])):
x = j
y = k
for l in range(len(seq_seq)):
for o in range(len(seq_seq[l])):
if (seq_seq[l][o] > seq_seq[x][y]):
x = l
y = o
if (x is None):
return None
return seq_seq[x][y]
| -447,333,767,110,148,740
|
Returns the largest number in the subsequences of the given
sequence of sequences. Returns None if there are NO numbers
in the subsequences.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]]
then this function returns 13.
As another example, if the given argument is:
([], [-1111111111111111], [])
then this function returns -1111111111111111.
As yet another example, if the given argument is:
([], [], [])
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
|
src/m3_more_nested_loops_in_sequences.py
|
largest_number
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def largest_number(seq_seq):
'\n Returns the largest number in the subsequences of the given\n sequence of sequences. Returns None if there are NO numbers\n in the subsequences.\n\n For example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [1, 2, 3, 4]]\n then this function returns 13.\n\n As another example, if the given argument is:\n ([], [-1111111111111111], [])\n then this function returns -1111111111111111.\n\n As yet another example, if the given argument is:\n ([], [], [])\n then this function returns None.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences,\n where each subsequence contains only numbers.\n '
x = None
for j in range(len(seq_seq)):
for k in range(len(seq_seq[j])):
x = j
y = k
for l in range(len(seq_seq)):
for o in range(len(seq_seq[l])):
if (seq_seq[l][o] > seq_seq[x][y]):
x = l
y = o
if (x is None):
return None
return seq_seq[x][y]
|
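The index bookkeeping above first walks every element just to seed (x, y) with the last valid position, then scans again for the maximum. A hypothetical flattened equivalent, shown only for comparison, that meets the same contract:
def largest_number_flat(seq_seq):
    ' Hypothetical equivalent of largest_number: flatten, then take the max. '
    numbers = [x for seq in seq_seq for x in seq]
    return max(numbers) if numbers else None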
def run_test_largest_negative_number():
' Tests the largest_negative_number function. '
print()
print('-------------------------------------------------')
print('Testing the LARGEST_NEGATIVE_NUMBER function:')
print('-------------------------------------------------')
expected = (- 13)
answer = largest_negative_number([(3, 1, 4), ((- 13), 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
expected = (- 2)
answer = largest_negative_number(([(- 10)], [(- 1111111111111111)], [(- 2)]))
print('Expected and actual are:', expected, answer)
expected = None
answer = largest_negative_number(([], [], []))
print('Expected and actual are:', expected, answer)
| 7,173,169,023,766,363,000
|
Tests the largest_negative_number function.
|
src/m3_more_nested_loops_in_sequences.py
|
run_test_largest_negative_number
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def run_test_largest_negative_number():
' '
print()
print('-------------------------------------------------')
print('Testing the LARGEST_NEGATIVE_NUMBER function:')
print('-------------------------------------------------')
expected = (- 13)
answer = largest_negative_number([(3, 1, 4), ((- 13), 10, 11, 7, 10), [1, 2, 3, 4]])
print('Expected and actual are:', expected, answer)
expected = (- 2)
answer = largest_negative_number(([(- 10)], [(- 1111111111111111)], [(- 2)]))
print('Expected and actual are:', expected, answer)
expected = None
answer = largest_negative_number(([], [], []))
print('Expected and actual are:', expected, answer)
|
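The test above exercises largest_negative_number, which is not defined in this excerpt. A minimal sketch consistent with the expected values in the test (largest strictly negative number, or None when there is none); the implementation details are assumptions:
def largest_negative_number(seq_seq):
    ' Hypothetical sketch: returns the largest negative number in the subsequences, or None if none exist. '
    negatives = [x for seq in seq_seq for x in seq if (x < 0)]
    return max(negatives) if negatives else None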