body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def boosted_trees_predict(tree_ensemble_handle, bucketized_features, logits_dimension, name=None):
    """Runs multiple additive regression ensemble predictors on input instances
    and computes the logits. It is designed to be used during prediction.
    It traverses all the trees and calculates the final score for each instance.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`.
      bucketized_features: A list of at least 1 `Tensor` objects with type
        `int32`. A list of rank 1 Tensors containing bucket id for each feature.
      logits_dimension: An `int`. scalar, dimension of the logits, to be used
        for partial logits shape.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `float32`.
    """
    _ctx = _context._context
    running_eagerly = _ctx is not None and _ctx._eager_context.is_eager
    if not running_eagerly:
        # Graph-mode path: validate inputs and build the op in the current graph.
        if not isinstance(bucketized_features, (list, tuple)):
            raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_predict' Op, not %r." % bucketized_features)
        _attr_num_bucketized_features = len(bucketized_features)
        logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesPredict',
            tree_ensemble_handle=tree_ensemble_handle,
            bucketized_features=bucketized_features,
            logits_dimension=logits_dimension,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('num_bucketized_features', _op.get_attr('num_bucketized_features'),
                  'logits_dimension', _op.get_attr('logits_dimension'))
        _execute.record_gradient('BoostedTreesPredict', _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesPredict', name, _ctx._post_execution_callbacks,
            tree_ensemble_handle, bucketized_features,
            'logits_dimension', logits_dimension)
        return _result
    except _core._FallbackException:
        return boosted_trees_predict_eager_fallback(
            tree_ensemble_handle, bucketized_features,
            logits_dimension=logits_dimension, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
| 2,189,912,239,693,258,200
|
Runs multiple additive regression ensemble predictors on input instances and
computes the logits. It is designed to be used during prediction.
It traverses all the trees and calculates the final score for each instance.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.
A list of rank 1 Tensors containing bucket id for each
feature.
logits_dimension: An `int`.
scalar, dimension of the logits, to be used for partial logits
shape.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_predict
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_predict(tree_ensemble_handle, bucketized_features, logits_dimension, name=None):
    """Runs multiple additive regression ensemble predictors on input instances
    and computes the logits. It is designed to be used during prediction.
    It traverses all the trees and calculates the final score for each instance.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`.
      bucketized_features: A list of at least 1 `Tensor` objects with type
        `int32`. A list of rank 1 Tensors containing bucket id for each feature.
      logits_dimension: An `int`. scalar, dimension of the logits, to be used
        for partial logits shape.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `float32`.
    """
    _ctx = _context._context
    running_eagerly = _ctx is not None and _ctx._eager_context.is_eager
    if not running_eagerly:
        # Graph-mode path: validate inputs and build the op in the current graph.
        if not isinstance(bucketized_features, (list, tuple)):
            raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_predict' Op, not %r." % bucketized_features)
        _attr_num_bucketized_features = len(bucketized_features)
        logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesPredict',
            tree_ensemble_handle=tree_ensemble_handle,
            bucketized_features=bucketized_features,
            logits_dimension=logits_dimension,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('num_bucketized_features', _op.get_attr('num_bucketized_features'),
                  'logits_dimension', _op.get_attr('logits_dimension'))
        _execute.record_gradient('BoostedTreesPredict', _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesPredict', name, _ctx._post_execution_callbacks,
            tree_ensemble_handle, bucketized_features,
            'logits_dimension', logits_dimension)
        return _result
    except _core._FallbackException:
        return boosted_trees_predict_eager_fallback(
            tree_ensemble_handle, bucketized_features,
            logits_dimension=logits_dimension, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def boosted_trees_predict_eager_fallback(tree_ensemble_handle, bucketized_features, logits_dimension, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_predict`."""
    _ctx = ctx if ctx else _context.context()
    if not isinstance(bucketized_features, (list, tuple)):
        raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_predict' Op, not %r." % bucketized_features)
    _attr_num_bucketized_features = len(bucketized_features)
    logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
    # Convert all inputs to tensors of the dtypes the op expects.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
    _inputs_flat = [tree_ensemble_handle] + list(bucketized_features)
    _attrs = ('num_bucketized_features', _attr_num_bucketized_features,
              'logits_dimension', logits_dimension)
    _result = _execute.execute(b'BoostedTreesPredict', 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesPredict', _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
| -6,167,013,392,166,292,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_predict
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_predict_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_predict_eager_fallback(tree_ensemble_handle, bucketized_features, logits_dimension, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_predict`."""
    _ctx = ctx if ctx else _context.context()
    if not isinstance(bucketized_features, (list, tuple)):
        raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_predict' Op, not %r." % bucketized_features)
    _attr_num_bucketized_features = len(bucketized_features)
    logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
    # Convert all inputs to tensors of the dtypes the op expects.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
    _inputs_flat = [tree_ensemble_handle] + list(bucketized_features)
    _attrs = ('num_bucketized_features', _attr_num_bucketized_features,
              'logits_dimension', logits_dimension)
    _result = _execute.execute(b'BoostedTreesPredict', 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesPredict', _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
|
def boosted_trees_serialize_ensemble(tree_ensemble_handle, name=None):
    """Serializes the tree ensemble to a proto.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`. Handle to the tree
        ensemble.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized).
      stamp_token: A `Tensor` of type `int64`.
      tree_ensemble_serialized: A `Tensor` of type `string`.
    """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        # Graph-mode path: build the op in the current graph.
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesSerializeEnsemble',
            tree_ensemble_handle=tree_ensemble_handle, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient('BoostedTreesSerializeEnsemble', _inputs_flat, _attrs, _result, name)
        return _BoostedTreesSerializeEnsembleOutput._make(_result)
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesSerializeEnsemble', name,
            _ctx._post_execution_callbacks, tree_ensemble_handle)
        return _BoostedTreesSerializeEnsembleOutput._make(_result)
    except _core._FallbackException:
        return boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
| 5,663,834,042,271,118,000
|
Serializes the tree ensemble to a proto.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the tree ensemble.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized).
stamp_token: A `Tensor` of type `int64`.
tree_ensemble_serialized: A `Tensor` of type `string`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_serialize_ensemble
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_serialize_ensemble(tree_ensemble_handle, name=None):
    """Serializes the tree ensemble to a proto.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`. Handle to the tree
        ensemble.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized).
      stamp_token: A `Tensor` of type `int64`.
      tree_ensemble_serialized: A `Tensor` of type `string`.
    """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        # Graph-mode path: build the op in the current graph.
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesSerializeEnsemble',
            tree_ensemble_handle=tree_ensemble_handle, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient('BoostedTreesSerializeEnsemble', _inputs_flat, _attrs, _result, name)
        return _BoostedTreesSerializeEnsembleOutput._make(_result)
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesSerializeEnsemble', name,
            _ctx._post_execution_callbacks, tree_ensemble_handle)
        return _BoostedTreesSerializeEnsembleOutput._make(_result)
    except _core._FallbackException:
        return boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_serialize_ensemble`."""
    _ctx = ctx if ctx else _context.context()
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    _inputs_flat = [tree_ensemble_handle]
    _attrs = None
    # The op has 2 outputs: stamp_token and tree_ensemble_serialized.
    _result = _execute.execute(b'BoostedTreesSerializeEnsemble', 2, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesSerializeEnsemble', _inputs_flat, _attrs, _result, name)
    return _BoostedTreesSerializeEnsembleOutput._make(_result)
| -4,808,437,526,666,825,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_serialize_ensemble
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_serialize_ensemble_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_serialize_ensemble`."""
    _ctx = ctx if ctx else _context.context()
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    _inputs_flat = [tree_ensemble_handle]
    _attrs = None
    # The op has 2 outputs: stamp_token and tree_ensemble_serialized.
    _result = _execute.execute(b'BoostedTreesSerializeEnsemble', 2, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesSerializeEnsemble', _inputs_flat, _attrs, _result, name)
    return _BoostedTreesSerializeEnsembleOutput._make(_result)
|
def boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None):
    """Runs multiple additive regression ensemble predictors on input instances
    and computes the update to cached logits. It is designed to be used during
    training. It traverses the trees starting from cached tree id and cached
    node id and calculates the updates to be pushed to the cache.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`.
      cached_tree_ids: A `Tensor` of type `int32`. Rank 1 Tensor containing
        cached tree ids which is the starting tree of prediction.
      cached_node_ids: A `Tensor` of type `int32`. Rank 1 Tensor containing
        cached node id which is the starting node of prediction.
      bucketized_features: A list of at least 1 `Tensor` objects with type
        `int32`. A list of rank 1 Tensors containing bucket id for each feature.
      logits_dimension: An `int`. scalar, dimension of the logits, to be used
        for partial logits shape.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids).
      partial_logits: A `Tensor` of type `float32`.
      tree_ids: A `Tensor` of type `int32`.
      node_ids: A `Tensor` of type `int32`.
    """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        # Graph-mode path: validate inputs and build the op in the current graph.
        if not isinstance(bucketized_features, (list, tuple)):
            raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_training_predict' Op, not %r." % bucketized_features)
        _attr_num_bucketized_features = len(bucketized_features)
        logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesTrainingPredict',
            tree_ensemble_handle=tree_ensemble_handle,
            cached_tree_ids=cached_tree_ids,
            cached_node_ids=cached_node_ids,
            bucketized_features=bucketized_features,
            logits_dimension=logits_dimension,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('num_bucketized_features', _op.get_attr('num_bucketized_features'),
                  'logits_dimension', _op.get_attr('logits_dimension'))
        _execute.record_gradient('BoostedTreesTrainingPredict', _inputs_flat, _attrs, _result, name)
        return _BoostedTreesTrainingPredictOutput._make(_result)
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesTrainingPredict', name, _ctx._post_execution_callbacks,
            tree_ensemble_handle, cached_tree_ids, cached_node_ids,
            bucketized_features, 'logits_dimension', logits_dimension)
        return _BoostedTreesTrainingPredictOutput._make(_result)
    except _core._FallbackException:
        return boosted_trees_training_predict_eager_fallback(
            tree_ensemble_handle, cached_tree_ids, cached_node_ids,
            bucketized_features, logits_dimension=logits_dimension,
            name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
| 4,996,428,193,543,894,000
|
Runs multiple additive regression ensemble predictors on input instances and
computes the update to cached logits. It is designed to be used during training.
It traverses the trees starting from cached tree id and cached node id and
calculates the updates to be pushed to the cache.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
cached_tree_ids: A `Tensor` of type `int32`.
Rank 1 Tensor containing cached tree ids which is the starting
tree of prediction.
cached_node_ids: A `Tensor` of type `int32`.
Rank 1 Tensor containing cached node id which is the starting
node of prediction.
bucketized_features: A list of at least 1 `Tensor` objects with type `int32`.
A list of rank 1 Tensors containing bucket id for each
feature.
logits_dimension: An `int`.
scalar, dimension of the logits, to be used for partial logits
shape.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids).
partial_logits: A `Tensor` of type `float32`.
tree_ids: A `Tensor` of type `int32`.
node_ids: A `Tensor` of type `int32`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_training_predict
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None):
    """Runs multiple additive regression ensemble predictors on input instances
    and computes the update to cached logits. It is designed to be used during
    training. It traverses the trees starting from cached tree id and cached
    node id and calculates the updates to be pushed to the cache.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`.
      cached_tree_ids: A `Tensor` of type `int32`. Rank 1 Tensor containing
        cached tree ids which is the starting tree of prediction.
      cached_node_ids: A `Tensor` of type `int32`. Rank 1 Tensor containing
        cached node id which is the starting node of prediction.
      bucketized_features: A list of at least 1 `Tensor` objects with type
        `int32`. A list of rank 1 Tensors containing bucket id for each feature.
      logits_dimension: An `int`. scalar, dimension of the logits, to be used
        for partial logits shape.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids).
      partial_logits: A `Tensor` of type `float32`.
      tree_ids: A `Tensor` of type `int32`.
      node_ids: A `Tensor` of type `int32`.
    """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        # Graph-mode path: validate inputs and build the op in the current graph.
        if not isinstance(bucketized_features, (list, tuple)):
            raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_training_predict' Op, not %r." % bucketized_features)
        _attr_num_bucketized_features = len(bucketized_features)
        logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesTrainingPredict',
            tree_ensemble_handle=tree_ensemble_handle,
            cached_tree_ids=cached_tree_ids,
            cached_node_ids=cached_node_ids,
            bucketized_features=bucketized_features,
            logits_dimension=logits_dimension,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ('num_bucketized_features', _op.get_attr('num_bucketized_features'),
                  'logits_dimension', _op.get_attr('logits_dimension'))
        _execute.record_gradient('BoostedTreesTrainingPredict', _inputs_flat, _attrs, _result, name)
        return _BoostedTreesTrainingPredictOutput._make(_result)
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesTrainingPredict', name, _ctx._post_execution_callbacks,
            tree_ensemble_handle, cached_tree_ids, cached_node_ids,
            bucketized_features, 'logits_dimension', logits_dimension)
        return _BoostedTreesTrainingPredictOutput._make(_result)
    except _core._FallbackException:
        return boosted_trees_training_predict_eager_fallback(
            tree_ensemble_handle, cached_tree_ids, cached_node_ids,
            bucketized_features, logits_dimension=logits_dimension,
            name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def boosted_trees_training_predict_eager_fallback(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_training_predict`."""
    _ctx = ctx if ctx else _context.context()
    if not isinstance(bucketized_features, (list, tuple)):
        raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_training_predict' Op, not %r." % bucketized_features)
    _attr_num_bucketized_features = len(bucketized_features)
    logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
    # Convert all inputs to tensors of the dtypes the op expects.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    cached_tree_ids = _ops.convert_to_tensor(cached_tree_ids, _dtypes.int32)
    cached_node_ids = _ops.convert_to_tensor(cached_node_ids, _dtypes.int32)
    bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
    _inputs_flat = [tree_ensemble_handle, cached_tree_ids, cached_node_ids] + list(bucketized_features)
    _attrs = ('num_bucketized_features', _attr_num_bucketized_features,
              'logits_dimension', logits_dimension)
    # The op has 3 outputs: partial_logits, tree_ids, node_ids.
    _result = _execute.execute(b'BoostedTreesTrainingPredict', 3, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesTrainingPredict', _inputs_flat, _attrs, _result, name)
    return _BoostedTreesTrainingPredictOutput._make(_result)
| -5,873,690,754,351,361,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_training_predict
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_training_predict_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_training_predict_eager_fallback(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features, logits_dimension, name=None, ctx=None):
    """Slow-path eager-mode implementation of `boosted_trees_training_predict`."""
    _ctx = ctx if ctx else _context.context()
    if not isinstance(bucketized_features, (list, tuple)):
        raise TypeError("Expected list for 'bucketized_features' argument to 'boosted_trees_training_predict' Op, not %r." % bucketized_features)
    _attr_num_bucketized_features = len(bucketized_features)
    logits_dimension = _execute.make_int(logits_dimension, 'logits_dimension')
    # Convert all inputs to tensors of the dtypes the op expects.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    cached_tree_ids = _ops.convert_to_tensor(cached_tree_ids, _dtypes.int32)
    cached_node_ids = _ops.convert_to_tensor(cached_node_ids, _dtypes.int32)
    bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32)
    _inputs_flat = [tree_ensemble_handle, cached_tree_ids, cached_node_ids] + list(bucketized_features)
    _attrs = ('num_bucketized_features', _attr_num_bucketized_features,
              'logits_dimension', logits_dimension)
    # The op has 3 outputs: partial_logits, tree_ids, node_ids.
    _result = _execute.execute(b'BoostedTreesTrainingPredict', 3, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('BoostedTreesTrainingPredict', _inputs_flat, _attrs, _result, name)
    return _BoostedTreesTrainingPredictOutput._make(_result)
|
def boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None):
    """Updates the tree ensemble by either adding a layer to the last tree being
    grown or by starting a new tree.

    Args:
      tree_ensemble_handle: A `Tensor` of type `resource`. Handle to the
        ensemble variable.
      feature_ids: A `Tensor` of type `int32`. Rank 1 tensor with ids for each
        feature. This is the real id of the feature that will be used in the
        split.
      node_ids: A list of `Tensor` objects with type `int32`. List of rank 1
        tensors representing the nodes for which this feature has a split.
      gains: A list with the same length as `node_ids` of `Tensor` objects with
        type `float32`. List of rank 1 tensors representing the gains for each
        of the feature's split.
      thresholds: A list with the same length as `node_ids` of `Tensor` objects
        with type `int32`. List of rank 1 tensors representing the thresholds
        for each of the feature's split.
      left_node_contribs: A list with the same length as `node_ids` of `Tensor`
        objects with type `float32`. List of rank 2 tensors with left leaf
        contribs for each of the feature's splits. Will be added to the previous
        node values to constitute the values of the left nodes.
      right_node_contribs: A list with the same length as `node_ids` of `Tensor`
        objects with type `float32`. List of rank 2 tensors with right leaf
        contribs for each of the feature's splits. Will be added to the previous
        node values to constitute the values of the right nodes.
      max_depth: A `Tensor` of type `int32`. Max depth of the tree to build.
      learning_rate: A `Tensor` of type `float32`. shrinkage const for each new
        tree.
      pruning_mode: An `int` that is `>= 0`.
        0-No pruning, 1-Pre-pruning, 2-Post-pruning.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        # Graph-mode path: validate inputs and build the op in the current graph.
        if not isinstance(node_ids, (list, tuple)):
            raise TypeError("Expected list for 'node_ids' argument to 'boosted_trees_update_ensemble' Op, not %r." % node_ids)
        _attr_num_features = len(node_ids)
        # Every per-feature list must be a list/tuple with the same length as
        # node_ids; the checks run in the same order as the original generated
        # code so identical errors are raised for identical bad inputs.
        for _arg_name, _arg in (('gains', gains),
                                ('thresholds', thresholds),
                                ('left_node_contribs', left_node_contribs),
                                ('right_node_contribs', right_node_contribs)):
            if not isinstance(_arg, (list, tuple)):
                raise TypeError("Expected list for '%s' argument to 'boosted_trees_update_ensemble' Op, not %r." % (_arg_name, _arg))
            if len(_arg) != _attr_num_features:
                raise ValueError("List argument '%s' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (_arg_name, len(_arg), _attr_num_features))
        pruning_mode = _execute.make_int(pruning_mode, 'pruning_mode')
        _, _, _op = _op_def_lib._apply_op_helper(
            'BoostedTreesUpdateEnsemble',
            tree_ensemble_handle=tree_ensemble_handle,
            feature_ids=feature_ids,
            node_ids=node_ids,
            gains=gains,
            thresholds=thresholds,
            left_node_contribs=left_node_contribs,
            right_node_contribs=right_node_contribs,
            max_depth=max_depth,
            learning_rate=learning_rate,
            pruning_mode=pruning_mode,
            name=name)
        # FIX: the original generated code had unreachable statements
        # (`_result = None; return _result`) after this return; removed.
        return _op
    # Eager fast path; falls back to the Python slow path on _FallbackException.
    try:
        _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
            _ctx._context_handle, _ctx._eager_context.device_name,
            'BoostedTreesUpdateEnsemble', name, _ctx._post_execution_callbacks,
            tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,
            left_node_contribs, right_node_contribs, max_depth, learning_rate,
            'pruning_mode', pruning_mode)
        return _result
    except _core._FallbackException:
        return boosted_trees_update_ensemble_eager_fallback(
            tree_ensemble_handle, feature_ids, node_ids, gains, thresholds,
            left_node_contribs, right_node_contribs, max_depth, learning_rate,
            pruning_mode=pruning_mode, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
        # Attach the op name to the error message when one was supplied.
        message = e.message if name is None else (e.message + ' name: ') + name
        _six.raise_from(_core._status_to_exception(e.code, message), None)
| -1,718,888,198,187,871,200
|
Updates the tree ensemble by either adding a layer to the last tree being grown
or by starting a new tree.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
Handle to the ensemble variable.
feature_ids: A `Tensor` of type `int32`.
Rank 1 tensor with ids for each feature. This is the real id of
the feature that will be used in the split.
node_ids: A list of `Tensor` objects with type `int32`.
List of rank 1 tensors representing the nodes for which this feature
has a split.
gains: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 1 tensors representing the gains for each of the feature's
split.
thresholds: A list with the same length as `node_ids` of `Tensor` objects with type `int32`.
List of rank 1 tensors representing the thresholds for each of the
feature's split.
left_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 2 tensors with left leaf contribs for each of
the feature's splits. Will be added to the previous node values to constitute
the values of the left nodes.
right_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.
List of rank 2 tensors with right leaf contribs for each
of the feature's splits. Will be added to the previous node values to constitute
the values of the right nodes.
max_depth: A `Tensor` of type `int32`. Max depth of the tree to build.
learning_rate: A `Tensor` of type `float32`.
shrinkage const for each new tree.
pruning_mode: An `int` that is `>= 0`.
0-No pruning, 1-Pre-pruning, 2-Post-pruning.
name: A name for the operation (optional).
Returns:
The created Operation.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_update_ensemble
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None):
    "Updates the tree ensemble by either adding a layer to the last tree being grown\n\n  or by starting a new tree.\n\n  Args:\n    tree_ensemble_handle: A `Tensor` of type `resource`.\n      Handle to the ensemble variable.\n    feature_ids: A `Tensor` of type `int32`.\n      Rank 1 tensor with ids for each feature. This is the real id of\n      the feature that will be used in the split.\n    node_ids: A list of `Tensor` objects with type `int32`.\n      List of rank 1 tensors representing the nodes for which this feature\n      has a split.\n    gains: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n      List of rank 1 tensors representing the gains for each of the feature's\n      split.\n    thresholds: A list with the same length as `node_ids` of `Tensor` objects with type `int32`.\n      List of rank 1 tensors representing the thresholds for each of the\n      feature's split.\n    left_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n      List of rank 2 tensors with left leaf contribs for each of\n      the feature's splits. Will be added to the previous node values to constitute\n      the values of the left nodes.\n    right_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`.\n      List of rank 2 tensors with right leaf contribs for each\n      of the feature's splits. Will be added to the previous node values to constitute\n      the values of the right nodes.\n    max_depth: A `Tensor` of type `int32`. Max depth of the tree to build.\n    learning_rate: A `Tensor` of type `float32`.\n      shrinkage const for each new tree.\n    pruning_mode: An `int` that is `>= 0`.\n      0-No pruning, 1-Pre-pruning, 2-Post-pruning.\n    name: A name for the operation (optional).\n\n  Returns:\n    The created Operation.\n  "
    # NOTE(review): machine-generated op wrapper (gen_boosted_trees_ops.py);
    # validation and argument ordering below must stay in sync with the
    # BoostedTreesUpdateEnsemble op definition.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: len(node_ids) fixes the num_features attr, and every
        # other per-feature list input must have exactly that length.
        if (not isinstance(node_ids, (list, tuple))):
            raise TypeError(("Expected list for 'node_ids' argument to 'boosted_trees_update_ensemble' Op, not %r." % node_ids))
        _attr_num_features = len(node_ids)
        if (not isinstance(gains, (list, tuple))):
            raise TypeError(("Expected list for 'gains' argument to 'boosted_trees_update_ensemble' Op, not %r." % gains))
        if (len(gains) != _attr_num_features):
            raise ValueError(("List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(gains), _attr_num_features)))
        if (not isinstance(thresholds, (list, tuple))):
            raise TypeError(("Expected list for 'thresholds' argument to 'boosted_trees_update_ensemble' Op, not %r." % thresholds))
        if (len(thresholds) != _attr_num_features):
            raise ValueError(("List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(thresholds), _attr_num_features)))
        if (not isinstance(left_node_contribs, (list, tuple))):
            raise TypeError(("Expected list for 'left_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs))
        if (len(left_node_contribs) != _attr_num_features):
            raise ValueError(("List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(left_node_contribs), _attr_num_features)))
        if (not isinstance(right_node_contribs, (list, tuple))):
            raise TypeError(("Expected list for 'right_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs))
        if (len(right_node_contribs) != _attr_num_features):
            raise ValueError(("List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(right_node_contribs), _attr_num_features)))
        pruning_mode = _execute.make_int(pruning_mode, 'pruning_mode')
        (_, _, _op) = _op_def_lib._apply_op_helper('BoostedTreesUpdateEnsemble', tree_ensemble_handle=tree_ensemble_handle, feature_ids=feature_ids, node_ids=node_ids, gains=gains, thresholds=thresholds, left_node_contribs=left_node_contribs, right_node_contribs=right_node_contribs, max_depth=max_depth, learning_rate=learning_rate, pruning_mode=pruning_mode, name=name)
        # The op has no tensor outputs; the created Operation itself is returned.
        return _op
        # Unreachable: retained verbatim from the code generator's template.
        _result = None
        return _result
    else:
        try:
            # Eager fast path: execute directly through the C API.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'BoostedTreesUpdateEnsemble', name, _ctx._post_execution_callbacks, tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, 'pruning_mode', pruning_mode)
            return _result
        except _core._FallbackException:
            # Fast path rejected the inputs (e.g. symbolic tensors); retry slowly.
            return boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode=pruning_mode, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C-level status to a TF error without exception chaining.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None, ctx=None):
    'This is the slowpath function for Eager mode.\n    This is for function boosted_trees_update_ensemble\n  '
    # Use the caller-provided eager context when given (the fast path passes it).
    _ctx = (ctx if ctx else _context.context())
    # Same validation as the graph path: node_ids fixes num_features and every
    # other per-feature list input must match that length.
    if (not isinstance(node_ids, (list, tuple))):
        raise TypeError(("Expected list for 'node_ids' argument to 'boosted_trees_update_ensemble' Op, not %r." % node_ids))
    _attr_num_features = len(node_ids)
    if (not isinstance(gains, (list, tuple))):
        raise TypeError(("Expected list for 'gains' argument to 'boosted_trees_update_ensemble' Op, not %r." % gains))
    if (len(gains) != _attr_num_features):
        raise ValueError(("List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(gains), _attr_num_features)))
    if (not isinstance(thresholds, (list, tuple))):
        raise TypeError(("Expected list for 'thresholds' argument to 'boosted_trees_update_ensemble' Op, not %r." % thresholds))
    if (len(thresholds) != _attr_num_features):
        raise ValueError(("List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(thresholds), _attr_num_features)))
    if (not isinstance(left_node_contribs, (list, tuple))):
        raise TypeError(("Expected list for 'left_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs))
    if (len(left_node_contribs) != _attr_num_features):
        raise ValueError(("List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(left_node_contribs), _attr_num_features)))
    if (not isinstance(right_node_contribs, (list, tuple))):
        raise TypeError(("Expected list for 'right_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs))
    if (len(right_node_contribs) != _attr_num_features):
        raise ValueError(("List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(right_node_contribs), _attr_num_features)))
    pruning_mode = _execute.make_int(pruning_mode, 'pruning_mode')
    # Convert every input to a tensor of the dtype required by the op def.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int32)
    node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32)
    gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
    thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32)
    left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32)
    right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32)
    max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32)
    learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
    # Flatten the inputs in op-def order: scalars first, then the list inputs.
    _inputs_flat = (((((([tree_ensemble_handle, feature_ids] + list(node_ids)) + list(gains)) + list(thresholds)) + list(left_node_contribs)) + list(right_node_contribs)) + [max_depth, learning_rate])
    _attrs = ('pruning_mode', pruning_mode, 'num_features', _attr_num_features)
    # num_outputs=0: the op only mutates the ensemble resource in place.
    _result = _execute.execute(b'BoostedTreesUpdateEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _result = None
    return _result
| 5,264,325,416,892,002,000
|
This is the slowpath function for Eager mode.
This is for function boosted_trees_update_ensemble
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
boosted_trees_update_ensemble_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate, pruning_mode, name=None, ctx=None):
    'This is the slowpath function for Eager mode.\n    This is for function boosted_trees_update_ensemble\n  '
    # Use the caller-provided eager context when given (the fast path passes it).
    _ctx = (ctx if ctx else _context.context())
    # Same validation as the graph path: node_ids fixes num_features and every
    # other per-feature list input must match that length.
    if (not isinstance(node_ids, (list, tuple))):
        raise TypeError(("Expected list for 'node_ids' argument to 'boosted_trees_update_ensemble' Op, not %r." % node_ids))
    _attr_num_features = len(node_ids)
    if (not isinstance(gains, (list, tuple))):
        raise TypeError(("Expected list for 'gains' argument to 'boosted_trees_update_ensemble' Op, not %r." % gains))
    if (len(gains) != _attr_num_features):
        raise ValueError(("List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(gains), _attr_num_features)))
    if (not isinstance(thresholds, (list, tuple))):
        raise TypeError(("Expected list for 'thresholds' argument to 'boosted_trees_update_ensemble' Op, not %r." % thresholds))
    if (len(thresholds) != _attr_num_features):
        raise ValueError(("List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(thresholds), _attr_num_features)))
    if (not isinstance(left_node_contribs, (list, tuple))):
        raise TypeError(("Expected list for 'left_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs))
    if (len(left_node_contribs) != _attr_num_features):
        raise ValueError(("List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(left_node_contribs), _attr_num_features)))
    if (not isinstance(right_node_contribs, (list, tuple))):
        raise TypeError(("Expected list for 'right_node_contribs' argument to 'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs))
    if (len(right_node_contribs) != _attr_num_features):
        raise ValueError(("List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d must match length %d of argument 'node_ids'." % (len(right_node_contribs), _attr_num_features)))
    pruning_mode = _execute.make_int(pruning_mode, 'pruning_mode')
    # Convert every input to a tensor of the dtype required by the op def.
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int32)
    node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32)
    gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
    thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32)
    left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32)
    right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32)
    max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32)
    learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
    # Flatten the inputs in op-def order: scalars first, then the list inputs.
    _inputs_flat = (((((([tree_ensemble_handle, feature_ids] + list(node_ids)) + list(gains)) + list(thresholds)) + list(left_node_contribs)) + list(right_node_contribs)) + [max_depth, learning_rate])
    _attrs = ('pruning_mode', pruning_mode, 'num_features', _attr_num_features)
    # num_outputs=0: the op only mutates the ensemble resource in place.
    _result = _execute.execute(b'BoostedTreesUpdateEnsemble', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _result = None
    return _result
|
def is_boosted_trees_ensemble_initialized(tree_ensemble_handle, name=None):
    'Checks whether a tree ensemble has been initialized.\n\n  Args:\n    tree_ensemble_handle: A `Tensor` of type `resource`.\n      Handle to the tree ensemble resource.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor` of type `bool`.\n  '
    # NOTE(review): machine-generated op wrapper; graph/eager dispatch pattern.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: build the op and return its single bool output.
        (_, _, _op) = _op_def_lib._apply_op_helper('IsBoostedTreesEnsembleInitialized', tree_ensemble_handle=tree_ensemble_handle, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient('IsBoostedTreesEnsembleInitialized', _inputs_flat, _attrs, _result, name)
        (_result,) = _result
        return _result
    else:
        try:
            # Eager fast path: execute directly through the C API.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'IsBoostedTreesEnsembleInitialized', name, _ctx._post_execution_callbacks, tree_ensemble_handle)
            return _result
        except _core._FallbackException:
            # Fast path rejected the inputs; retry via the slow path.
            return is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C-level status to a TF error without exception chaining.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
| -7,141,706,510,654,712,000
|
Checks whether a tree ensemble has been initialized.
Args:
tree_ensemble_handle: A `Tensor` of type `resource`.
    Handle to the tree ensemble resource.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
is_boosted_trees_ensemble_initialized
|
Con-Mi/lambda-packs
|
python
|
def is_boosted_trees_ensemble_initialized(tree_ensemble_handle, name=None):
    'Checks whether a tree ensemble has been initialized.\n\n  Args:\n    tree_ensemble_handle: A `Tensor` of type `resource`.\n      Handle to the tree ensemble resource.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor` of type `bool`.\n  '
    # NOTE(review): machine-generated op wrapper; graph/eager dispatch pattern.
    _ctx = _context._context
    if ((_ctx is None) or (not _ctx._eager_context.is_eager)):
        # Graph mode: build the op and return its single bool output.
        (_, _, _op) = _op_def_lib._apply_op_helper('IsBoostedTreesEnsembleInitialized', tree_ensemble_handle=tree_ensemble_handle, name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient('IsBoostedTreesEnsembleInitialized', _inputs_flat, _attrs, _result, name)
        (_result,) = _result
        return _result
    else:
        try:
            # Eager fast path: execute directly through the C API.
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._context_handle, _ctx._eager_context.device_name, 'IsBoostedTreesEnsembleInitialized', name, _ctx._post_execution_callbacks, tree_ensemble_handle)
            return _result
        except _core._FallbackException:
            # Fast path rejected the inputs; retry via the slow path.
            return is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if (name is not None):
                message = ((e.message + ' name: ') + name)
            else:
                message = e.message
            # Convert the C-level status to a TF error without exception chaining.
            _six.raise_from(_core._status_to_exception(e.code, message), None)
|
def is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
    'This is the slowpath function for Eager mode.\n    This is for function is_boosted_trees_ensemble_initialized\n  '
    # Use the caller-provided eager context when given (the fast path passes it).
    _ctx = (ctx if ctx else _context.context())
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    _inputs_flat = [tree_ensemble_handle]
    _attrs = None
    # num_outputs=1: the op produces a single bool tensor.
    _result = _execute.execute(b'IsBoostedTreesEnsembleInitialized', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('IsBoostedTreesEnsembleInitialized', _inputs_flat, _attrs, _result, name)
    (_result,) = _result
    return _result
| -4,720,951,345,601,357,000
|
This is the slowpath function for Eager mode.
This is for function is_boosted_trees_ensemble_initialized
|
Keras_tensorflow_nightly/source2.7/tensorflow/python/ops/gen_boosted_trees_ops.py
|
is_boosted_trees_ensemble_initialized_eager_fallback
|
Con-Mi/lambda-packs
|
python
|
def is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle, name=None, ctx=None):
    'This is the slowpath function for Eager mode.\n    This is for function is_boosted_trees_ensemble_initialized\n  '
    # Use the caller-provided eager context when given (the fast path passes it).
    _ctx = (ctx if ctx else _context.context())
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
    _inputs_flat = [tree_ensemble_handle]
    _attrs = None
    # num_outputs=1: the op produces a single bool tensor.
    _result = _execute.execute(b'IsBoostedTreesEnsembleInitialized', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient('IsBoostedTreesEnsembleInitialized', _inputs_flat, _attrs, _result, name)
    (_result,) = _result
    return _result
|
def get(name='default', tool=None):
    """Load a profile by name.

    ``name`` may contain several space-separated tokens. A bare token names a
    profile merged into the result; a ``spec=profile`` token merges the named
    profile into the matching sub-spec. If ``tool`` is specified, the specs
    are searched for the tool and, if found, applied.
    """
    result = Profile()
    for token in name.split(' '):
        if '=' not in token:
            # Plain profile name: merge it into the top-level profile.
            result.update(Profile(profile=token))
        else:
            # spec=profile: merge into (or create) the named spec profile.
            spec_key, profile_name = token.split('=')[0], token.split('=')[1]
            spec = result.specs.get(spec_key, Profile())
            spec.update(Profile(profile=profile_name))
            result.specs[spec_key] = spec
    return result
| -4,386,867,688,206,611,000
|
Load a profile by name. If tool is specified, the specs are
    searched for the tool and, if found, the specs are applied.
|
jip/profiles.py
|
get
|
VDBWRAIR/pyjip
|
python
|
def get(name='default', tool=None):
    'Load a profile by name. If tool is specified, the specs are\n    searched for the tool and if found, the specs are applied.\n    '
    # `name` may carry several space-separated tokens: either a bare profile
    # name, or `<spec>=<profile>` targeting a named sub-spec.
    s = name.split(' ')
    p = Profile()
    for ss in s:
        tup = ss.split('=')
        if (len(tup) == 1):
            # Bare profile name: merge it into the top-level profile.
            l = Profile(profile=tup[0])
            p.update(l)
        else:
            # spec=profile: merge into (or create) the named spec profile.
            spec = p.specs.get(tup[0], Profile())
            spec.update(Profile(profile=tup[1]))
            p.specs[tup[0]] = spec
    # NOTE(review): `tool` is accepted but not used here — confirm against callers.
    return p
|
def get_specs(path=None):
    """Load specs from the default locations, then update from the spec file
    at *path* if one is given.

    Later sources win: ``$HOME/.jip/jip.specs`` < ``./jip.specs`` < *path*.

    :param path: optional path to an additional spec file
    """
    def _read(jf):
        # Parse one specs file; malformed JSON is logged and re-raised as a
        # jip validation error.
        with open(jf) as of:
            try:
                return json.load(of)
            except ValueError:
                log.error('Malformed json file %s', jf)
                raise jip.ValidationError('jip.profiles', ('Malformed json file %s' % jf))
    global specs
    specs = {}
    sources = [
        os.path.join(os.getenv('HOME', ''), '.jip/jip.specs'),
        os.path.join(os.getcwd(), 'jip.specs'),
    ]
    if path:
        sources.append(path)
    for source in sources:
        if os.path.exists(source):
            specs = _update(specs, _read(source))
    return specs
| 6,262,297,116,974,644,000
|
    Load specs from default locations and then update from specs in given
path if specified.
:param path: optional path to an additional spec file
|
jip/profiles.py
|
get_specs
|
VDBWRAIR/pyjip
|
python
|
def get_specs(path=None):
    """Load specs from the default locations and then update from the spec
    file at *path*, if given.

    Later sources win: ``$HOME/.jip/jip.specs`` < ``./jip.specs`` < *path*.

    :param path: optional path to an additional spec file
    """
    def load_json(jf):
        # Parse one specs file; malformed JSON is logged and re-raised as a
        # jip validation error.
        with open(jf) as of:
            try:
                data = json.load(of)
            except ValueError:
                log.error('Malformed json file %s', jf)
                raise jip.ValidationError('jip.profiles', ('Malformed json file %s' % jf))
        return data
    global specs
    cwd = os.path.join(os.getcwd(), 'jip.specs')
    # BUG FIX: `os.getenv('HOME', )` dropped the default and returned None
    # when HOME is unset, making os.path.join() raise. Default to '' as the
    # sibling implementation of this function does.
    home = os.path.join(os.getenv('HOME', ''), '.jip/jip.specs')
    specs = {}
    if os.path.exists(home):
        specs = _update(specs, load_json(home))
    if os.path.exists(cwd):
        specs = _update(specs, load_json(cwd))
    if (path and os.path.exists(path)):
        specs = _update(specs, load_json(path))
    return specs
|
def apply_to_pipeline(self, pipeline):
    """Apply this profile to every node of the given pipeline.

    :param pipeline: the pipeline
    :type pipeline: :class:`jip.pipeline.Pipeline`
    """
    for pipeline_node in pipeline.nodes():
        self.apply_to_node(pipeline_node)
| -2,517,469,574,172,574,000
|
Apply this profile to the pipeline
:param pipeline: the pipeline
:type pipeline: :class:`jip.pipeline.Pipeline`
|
jip/profiles.py
|
apply_to_pipeline
|
VDBWRAIR/pyjip
|
python
|
def apply_to_pipeline(self, pipeline):
    'Apply this profile to the pipeline\n\n    :param pipeline: the pipeline\n    :type pipeline: :class:`jip.pipeline.Pipeline`\n    '
    # Delegate per-node application; apply_to_node carries the actual logic.
    for node in pipeline.nodes():
        self.apply_to_node(node)
|
@property
def err(self):
    """The job's error log file.

    :getter: access the jobs error log file
    :setter: set the jobs error log file
    :type: string
    """
    error_log = self.log
    return error_log
| -9,118,220,976,951,909,000
|
Set the jobs error log file
:getter: access the jobs name
:setter: set the jobs name
:type: string
|
jip/profiles.py
|
err
|
VDBWRAIR/pyjip
|
python
|
@property
def err(self):
    """The job's error log file.

    :getter: access the jobs error log file
    :setter: set the jobs error log file
    :type: string
    """
    return self.log
|
@property
def dir(self):
    """The job's working directory.

    :getter: access the jobs working directory
    :setter: set the jobs working directory
    :type: string
    """
    working_directory = self.working_dir
    return working_directory
| 7,015,927,195,201,332,000
|
Set the jobs working directory
:getter: access the jobs working directory
:setter: set the jobs working directory
:type: string
|
jip/profiles.py
|
dir
|
VDBWRAIR/pyjip
|
python
|
@property
def dir(self):
    """The job's working directory.

    :getter: access the jobs working directory
    :setter: set the jobs working directory
    :type: string
    """
    return self.working_dir
|
@property
def name(self):
    """The job's name.

    :getter: access the jobs name
    :setter: set the jobs name
    :type: string
    """
    job_name = self._name
    return job_name
| 5,520,016,232,128,242,000
|
Set the jobs name
:getter: access the jobs name
:setter: set the jobs name
:type: string
|
jip/profiles.py
|
name
|
VDBWRAIR/pyjip
|
python
|
@property
def name(self):
    """The job's name.

    :getter: access the jobs name
    :setter: set the jobs name
    :type: string
    """
    return self._name
|
def load(self, profile_name):
    """Set this profile's values to those of the stored profile with the
    given name. A ``ValueError`` is raised when no profile of that name
    can be found.

    :param profile_name: the name of the profile that will be loaded
    :type profile_name: string
    """
    import jip
    stored = jip.config.get('profiles', {})
    if (profile_name not in stored):
        raise ValueError(('Profile %s not found!' % profile_name))
    profile = stored[profile_name]
    # Copy each known attribute from the stored profile, keeping the
    # current value as the default when the stored profile omits it.
    for attr in ('threads', 'nodes', 'tasks', 'tasks_per_node',
                 'environment', 'time', 'queue', 'priority', 'log', 'out',
                 'account', 'mem', 'extra', 'env', 'description'):
        setattr(self, attr, profile.get(attr, getattr(self, attr)))
| -6,359,191,593,366,046,000
|
Set this profiles values to the values loaded from the profile
stored under the given name. An exception is raised if no profile of
that name could be found.
:param profile_name: the name of the profile that will be loaded
:type profile_name: string
|
jip/profiles.py
|
load
|
VDBWRAIR/pyjip
|
python
|
def load(self, profile_name):
    'Set this profiles values to the values loaded from the profile\n    stored under the given name. An exception is raised if no profile of\n    that name could be found.\n\n    :param profile_name: the name of the profile that will be loaded\n    :type profile_name: string\n    '
    import jip
    # Stored profiles live in the global jip configuration under 'profiles'.
    profiles = jip.config.get('profiles', {})
    if (profile_name not in profiles):
        raise ValueError(('Profile %s not found!' % profile_name))
    profile = profiles[profile_name]
    # Each attribute keeps its current value when the stored profile does
    # not define a replacement.
    self.threads = profile.get('threads', self.threads)
    self.nodes = profile.get('nodes', self.nodes)
    self.tasks = profile.get('tasks', self.tasks)
    self.tasks_per_node = profile.get('tasks_per_node', self.tasks_per_node)
    self.environment = profile.get('environment', self.environment)
    self.time = profile.get('time', self.time)
    self.queue = profile.get('queue', self.queue)
    self.priority = profile.get('priority', self.priority)
    self.log = profile.get('log', self.log)
    self.out = profile.get('out', self.out)
    self.account = profile.get('account', self.account)
    self.mem = profile.get('mem', self.mem)
    self.extra = profile.get('extra', self.extra)
    self.env = profile.get('env', self.env)
    self.description = profile.get('description', self.description)
|
def load_args(self, args):
    """Update this profile from a dictionary of command line arguments.

    The argument names must match the profile attributes. Values may carry
    several space-separated tokens; a ``spec=value`` token targets the
    named sub-spec rather than this profile.
    """
    for (arg_name, arg_value) in args.iteritems():
        # Normalize '--some-arg' to the attribute name 'some_arg'.
        attr = re.sub('-', '_', re.sub('^-+', '', arg_name))
        if (arg_value and hasattr(self, attr)):
            for token in arg_value.split(' '):
                tup = token.split('=')
                if (len(tup) == 1):
                    setattr(self, attr, token)
                else:
                    spec_profile = self.specs.get(tup[0], Profile())
                    setattr(spec_profile, attr, tup[1])
                    self.specs[tup[0]] = spec_profile
| 4,070,589,057,203,432,000
|
Update this profile from the given dictionary of command line
arguments. The argument names must match the profile attributes
|
jip/profiles.py
|
load_args
|
VDBWRAIR/pyjip
|
python
|
def load_args(self, args):
    'Update this profile from the given dictionary of command line\n    arguments. The argument names must match the profile attributes\n    '
    for (k, v) in args.iteritems():
        # BUG FIX: `re.sub('^-+', , k)` was a SyntaxError — the empty
        # replacement string was missing. Strip leading dashes, then map
        # remaining '-' to '_' to obtain the attribute name.
        k = re.sub('^-+', '', k)
        k = re.sub('-', '_', k)
        if (v and hasattr(self, k)):
            # Values may carry several space-separated tokens; `spec=value`
            # targets the named sub-spec instead of this profile.
            for single in v.split(' '):
                tup = single.split('=')
                if (len(tup) == 1):
                    setattr(self, k, single)
                else:
                    spec_profile = self.specs.get(tup[0], Profile())
                    setattr(spec_profile, k, tup[1])
                    self.specs[tup[0]] = spec_profile
|
def apply_overwrite(self, job):
    'Apply the profile and overwrite all settings that are set\n    in this profile\n    '
    log.debug('Profiles | Overwriting job profile to %s', job)
    # Unconditionally overwrite every job attribute for which this profile
    # carries a value (contrast with apply(), which only fills unset values).
    if self.name:
        job.name = self._render_job_name(job)
    if self.threads:
        job.threads = int(self.threads)
    if (self.nodes is not None):
        job.nodes = self.nodes
    if (self.tasks is not None):
        job.tasks = self.tasks
    if (self.tasks_per_node is not None):
        job.tasks_per_node = self.tasks_per_node
    if (self.environment is not None):
        job.environment = self.environment
    if (self.queue is not None):
        job.queue = self.queue
    if (self.priority is not None):
        job.priority = self.priority
    if (self.time is not None):
        job.max_time = jip.utils.parse_time(self.time)
    if (self.mem is not None):
        job.max_memory = jip.utils.parse_mem(self.mem)
    if (self.log is not None):
        job.stderr = self._render(job, self.log)
    if (self.out is not None):
        job.stdout = self._render(job, self.out)
    if (self.account is not None):
        job.account = self.account
    if (self.temp is not None):
        job.temp = self.temp
    if (self.extra is not None):
        job.extra = self.extra
    if (self.working_dir is not None):
        job.working_directory = os.path.abspath(self.working_dir)
    # Relative log paths are resolved against the (possibly just updated)
    # working directory — ordering matters here.
    if (job.stdout and (not job.stdout.startswith('/'))):
        job.stdout = os.path.join(job.working_directory, job.stdout)
    if (job.stderr and (not job.stderr.startswith('/'))):
        job.stderr = os.path.join(job.working_directory, job.stderr)
    if self.env:
        # Render env values as templates against os.environ overlaid with the
        # job's own env.
        current = os.environ.copy()
        if job.env:
            current.update(job.env)
        rendered = {}
        for (k, v) in self.env.iteritems():
            rendered[k] = render_template(v, **current)
        job.env.update(rendered)
    if hasattr(job, 'pipe_to'):
        # Propagate to piped children so the whole pipe group stays consistent.
        for child in job.pipe_to:
            self.apply_overwrite(child)
    # Spec names are glob patterns matched against the (final) job name.
    for (spec_name, spec) in self.specs.iteritems():
        if fnmatch.fnmatch(job.name, spec_name):
            spec.apply_overwrite(job)
| -5,163,799,074,057,687,000
|
Apply the profile and overwrite all settings that are set
in this profile
|
jip/profiles.py
|
apply_overwrite
|
VDBWRAIR/pyjip
|
python
|
def apply_overwrite(self, job):
    'Apply the profile and overwrite all settings that are set\n    in this profile\n    '
    log.debug('Profiles | Overwriting job profile to %s', job)
    # Unconditionally overwrite every job attribute for which this profile
    # carries a value (contrast with apply(), which only fills unset values).
    if self.name:
        job.name = self._render_job_name(job)
    if self.threads:
        job.threads = int(self.threads)
    if (self.nodes is not None):
        job.nodes = self.nodes
    if (self.tasks is not None):
        job.tasks = self.tasks
    if (self.tasks_per_node is not None):
        job.tasks_per_node = self.tasks_per_node
    if (self.environment is not None):
        job.environment = self.environment
    if (self.queue is not None):
        job.queue = self.queue
    if (self.priority is not None):
        job.priority = self.priority
    if (self.time is not None):
        job.max_time = jip.utils.parse_time(self.time)
    if (self.mem is not None):
        job.max_memory = jip.utils.parse_mem(self.mem)
    if (self.log is not None):
        job.stderr = self._render(job, self.log)
    if (self.out is not None):
        job.stdout = self._render(job, self.out)
    if (self.account is not None):
        job.account = self.account
    if (self.temp is not None):
        job.temp = self.temp
    if (self.extra is not None):
        job.extra = self.extra
    if (self.working_dir is not None):
        job.working_directory = os.path.abspath(self.working_dir)
    # Relative log paths are resolved against the (possibly just updated)
    # working directory — ordering matters here.
    if (job.stdout and (not job.stdout.startswith('/'))):
        job.stdout = os.path.join(job.working_directory, job.stdout)
    if (job.stderr and (not job.stderr.startswith('/'))):
        job.stderr = os.path.join(job.working_directory, job.stderr)
    if self.env:
        # Render env values as templates against os.environ overlaid with the
        # job's own env.
        current = os.environ.copy()
        if job.env:
            current.update(job.env)
        rendered = {}
        for (k, v) in self.env.iteritems():
            rendered[k] = render_template(v, **current)
        job.env.update(rendered)
    if hasattr(job, 'pipe_to'):
        # Propagate to piped children so the whole pipe group stays consistent.
        for child in job.pipe_to:
            self.apply_overwrite(child)
    # Spec names are glob patterns matched against the (final) job name.
    for (spec_name, spec) in self.specs.iteritems():
        if fnmatch.fnmatch(job.name, spec_name):
            spec.apply_overwrite(job)
|
def apply(self, job, pipeline=False, overwrite=False):
    'Apply this profile to the given job.'
    log.debug('Profiles | Applying job profile to %s', job)
    if overwrite:
        # Hard overwrite requested: delegate and stop.
        self.apply_overwrite(job)
        return
    if (not pipeline):
        job.name = self._render_job_name(job)
    elif (self.name is not None):
        log.info('Apply pipeline name to job: %s %s', job, self.name)
        job.pipeline = self._render(job, self.name)
    # Soft apply: only fill job settings that are still unset.
    if (self.threads and (job.threads is None)):
        job.threads = int(self.threads)
    if ((self.nodes is not None) and (job.nodes is None)):
        job.nodes = self.nodes
    if ((self.tasks is not None) and (job.tasks is None)):
        job.tasks = self.tasks
    # BUG FIX: this condition read `job.tasts_per_node` (typo), raising an
    # AttributeError whenever the profile defined tasks_per_node.
    if ((self.tasks_per_node is not None) and (job.tasks_per_node is None)):
        job.tasks_per_node = self.tasks_per_node
    if ((self.environment is not None) and (job.environment is None)):
        job.environment = self.environment
    if ((self.queue is not None) and (job.queue is None)):
        job.queue = self.queue
    if ((self.priority is not None) and (job.priority is None)):
        job.priority = self.priority
    if ((self.time is not None) and (job.max_time is None)):
        job.max_time = jip.utils.parse_time(self.time)
    if (self.mem is not None):
        # Memory is additive: the profile's requirement is stacked on top of
        # whatever the job already requested.
        if (job.max_memory is None):
            job.max_memory = 0
        job.max_memory += jip.utils.parse_mem(self.mem)
    if ((self.log is not None) and (job.stderr is None)):
        job.stderr = self._render(job, self.log)
    if ((self.out is not None) and (job.stdout is None)):
        job.stdout = self._render(job, self.out)
    if ((self.account is not None) and (job.account is None)):
        job.account = self.account
    if ((self.temp is not None) and (job.temp is None)):
        job.temp = self.temp
    if ((self.extra is not None) and (job.extra is None)):
        job.extra = self.extra
    if ((self.working_dir is not None) and (job.working_directory is None)):
        job.working_directory = os.path.abspath(self.working_dir)
    # Relative log paths are resolved against the working directory; this must
    # happen after working_directory is (possibly) set above.
    if (job.stdout and (not job.stdout.startswith('/'))):
        job.stdout = os.path.join(job.working_directory, job.stdout)
    if (job.stderr and (not job.stderr.startswith('/'))):
        job.stderr = os.path.join(job.working_directory, job.stderr)
    if self.env:
        # Render env values as templates against os.environ overlaid with the
        # job's own env.
        current = os.environ.copy()
        if job.env:
            current.update(job.env)
        rendered = {}
        for (k, v) in self.env.iteritems():
            rendered[k] = render_template(v, **current)
        job.env.update(rendered)
    if hasattr(job, 'pipe_to'):
        # Children of a pipe group are applied with default pipeline/overwrite
        # flags, matching the original behavior.
        for child in job.pipe_to:
            self.apply(child)
| -7,001,295,115,198,891,000
|
Apply this profile to the given job.
|
jip/profiles.py
|
apply
|
VDBWRAIR/pyjip
|
python
|
def apply(self, job, pipeline=False, overwrite=False):
    """Apply this profile to the given job.

    Values already set on the job take precedence: each profile value is
    only copied onto a job attribute that is still ``None`` (memory is
    the exception and is added up). With ``overwrite`` the method
    delegates to :meth:`apply_overwrite` and forces all values instead.

    :param job: the job to configure
    :param pipeline: if True, apply the profile name as the job's
                     pipeline name instead of rendering the job name
    :param overwrite: if True, force profile values onto the job
    """
    log.debug('Profiles | Applying job profile to %s', job)
    if overwrite:
        self.apply_overwrite(job)
        return
    if not pipeline:
        job.name = self._render_job_name(job)
    elif self.name is not None:
        log.info('Apply pipeline name to job: %s %s', job, self.name)
        job.pipeline = self._render(job, self.name)
    if self.threads and job.threads is None:
        job.threads = int(self.threads)
    if self.nodes is not None and job.nodes is None:
        job.nodes = self.nodes
    if self.tasks is not None and job.tasks is None:
        job.tasks = self.tasks
    # BUG FIX: the original guarded on the misspelled attribute
    # ``job.tasts_per_node`` while assigning ``job.tasks_per_node``;
    # check the attribute that is actually written.
    if self.tasks_per_node is not None and job.tasks_per_node is None:
        job.tasks_per_node = self.tasks_per_node
    if self.environment is not None and job.environment is None:
        job.environment = self.environment
    if self.queue is not None and job.queue is None:
        job.queue = self.queue
    if self.priority is not None and job.priority is None:
        job.priority = self.priority
    if self.time is not None and job.max_time is None:
        job.max_time = jip.utils.parse_time(self.time)
    if self.mem is not None:
        # memory requirements are additive across applied profiles
        if job.max_memory is None:
            job.max_memory = 0
        job.max_memory += jip.utils.parse_mem(self.mem)
    if self.log is not None and job.stderr is None:
        job.stderr = self._render(job, self.log)
    if self.out is not None and job.stdout is None:
        job.stdout = self._render(job, self.out)
    if self.account is not None and job.account is None:
        job.account = self.account
    if self.temp is not None and job.temp is None:
        job.temp = self.temp
    if self.extra is not None and job.extra is None:
        job.extra = self.extra
    if self.working_dir is not None and job.working_directory is None:
        job.working_directory = os.path.abspath(self.working_dir)
    # resolve relative stdout/stderr paths against the working directory
    if job.stdout and not job.stdout.startswith('/'):
        job.stdout = os.path.join(job.working_directory, job.stdout)
    if job.stderr and not job.stderr.startswith('/'):
        job.stderr = os.path.join(job.working_directory, job.stderr)
    if self.env:
        # render env value templates against os.environ updated with the
        # job's own env. NOTE(review): assumes job.env is a dict here —
        # a None job.env would fail on update(); confirm with callers.
        current = os.environ.copy()
        if job.env:
            current.update(job.env)
        rendered = {}
        for k, v in self.env.iteritems():
            rendered[k] = render_template(v, **current)
        job.env.update(rendered)
    if hasattr(job, 'pipe_to'):
        # recursively apply the profile to piped children
        for child in job.pipe_to:
            self.apply(child)
|
def update(self, profile, overwrite=True):
    """Update this profile from a given profile.

    All values that are not None in the other profile are applied to
    this profile.

    :param profile: the other profile
    :type profile: :class:`Profile`
    :param overwrite: if True, values are set regardless; otherwise a
                      new value is only applied if the old value is None
    """
    for name in ('environment', 'nodes', 'threads', 'tasks',
                 'tasks_per_node', 'queue', 'time', 'mem', 'priority',
                 'log', 'out', 'account', 'prefix', 'env', 'temp',
                 'extra', 'working_dir'):
        value = getattr(profile, name)
        if value is None:
            continue
        if overwrite or getattr(self, name) is None:
            setattr(self, name, value)
| 6,928,398,125,538,205,000
|
Update this profile from a given profile. All values that are
not None in the other profile are applied to this
profile
:param profile: the other profile
:type profile: :class:`Profile`
:param overwrite: if True, value will be set regardless. Otherwise, the
new value will only be applied if the old value
is None
|
jip/profiles.py
|
update
|
VDBWRAIR/pyjip
|
python
|
def update(self, profile, overwrite=True):
    """Update this profile from a given profile.

    All values that are not None in the other profile are applied to
    this profile.

    :param profile: the other profile
    :type profile: :class:`Profile`
    :param overwrite: if True, values are set regardless; otherwise a
                      new value is only applied if the old value is None
    """
    for name in ('environment', 'nodes', 'threads', 'tasks',
                 'tasks_per_node', 'queue', 'time', 'mem', 'priority',
                 'log', 'out', 'account', 'prefix', 'env', 'temp',
                 'extra', 'working_dir'):
        value = getattr(profile, name)
        if value is None:
            continue
        if overwrite or getattr(self, name) is None:
            setattr(self, name, value)
|
def merge(self, master):
    """Merge this profile with the given master profile.

    Currently this only adopts the master's working directory when this
    profile has none of its own.

    :param master: the master profile
    """
    if self.working_dir is None:
        self.working_dir = master.working_dir
| 7,669,131,453,964,874,000
|
Merge this profile with the given master profile.
Currently this merges the working directory of jobs
:param master: the master profile
|
jip/profiles.py
|
merge
|
VDBWRAIR/pyjip
|
python
|
def merge(self, master):
    """Merge this profile with the given master profile.

    Currently this only adopts the master's working directory when this
    profile has none of its own.

    :param master: the master profile
    """
    if self.working_dir is None:
        self.working_dir = master.working_dir
|
@classmethod
def from_job(cls, job):
    """Create a profile based on a given job.

    All properties are set according to the given job, except the job's
    temp state, which is kept unmodified.

    :param job: the job
    :returns: new profile generated from the job
    """
    profile = cls()
    # a non-positive thread count on the job means "unset"
    profile.threads = job.threads if job.threads > 0 else None
    profile.nodes = job.nodes
    profile.tasks = job.tasks
    # BUG FIX: store under the canonical ``tasks_per_node`` attribute;
    # the original wrote to the misspelled ``tasts_per_node``, which
    # apply()/update() never read, silently dropping the value.
    profile.tasks_per_node = job.tasks_per_node
    profile.environment = job.environment
    profile.queue = job.queue
    profile.priority = job.priority
    profile.time = job.max_time
    profile.mem = job.max_memory
    profile.log = job.stderr
    profile.out = job.stdout
    profile.account = job.account
    profile.extra = job.extra
    profile.working_dir = job.working_directory
    profile.env = job.env
    return profile
| 1,635,524,039,944,568,600
|
Create a profile based on a given job. All properties
are set according to the given job, except the jobs temp state,
which will be kept unmodified.
:param job: the job
:returns: new profile generated from the job
|
jip/profiles.py
|
from_job
|
VDBWRAIR/pyjip
|
python
|
@classmethod
def from_job(cls, job):
    """Create a profile based on a given job.

    All properties are set according to the given job, except the job's
    temp state, which is kept unmodified.

    :param job: the job
    :returns: new profile generated from the job
    """
    profile = cls()
    # a non-positive thread count on the job means "unset"
    profile.threads = job.threads if job.threads > 0 else None
    profile.nodes = job.nodes
    profile.tasks = job.tasks
    # BUG FIX: store under the canonical ``tasks_per_node`` attribute;
    # the original wrote to the misspelled ``tasts_per_node``, which
    # apply()/update() never read, silently dropping the value.
    profile.tasks_per_node = job.tasks_per_node
    profile.environment = job.environment
    profile.queue = job.queue
    profile.priority = job.priority
    profile.time = job.max_time
    profile.mem = job.max_memory
    profile.log = job.stderr
    profile.out = job.stdout
    profile.account = job.account
    profile.extra = job.extra
    profile.working_dir = job.working_directory
    profile.env = job.env
    return profile
|
@classmethod
def from_file(cls, file_name):
    """Load a profile from a json file.

    :param file_name: the name of the input file
    :raises jip.ValidationError: if the file is not valid json
    """
    with open(file_name) as fh:
        try:
            data = json.load(fh)
        except ValueError:
            log.error('Malformed json file %s', file_name)
            raise jip.ValidationError('jip.profiles',
                                      'Malformed json file %s' % file_name)
    return cls.from_dict(data)
| -8,192,833,256,130,372,000
|
Load a profile from a json file
:param file_name: the name of the input file
|
jip/profiles.py
|
from_file
|
VDBWRAIR/pyjip
|
python
|
@classmethod
def from_file(cls, file_name):
    """Load a profile from a json file.

    :param file_name: the name of the input file
    :raises jip.ValidationError: if the file is not valid json
    """
    with open(file_name) as fh:
        try:
            data = json.load(fh)
        except ValueError:
            log.error('Malformed json file %s', file_name)
            raise jip.ValidationError('jip.profiles',
                                      'Malformed json file %s' % file_name)
    return cls.from_dict(data)
|
@classmethod
def from_dict(cls, data):
    """Load a profile from a dictionary.

    Plain keys become profile attributes; the special 'jobs' key holds
    nested per-job profile specs that are loaded recursively.
    """
    profile = cls()
    for key, value in data.iteritems():
        if key != 'jobs':
            setattr(profile, key, value)
    if 'jobs' in data:
        for name, spec in data['jobs'].iteritems():
            profile.specs[name] = cls.from_dict(spec)
    return profile
| 4,905,723,379,121,253,000
|
Load a profile from a dictionary
|
jip/profiles.py
|
from_dict
|
VDBWRAIR/pyjip
|
python
|
@classmethod
def from_dict(cls, data):
    """Load a profile from a dictionary.

    Plain keys become profile attributes; the special 'jobs' key holds
    nested per-job profile specs that are loaded recursively.
    """
    profile = cls()
    for key, value in data.iteritems():
        if key != 'jobs':
            setattr(profile, key, value)
    if 'jobs' in data:
        for name, spec in data['jobs'].iteritems():
            profile.specs[name] = cls.from_dict(spec)
    return profile
|
def find_docs():
    """Yield whitelisted documentation files that exist on disk."""
    template = '../doc/source/{}.rst'
    names = ['about', 'installation', 'configuration', 'commands',
             'running', 'logging', 'test-anatomy', 'unittests',
             'contributing']
    for name in names:
        candidate = template.format(name)
        if os.path.isfile(candidate):
            yield candidate
| -4,493,309,502,573,737,500
|
Yields files as per the whitelist.
|
scripts/readme.py
|
find_docs
|
abdullahzamanbabar/syntribos
|
python
|
def find_docs():
    """Yield whitelisted documentation files that exist on disk."""
    template = '../doc/source/{}.rst'
    names = ['about', 'installation', 'configuration', 'commands',
             'running', 'logging', 'test-anatomy', 'unittests',
             'contributing']
    for name in names:
        candidate = template.format(name)
        if os.path.isfile(candidate):
            yield candidate
|
def concat_docs():
    """Concatenate files yielded by the generator `find_docs` into README.rst."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    parent, _ = os.path.split(script_dir)
    outfile = parent + '/README.rst'
    # refuse to create the README from scratch; it must already exist
    if not os.path.isfile(outfile):
        print('../README.rst not found, exiting!')
        exit(1)
    with open(outfile, 'w') as readme_handle:
        readme_handle.write(repository_tags)
        for doc in find_docs():
            with open(doc, 'r') as doc_handle:
                for line in doc_handle:
                    readme_handle.write(line)
            readme_handle.write('\n')
| 3,950,141,068,991,426,600
|
Concatenates files yielded by the generator `find_docs`.
|
scripts/readme.py
|
concat_docs
|
abdullahzamanbabar/syntribos
|
python
|
def concat_docs():
    """Concatenate files yielded by the generator `find_docs` into README.rst."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    parent, _ = os.path.split(script_dir)
    outfile = parent + '/README.rst'
    # refuse to create the README from scratch; it must already exist
    if not os.path.isfile(outfile):
        print('../README.rst not found, exiting!')
        exit(1)
    with open(outfile, 'w') as readme_handle:
        readme_handle.write(repository_tags)
        for doc in find_docs():
            with open(doc, 'r') as doc_handle:
                for line in doc_handle:
                    readme_handle.write(line)
            readme_handle.write('\n')
|
def get_data_loaders(batch_size: int, model):
    """Helper method to create dataloaders for ssl, kNN train and kNN test.

    Args:
        batch_size: Desired batch size for all dataloaders
        model: model instance used to select the matching collate function
    """
    # pick the collate function matching the SSL method
    if isinstance(model, SwaVModel):
        chosen_collate = swav_collate_fn
    elif isinstance(model, DINOModel):
        chosen_collate = dino_collate_fn
    else:
        chosen_collate = collate_fn
    loader_ssl = torch.utils.data.DataLoader(
        dataset_train_ssl, batch_size=batch_size, shuffle=True,
        collate_fn=chosen_collate, drop_last=True, num_workers=num_workers)
    loader_knn_train = torch.utils.data.DataLoader(
        dataset_train_kNN, batch_size=batch_size, shuffle=False,
        drop_last=False, num_workers=num_workers)
    loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=batch_size, shuffle=False,
        drop_last=False, num_workers=num_workers)
    return (loader_ssl, loader_knn_train, loader_test)
| -1,410,701,609,825,753,000
|
Helper method to create dataloaders for ssl, kNN train and kNN test
Args:
batch_size: Desired batch size for all dataloaders
|
docs/source/getting_started/benchmarks/cifar10_benchmark.py
|
get_data_loaders
|
dczifra/lightly
|
python
|
def get_data_loaders(batch_size: int, model):
    """Helper method to create dataloaders for ssl, kNN train and kNN test.

    Args:
        batch_size: Desired batch size for all dataloaders
        model: model instance used to select the matching collate function
    """
    # pick the collate function matching the SSL method
    if isinstance(model, SwaVModel):
        chosen_collate = swav_collate_fn
    elif isinstance(model, DINOModel):
        chosen_collate = dino_collate_fn
    else:
        chosen_collate = collate_fn
    loader_ssl = torch.utils.data.DataLoader(
        dataset_train_ssl, batch_size=batch_size, shuffle=True,
        collate_fn=chosen_collate, drop_last=True, num_workers=num_workers)
    loader_knn_train = torch.utils.data.DataLoader(
        dataset_train_kNN, batch_size=batch_size, shuffle=False,
        drop_last=False, num_workers=num_workers)
    loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=batch_size, shuffle=False,
        drop_last=False, num_workers=num_workers)
    return (loader_ssl, loader_knn_train, loader_test)
|
def setup_data(self, path):
    """
    Adds additional perspectives. For example, in the conversation:

        x1 y1
        x2 y2
        x3

    Creates the additional dialog:

        y1 x2
        y2 x3
    """
    pending = []
    for (episode, is_first) in super().setup_data(path):
        if is_first:
            # a new conversation starts: emit the rebuilt (flipped)
            # version of the previous conversation first
            for (idx, flipped) in enumerate(self._rebuild(pending)):
                if self._is_valid(flipped):
                    yield (flipped, idx == 0)
            pending.clear()
        pending.append(episode)
        if self._is_valid(episode):
            yield (episode, is_first)
    # flush the flipped version of the final conversation
    if pending:
        for (idx, flipped) in enumerate(self._rebuild(pending)):
            if self._is_valid(flipped):
                yield (flipped, idx == 0)
| 5,024,126,366,058,978,000
|
Adds additional perspectives. For example, in the conversation:
x1 y1
x2 y2
x3
Creates the additional dialog:
y1 x2
y2 x3
|
doc/integrations/pytorch/parlai/tasks/cornell_movie/agents.py
|
setup_data
|
GuillaumeLeclerc/cortx
|
python
|
def setup_data(self, path):
    """
    Adds additional perspectives. For example, in the conversation:

        x1 y1
        x2 y2
        x3

    Creates the additional dialog:

        y1 x2
        y2 x3
    """
    pending = []
    for (episode, is_first) in super().setup_data(path):
        if is_first:
            # a new conversation starts: emit the rebuilt (flipped)
            # version of the previous conversation first
            for (idx, flipped) in enumerate(self._rebuild(pending)):
                if self._is_valid(flipped):
                    yield (flipped, idx == 0)
            pending.clear()
        pending.append(episode)
        if self._is_valid(episode):
            yield (episode, is_first)
    # flush the flipped version of the final conversation
    if pending:
        for (idx, flipped) in enumerate(self._rebuild(pending)):
            if self._is_valid(flipped):
                yield (flipped, idx == 0)
|
def tearDown(self):
    """Clean up the database: truncate the test tables, then defer to super."""
    cursor = self.connection.cursor()
    cursor.execute('\n            TRUNCATE data_dictionary CASCADE\n        ')
    self.connection.commit()
    super(IntegrationTestField, self).tearDown()
| -7,246,195,938,868,994,000
|
Clean up the database, delete tables and functions.
|
socorro/unittest/external/postgresql/test_field.py
|
tearDown
|
Acidburn0zzz/socorro
|
python
|
def tearDown(self):
    """Clean up the database: truncate the test tables, then defer to super."""
    cursor = self.connection.cursor()
    cursor.execute('\n            TRUNCATE data_dictionary CASCADE\n        ')
    self.connection.commit()
    super(IntegrationTestField, self).tearDown()
|
def _isscalar(x):
    """
    Check whether x is a scalar type, or 0-dim.

    Parameters
    ----------
    x : anything
        An input to be checked for scalar-ness.

    Returns
    -------
    is_scalar : boolean
        True if the input is a scalar, False otherwise.
    """
    if np.isscalar(x):
        return True
    # also treat 0-dimensional arrays (empty shape tuple) as scalars
    return hasattr(x, 'shape') and x.shape == ()
| -5,589,859,407,075,775,000
|
Check whether x is if a scalar type, or 0-dim.
Parameters
----------
x : anything
An input to be checked for scalar-ness.
Returns
-------
is_scalar : boolean
True if the input is a scalar, False otherwise.
|
HARK/interpolation.py
|
_isscalar
|
cohenimhuji/HARK
|
python
|
def _isscalar(x):
    """
    Check whether x is a scalar type, or 0-dim.

    Parameters
    ----------
    x : anything
        An input to be checked for scalar-ness.

    Returns
    -------
    is_scalar : boolean
        True if the input is a scalar, False otherwise.
    """
    if np.isscalar(x):
        return True
    # also treat 0-dimensional arrays (empty shape tuple) as scalars
    return hasattr(x, 'shape') and x.shape == ()
|
def calcLogSumChoiceProbs(Vals, sigma):
    """
    Returns the final optimal value and choice probabilities given the choice
    specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    V : [numpy.array]
        A numpy.array that holds the integrated value function.
    P : [numpy.array]
        A numpy.array that holds the discrete choice probabilities
    """
    # degenerate case: no taste shocks, pick the best choice with certainty
    if sigma == 0.0:
        best = np.argmax(Vals, axis=0)
        V = np.zeros(Vals[0].shape)
        probs = np.zeros(Vals.shape)
        for choice in range(Vals.shape[0]):
            mask = best == choice
            V[mask] = Vals[choice][mask]
            probs[choice][mask] = 1
        return (V, probs)
    # numerically stable log-sum-exp scaled by sigma
    top = np.max(Vals, axis=0)
    logsum = top + sigma * np.log(np.sum(np.exp((Vals - top) / sigma), axis=0))
    probs = np.exp((Vals - logsum) / sigma)
    return (logsum, probs)
| -8,109,232,112,225,450,000
|
Returns the final optimal value and choice probabilities given the choice
specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
P : [numpy.array]
A numpy.array that holds the discrete choice probabilities
|
HARK/interpolation.py
|
calcLogSumChoiceProbs
|
cohenimhuji/HARK
|
python
|
def calcLogSumChoiceProbs(Vals, sigma):
    """
    Returns the final optimal value and choice probabilities given the choice
    specific value functions `Vals`. Probabilities are degenerate if sigma == 0.0.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    V : [numpy.array]
        A numpy.array that holds the integrated value function.
    P : [numpy.array]
        A numpy.array that holds the discrete choice probabilities
    """
    # degenerate case: no taste shocks, pick the best choice with certainty
    if sigma == 0.0:
        best = np.argmax(Vals, axis=0)
        V = np.zeros(Vals[0].shape)
        probs = np.zeros(Vals.shape)
        for choice in range(Vals.shape[0]):
            mask = best == choice
            V[mask] = Vals[choice][mask]
            probs[choice][mask] = 1
        return (V, probs)
    # numerically stable log-sum-exp scaled by sigma
    top = np.max(Vals, axis=0)
    logsum = top + sigma * np.log(np.sum(np.exp((Vals - top) / sigma), axis=0))
    probs = np.exp((Vals - logsum) / sigma)
    return (logsum, probs)
|
def calcChoiceProbs(Vals, sigma):
    """
    Returns the choice probabilities given the choice specific value functions
    `Vals`. Probabilities are degenerate if sigma == 0.0.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    Probs : [numpy.array]
        A numpy.array that holds the discrete choice probabilities
    """
    # degenerate case: all probability mass on the best choice
    if sigma == 0.0:
        best = np.argmax(Vals, axis=0)
        probs = np.zeros(Vals.shape)
        for choice in range(Vals.shape[0]):
            probs[choice][best == choice] = 1
        return probs
    # softmax over choices, shifted by the max for numerical stability
    top = np.max(Vals, axis=0)
    expV = np.exp((Vals - top) / sigma)
    return expV / np.sum(expV, axis=0)
| -5,392,378,735,282,516,000
|
Returns the choice probabilities given the choice specific value functions
`Vals`. Probabilities are degenerate if sigma == 0.0.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
Probs : [numpy.array]
A numpy.array that holds the discrete choice probabilities
|
HARK/interpolation.py
|
calcChoiceProbs
|
cohenimhuji/HARK
|
python
|
def calcChoiceProbs(Vals, sigma):
    """
    Returns the choice probabilities given the choice specific value functions
    `Vals`. Probabilities are degenerate if sigma == 0.0.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    Probs : [numpy.array]
        A numpy.array that holds the discrete choice probabilities
    """
    # degenerate case: all probability mass on the best choice
    if sigma == 0.0:
        best = np.argmax(Vals, axis=0)
        probs = np.zeros(Vals.shape)
        for choice in range(Vals.shape[0]):
            probs[choice][best == choice] = 1
        return probs
    # softmax over choices, shifted by the max for numerical stability
    top = np.max(Vals, axis=0)
    expV = np.exp((Vals - top) / sigma)
    return expV / np.sum(expV, axis=0)
|
def calcLogSum(Vals, sigma):
    """
    Returns the optimal value given the choice specific value functions Vals.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    V : [numpy.array]
        A numpy.array that holds the integrated value function.
    """
    # degenerate case: no taste shocks, take the upper envelope
    if sigma == 0.0:
        return np.amax(Vals, axis=0)
    # numerically stable log-sum-exp scaled by sigma
    top = np.max(Vals, axis=0)
    return top + sigma * np.log(np.sum(np.exp((Vals - top) / sigma), axis=0))
| -5,382,952,211,769,793,000
|
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
|
HARK/interpolation.py
|
calcLogSum
|
cohenimhuji/HARK
|
python
|
def calcLogSum(Vals, sigma):
    """
    Returns the optimal value given the choice specific value functions Vals.

    Parameters
    ----------
    Vals : [numpy.array]
        A numpy.array that holds choice specific values at common grid points.
    sigma : float
        A number that controls the variance of the taste shocks

    Returns
    -------
    V : [numpy.array]
        A numpy.array that holds the integrated value function.
    """
    # degenerate case: no taste shocks, take the upper envelope
    if sigma == 0.0:
        return np.amax(Vals, axis=0)
    # numerically stable log-sum-exp scaled by sigma
    top = np.max(Vals, axis=0)
    return top + sigma * np.log(np.sum(np.exp((Vals - top) / sigma), axis=0))
|
def __call__(self, x):
    """
    Evaluates the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    y : np.array or float
        The interpolated function evaluated at x: y = f(x), with the same
        shape as x.
    """
    arr = np.asarray(x)
    # evaluate on the flattened input, then restore the original shape
    values = self._evaluate(arr.flatten())
    return values.reshape(arr.shape)
| -6,247,232,095,514,035,000
|
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
y : np.array or float
The interpolated function evaluated at x: y = f(x), with the same
shape as x.
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, x):
    """
    Evaluates the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    y : np.array or float
        The interpolated function evaluated at x: y = f(x), with the same
        shape as x.
    """
    arr = np.asarray(x)
    # evaluate on the flattened input, then restore the original shape
    values = self._evaluate(arr.flatten())
    return values.reshape(arr.shape)
|
def derivative(self, x):
    """
    Evaluates the derivative of the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    dydx : np.array or float
        The interpolated function's first derivative evaluated at x:
        dydx = f'(x), with the same shape as x.
    """
    arr = np.asarray(x)
    # evaluate the derivative on the flattened input and restore shape
    slopes = self._der(arr.flatten())
    return slopes.reshape(arr.shape)
| -4,085,258,743,740,778,000
|
Evaluates the derivative of the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
dydx : np.array or float
The interpolated function's first derivative evaluated at x:
dydx = f'(x), with the same shape as x.
|
HARK/interpolation.py
|
derivative
|
cohenimhuji/HARK
|
python
|
def derivative(self, x):
    """
    Evaluates the derivative of the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    dydx : np.array or float
        The interpolated function's first derivative evaluated at x:
        dydx = f'(x), with the same shape as x.
    """
    arr = np.asarray(x)
    # evaluate the derivative on the flattened input and restore shape
    slopes = self._der(arr.flatten())
    return slopes.reshape(arr.shape)
|
def eval_with_derivative(self, x):
    """
    Evaluates the interpolated function and its derivative at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    y : np.array or float
        The interpolated function evaluated at x: y = f(x), with the same
        shape as x.
    dydx : np.array or float
        The interpolated function's first derivative evaluated at x:
        dydx = f'(x), with the same shape as x.
    """
    arr = np.asarray(x)
    values, slopes = self._evalAndDer(arr.flatten())
    # restore the caller's shape on both outputs
    return (values.reshape(arr.shape), slopes.reshape(arr.shape))
| -3,044,315,355,937,710,600
|
Evaluates the interpolated function and its derivative at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
Returns
-------
y : np.array or float
The interpolated function evaluated at x: y = f(x), with the same
shape as x.
dydx : np.array or float
The interpolated function's first derivative evaluated at x:
dydx = f'(x), with the same shape as x.
|
HARK/interpolation.py
|
eval_with_derivative
|
cohenimhuji/HARK
|
python
|
def eval_with_derivative(self, x):
    """
    Evaluates the interpolated function and its derivative at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.

    Returns
    -------
    y : np.array or float
        The interpolated function evaluated at x: y = f(x), with the same
        shape as x.
    dydx : np.array or float
        The interpolated function's first derivative evaluated at x:
        dydx = f'(x), with the same shape as x.
    """
    arr = np.asarray(x)
    values, slopes = self._evalAndDer(arr.flatten())
    # restore the caller's shape on both outputs
    return (values.reshape(arr.shape), slopes.reshape(arr.shape))
|
def _evaluate(self, x):
    """Interpolated function evaluator; subclasses must override."""
    raise NotImplementedError
| -2,762,862,387,833,791,500
|
Interpolated function evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x):
    """Interpolated function evaluator; subclasses must override."""
    raise NotImplementedError
|
def _der(self, x):
    """Interpolated function derivative evaluator; subclasses must override."""
    raise NotImplementedError
| -7,585,230,399,061,635,000
|
Interpolated function derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_der
|
cohenimhuji/HARK
|
python
|
def _der(self, x):
    """Interpolated function derivative evaluator; subclasses must override."""
    raise NotImplementedError
|
def _evalAndDer(self, x):
    """Interpolated function and derivative evaluator; subclasses must override."""
    raise NotImplementedError
| -8,138,018,477,864,143,000
|
Interpolated function and derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_evalAndDer
|
cohenimhuji/HARK
|
python
|
def _evalAndDer(self, x):
    """Interpolated function and derivative evaluator; subclasses must override."""
    raise NotImplementedError
|
def __call__(self, x, y):
    """
    Evaluates the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.
    y : np.array or float
        Real values to be evaluated in the interpolated function; must be
        the same size as x.

    Returns
    -------
    fxy : np.array or float
        The interpolated function evaluated at x,y: fxy = f(x,y), with the
        same shape as x and y.
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    # evaluate on flattened inputs and restore x's shape on the output
    out = self._evaluate(xarr.flatten(), yarr.flatten())
    return out.reshape(xarr.shape)
| -3,649,113,944,786,035,700
|
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
fxy : np.array or float
The interpolated function evaluated at x,y: fxy = f(x,y), with the
same shape as x and y.
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, x, y):
    """
    Evaluates the interpolated function at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.
    y : np.array or float
        Real values to be evaluated in the interpolated function; must be
        the same size as x.

    Returns
    -------
    fxy : np.array or float
        The interpolated function evaluated at x,y: fxy = f(x,y), with the
        same shape as x and y.
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    # evaluate on flattened inputs and restore x's shape on the output
    out = self._evaluate(xarr.flatten(), yarr.flatten())
    return out.reshape(xarr.shape)
|
def derivativeX(self, x, y):
    """
    Evaluates the partial derivative of the interpolated function with
    respect to x (the first argument) at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.
    y : np.array or float
        Real values to be evaluated in the interpolated function; must be
        the same size as x.

    Returns
    -------
    dfdx : np.array or float
        The derivative of the interpolated function with respect to x,
        evaluated at x,y: dfdx = f_x(x,y), with the same shape as x and y.
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    out = self._derX(xarr.flatten(), yarr.flatten())
    return out.reshape(xarr.shape)
| -5,819,444,738,102,269,000
|
Evaluates the partial derivative of interpolated function with respect
to x (the first argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdx : np.array or float
The derivative of the interpolated function with respect to x, eval-
uated at x,y: dfdx = f_x(x,y), with the same shape as x and y.
|
HARK/interpolation.py
|
derivativeX
|
cohenimhuji/HARK
|
python
|
def derivativeX(self, x, y):
    """
    Evaluates the partial derivative of the interpolated function with
    respect to x (the first argument) at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.
    y : np.array or float
        Real values to be evaluated in the interpolated function; must be
        the same size as x.

    Returns
    -------
    dfdx : np.array or float
        The derivative of the interpolated function with respect to x,
        evaluated at x,y: dfdx = f_x(x,y), with the same shape as x and y.
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    out = self._derX(xarr.flatten(), yarr.flatten())
    return out.reshape(xarr.shape)
|
def derivativeY(self, x, y):
    """
    Evaluates the partial derivative of the interpolated function with
    respect to y (the second argument) at the given input.

    Parameters
    ----------
    x : np.array or float
        Real values to be evaluated in the interpolated function.
    y : np.array or float
        Real values to be evaluated in the interpolated function; must be
        the same size as x.

    Returns
    -------
    dfdy : np.array or float
        The derivative of the interpolated function with respect to y,
        evaluated at x,y: dfdy = f_y(x,y), with the same shape as x and y.
    """
    xarr = np.asarray(x)
    yarr = np.asarray(y)
    out = self._derY(xarr.flatten(), yarr.flatten())
    return out.reshape(xarr.shape)
| 1,417,053,829,423,489,300
|
Evaluates the partial derivative of interpolated function with respect
to y (the second argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdy : np.array or float
The derivative of the interpolated function with respect to y, eval-
uated at x,y: dfdx = f_y(x,y), with the same shape as x and y.
|
HARK/interpolation.py
|
derivativeY
|
cohenimhuji/HARK
|
python
|
def derivativeY(self, x, y):
'\n Evaluates the partial derivative of interpolated function with respect\n to y (the second argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative of the interpolated function with respect to y, eval-\n uated at x,y: dfdx = f_y(x,y), with the same shape as x and y.\n '
xa = np.asarray(x)
ya = np.asarray(y)
return self._derY(xa.flatten(), ya.flatten()).reshape(xa.shape)
|
def _evaluate(self, x, y):
'\n Interpolated function evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 8,579,099,796,809,700,000
|
Interpolated function evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x, y):
'\n \n '
raise NotImplementedError()
|
def _derX(self, x, y):
'\n Interpolated function x-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 1,381,617,938,470,823,700
|
Interpolated function x-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, x, y):
'\n \n '
raise NotImplementedError()
|
def _derY(self, x, y):
'\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 4,368,658,704,194,012,000
|
Interpolated function y-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, x, y):
'\n \n '
raise NotImplementedError()
|
def __call__(self, x, y, z):
'\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n fxyz : np.array or float\n The interpolated function evaluated at x,y,z: fxyz = f(x,y,z), with\n the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._evaluate(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
| 551,276,006,194,025,900
|
Evaluates the interpolated function at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
fxyz : np.array or float
The interpolated function evaluated at x,y,z: fxyz = f(x,y,z), with
the same shape as x, y, and z.
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, x, y, z):
'\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n fxyz : np.array or float\n The interpolated function evaluated at x,y,z: fxyz = f(x,y,z), with\n the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._evaluate(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
|
def derivativeX(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to x (the first argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function evaluated\n at x,y,z: dfdx = f_x(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derX(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
| 8,727,188,151,992,679,000
|
Evaluates the partial derivative of the interpolated function with respect
to x (the first argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdx : np.array or float
The derivative with respect to x of the interpolated function evaluated
at x,y,z: dfdx = f_x(x,y,z), with the same shape as x, y, and z.
|
HARK/interpolation.py
|
derivativeX
|
cohenimhuji/HARK
|
python
|
def derivativeX(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to x (the first argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function evaluated\n at x,y,z: dfdx = f_x(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derX(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
|
def derivativeY(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to y (the second argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function evaluated\n at x,y,z: dfdy = f_y(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derY(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
| 4,617,695,424,958,355,000
|
Evaluates the partial derivative of the interpolated function with respect
to y (the second argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdy : np.array or float
The derivative with respect to y of the interpolated function evaluated
at x,y,z: dfdy = f_y(x,y,z), with the same shape as x, y, and z.
|
HARK/interpolation.py
|
derivativeY
|
cohenimhuji/HARK
|
python
|
def derivativeY(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to y (the second argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function evaluated\n at x,y,z: dfdy = f_y(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derY(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
|
def derivativeZ(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to z (the third argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of the interpolated function evaluated\n at x,y,z: dfdz = f_z(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derZ(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
| -6,464,544,506,066,715,000
|
Evaluates the partial derivative of the interpolated function with respect
to z (the third argument) at the given input.
Parameters
----------
x : np.array or float
Real values to be evaluated in the interpolated function.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as x.
Returns
-------
dfdz : np.array or float
The derivative with respect to z of the interpolated function evaluated
at x,y,z: dfdz = f_z(x,y,z), with the same shape as x, y, and z.
|
HARK/interpolation.py
|
derivativeZ
|
cohenimhuji/HARK
|
python
|
def derivativeZ(self, x, y, z):
'\n Evaluates the partial derivative of the interpolated function with respect\n to z (the third argument) at the given input.\n\n Parameters\n ----------\n x : np.array or float\n Real values to be evaluated in the interpolated function.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as x.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of the interpolated function evaluated\n at x,y,z: dfdz = f_z(x,y,z), with the same shape as x, y, and z.\n '
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derZ(xa.flatten(), ya.flatten(), za.flatten()).reshape(xa.shape)
|
def _evaluate(self, x, y, z):
'\n Interpolated function evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| -1,267,314,504,862,056,200
|
Interpolated function evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derX(self, x, y, z):
'\n Interpolated function x-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| -4,115,489,812,479,911,400
|
Interpolated function x-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derY(self, x, y, z):
'\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| -8,729,575,380,917,093,000
|
Interpolated function y-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derZ(self, x, y, z):
'\n Interpolated function y-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 5,043,320,771,869,896,000
|
Interpolated function y-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derZ
|
cohenimhuji/HARK
|
python
|
def _derZ(self, x, y, z):
'\n \n '
raise NotImplementedError()
|
def __call__(self, w, x, y, z):
'\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n fwxyz : np.array or float\n The interpolated function evaluated at w,x,y,z: fwxyz = f(w,x,y,z),\n with the same shape as w, x, y, and z.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._evaluate(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
| -2,837,734,873,584,436,000
|
Evaluates the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
fwxyz : np.array or float
The interpolated function evaluated at w,x,y,z: fwxyz = f(w,x,y,z),
with the same shape as w, x, y, and z.
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, w, x, y, z):
'\n Evaluates the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n fwxyz : np.array or float\n The interpolated function evaluated at w,x,y,z: fwxyz = f(w,x,y,z),\n with the same shape as w, x, y, and z.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._evaluate(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
|
def derivativeW(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to w (the first argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdw : np.array or float\n The derivative with respect to w of the interpolated function eval-\n uated at w,x,y,z: dfdw = f_w(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derW(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
| -5,169,940,638,093,548,000
|
Evaluates the partial derivative with respect to w (the first argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdw : np.array or float
The derivative with respect to w of the interpolated function eval-
uated at w,x,y,z: dfdw = f_w(w,x,y,z), with the same shape as inputs.
|
HARK/interpolation.py
|
derivativeW
|
cohenimhuji/HARK
|
python
|
def derivativeW(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to w (the first argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdw : np.array or float\n The derivative with respect to w of the interpolated function eval-\n uated at w,x,y,z: dfdw = f_w(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derW(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
|
def derivativeX(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to x (the second argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function eval-\n uated at w,x,y,z: dfdx = f_x(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derX(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
| -5,980,737,189,385,176,000
|
Evaluates the partial derivative with respect to x (the second argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdx : np.array or float
The derivative with respect to x of the interpolated function eval-
uated at w,x,y,z: dfdx = f_x(w,x,y,z), with the same shape as inputs.
|
HARK/interpolation.py
|
derivativeX
|
cohenimhuji/HARK
|
python
|
def derivativeX(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to x (the second argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdx : np.array or float\n The derivative with respect to x of the interpolated function eval-\n uated at w,x,y,z: dfdx = f_x(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derX(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
|
def derivativeY(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to y (the third argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function eval-\n uated at w,x,y,z: dfdy = f_y(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derY(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
| 7,829,811,567,842,157,000
|
Evaluates the partial derivative with respect to y (the third argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdy : np.array or float
The derivative with respect to y of the interpolated function eval-
uated at w,x,y,z: dfdy = f_y(w,x,y,z), with the same shape as inputs.
|
HARK/interpolation.py
|
derivativeY
|
cohenimhuji/HARK
|
python
|
def derivativeY(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to y (the third argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdy : np.array or float\n The derivative with respect to y of the interpolated function eval-\n uated at w,x,y,z: dfdy = f_y(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derY(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
|
def derivativeZ(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to z (the fourth argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of the interpolated function eval-\n uated at w,x,y,z: dfdz = f_z(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derZ(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
| 4,949,201,771,310,799,000
|
Evaluates the partial derivative with respect to z (the fourth argument)
of the interpolated function at the given input.
Parameters
----------
w : np.array or float
Real values to be evaluated in the interpolated function.
x : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
y : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
z : np.array or float
Real values to be evaluated in the interpolated function; must be
the same size as w.
Returns
-------
dfdz : np.array or float
The derivative with respect to z of the interpolated function eval-
uated at w,x,y,z: dfdz = f_z(w,x,y,z), with the same shape as inputs.
|
HARK/interpolation.py
|
derivativeZ
|
cohenimhuji/HARK
|
python
|
def derivativeZ(self, w, x, y, z):
'\n Evaluates the partial derivative with respect to z (the fourth argument)\n of the interpolated function at the given input.\n\n Parameters\n ----------\n w : np.array or float\n Real values to be evaluated in the interpolated function.\n x : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n y : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n z : np.array or float\n Real values to be evaluated in the interpolated function; must be\n the same size as w.\n\n Returns\n -------\n dfdz : np.array or float\n The derivative with respect to z of the interpolated function eval-\n uated at w,x,y,z: dfdz = f_z(w,x,y,z), with the same shape as inputs.\n '
wa = np.asarray(w)
xa = np.asarray(x)
ya = np.asarray(y)
za = np.asarray(z)
return self._derZ(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten()).reshape(wa.shape)
|
def _evaluate(self, w, x, y, z):
'\n Interpolated function evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 1,283,801,718,147,212,500
|
Interpolated function evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, w, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derW(self, w, x, y, z):
'\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 2,038,132,461,415,581,200
|
Interpolated function w-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derW
|
cohenimhuji/HARK
|
python
|
def _derW(self, w, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derX(self, w, x, y, z):
'\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| -857,282,721,956,864,000
|
Interpolated function w-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, w, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derY(self, w, x, y, z):
'\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| -775,814,225,664,724,600
|
Interpolated function w-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, w, x, y, z):
'\n \n '
raise NotImplementedError()
|
def _derZ(self, w, x, y, z):
'\n Interpolated function w-derivative evaluator, to be defined in subclasses.\n '
raise NotImplementedError()
| 6,938,501,573,206,768,000
|
Interpolated function w-derivative evaluator, to be defined in subclasses.
|
HARK/interpolation.py
|
_derZ
|
cohenimhuji/HARK
|
python
|
def _derZ(self, w, x, y, z):
'\n \n '
raise NotImplementedError()
|
def __init__(self, i_dim=0, n_dims=1):
'\n Constructor for a new IdentityFunction.\n\n Parameters\n ----------\n i_dim : int\n Index of the dimension on which the identity is defined. f(*x) = x[i]\n n_dims : int\n Total number of input dimensions for this function.\n\n Returns\n -------\n None\n '
self.i_dim = i_dim
self.n_dims = n_dims
| -8,362,144,971,293,894,000
|
Constructor for a new IdentityFunction.
Parameters
----------
i_dim : int
Index of the dimension on which the identity is defined. f(*x) = x[i]
n_dims : int
Total number of input dimensions for this function.
Returns
-------
None
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, i_dim=0, n_dims=1):
'\n Constructor for a new IdentityFunction.\n\n Parameters\n ----------\n i_dim : int\n Index of the dimension on which the identity is defined. f(*x) = x[i]\n n_dims : int\n Total number of input dimensions for this function.\n\n Returns\n -------\n None\n '
self.i_dim = i_dim
self.n_dims = n_dims
|
def __call__(self, *args):
'\n Evaluate the identity function.\n '
return args[self.i_dim]
| -8,692,210,192,518,422,000
|
Evaluate the identity function.
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, *args):
'\n \n '
return args[self.i_dim]
|
def derivative(self, *args):
'\n Returns the derivative of the function with respect to the first dimension.\n '
if (self.i_dim == 0):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
| 6,439,382,352,633,113,000
|
Returns the derivative of the function with respect to the first dimension.
|
HARK/interpolation.py
|
derivative
|
cohenimhuji/HARK
|
python
|
def derivative(self, *args):
'\n \n '
if (self.i_dim == 0):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
|
def derivativeX(self, *args):
'\n Returns the derivative of the function with respect to the X dimension.\n This is the first input whenever n_dims < 4 and the second input otherwise.\n '
if (self.n_dims >= 4):
j = 1
else:
j = 0
if (self.i_dim == j):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
| -5,051,048,792,419,859,000
|
Returns the derivative of the function with respect to the X dimension.
This is the first input whenever n_dims < 4 and the second input otherwise.
|
HARK/interpolation.py
|
derivativeX
|
cohenimhuji/HARK
|
python
|
def derivativeX(self, *args):
'\n Returns the derivative of the function with respect to the X dimension.\n This is the first input whenever n_dims < 4 and the second input otherwise.\n '
if (self.n_dims >= 4):
j = 1
else:
j = 0
if (self.i_dim == j):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
|
def derivativeY(self, *args):
'\n Returns the derivative of the function with respect to the Y dimension.\n This is the second input whenever n_dims < 4 and the third input otherwise.\n '
if (self.n_dims >= 4):
j = 2
else:
j = 1
if (self.i_dim == j):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
| -4,896,925,797,013,075,000
|
Returns the derivative of the function with respect to the Y dimension.
This is the second input whenever n_dims < 4 and the third input otherwise.
|
HARK/interpolation.py
|
derivativeY
|
cohenimhuji/HARK
|
python
|
def derivativeY(self, *args):
'\n Returns the derivative of the function with respect to the Y dimension.\n This is the second input whenever n_dims < 4 and the third input otherwise.\n '
if (self.n_dims >= 4):
j = 2
else:
j = 1
if (self.i_dim == j):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
|
def derivativeZ(self, *args):
'\n Returns the derivative of the function with respect to the Z dimension.\n This is the third input whenever n_dims < 4 and the fourth input otherwise.\n '
if (self.n_dims >= 4):
j = 3
else:
j = 2
if (self.i_dim == j):
return np.ones_like(*args[0])
else:
return np.zeros_like(*args[0])
| -6,145,434,911,555,888,000
|
Returns the derivative of the function with respect to the Z dimension.
This is the third input whenever n_dims < 4 and the fourth input otherwise.
|
HARK/interpolation.py
|
derivativeZ
|
cohenimhuji/HARK
|
python
|
def derivativeZ(self, *args):
    """
    Returns the derivative of the function with respect to the Z dimension.
    This is the third input whenever n_dims < 4 and the fourth input otherwise.

    Returns
    -------
    np.array
        Ones with the shape of the first argument if this instance's i_dim
        is the Z dimension, zeros of that shape otherwise.
    """
    # Z is argument index 3 when a W dimension precedes it (n_dims >= 4),
    # else index 2.
    if (self.n_dims >= 4):
        j = 3
    else:
        j = 2
    if (self.i_dim == j):
        # BUG FIX: was np.ones_like(*args[0]), which unpacks the elements of
        # the first argument as separate positional args (the second element
        # would be interpreted as a dtype).  Pass the array itself.
        return np.ones_like(args[0])
    else:
        return np.zeros_like(args[0])
|
def derivativeW(self, *args):
    """
    Returns the derivative of the function with respect to the W dimension.
    This should only exist when n_dims >= 4.

    Returns
    -------
    np.array
        Ones with the shape of the first argument if this instance's i_dim
        is the W dimension, zeros of that shape otherwise.

    Raises
    ------
    AssertionError
        If n_dims < 4 (NOTE: stripped under ``python -O``; kept as-is so
        callers catching AssertionError keep working).
    """
    # W is only present as argument index 0 when there are 4+ dimensions.
    if (self.n_dims >= 4):
        j = 0
    else:
        assert False, "Derivative with respect to W can't be called when n_dims < 4!"
    if (self.i_dim == j):
        # BUG FIX: was np.ones_like(*args[0]), which unpacks the elements of
        # the first argument as separate positional args (the second element
        # would be interpreted as a dtype).  Pass the array itself.
        return np.ones_like(args[0])
    else:
        return np.zeros_like(args[0])
| -6,151,810,293,431,523,000
|
Returns the derivative of the function with respect to the W dimension.
This should only exist when n_dims >= 4.
|
HARK/interpolation.py
|
derivativeW
|
cohenimhuji/HARK
|
python
|
def derivativeW(self, *args):
    """
    Returns the derivative of the function with respect to the W dimension.
    This should only exist when n_dims >= 4.

    Returns
    -------
    np.array
        Ones with the shape of the first argument if this instance's i_dim
        is the W dimension, zeros of that shape otherwise.

    Raises
    ------
    AssertionError
        If n_dims < 4 (NOTE: stripped under ``python -O``; kept as-is so
        callers catching AssertionError keep working).
    """
    # W is only present as argument index 0 when there are 4+ dimensions.
    if (self.n_dims >= 4):
        j = 0
    else:
        assert False, "Derivative with respect to W can't be called when n_dims < 4!"
    if (self.i_dim == j):
        # BUG FIX: was np.ones_like(*args[0]), which unpacks the elements of
        # the first argument as separate positional args (the second element
        # would be interpreted as a dtype).  Pass the array itself.
        return np.ones_like(args[0])
    else:
        return np.zeros_like(args[0])
|
def __init__(self, value):
    """
    Make a new ConstantFunction object.

    Parameters
    ----------
    value : float
        The constant value that the function returns.

    Returns
    -------
    None
    """
    # Coerce to float so the stored constant is always a scalar float.
    self.value = float(value)
| -5,470,336,126,520,096,000
|
Make a new ConstantFunction object.
Parameters
----------
value : float
The constant value that the function returns.
Returns
-------
None
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, value):
    """
    Make a new ConstantFunction object.

    Parameters
    ----------
    value : float
        The constant value that the function returns.

    Returns
    -------
    None
    """
    # Coerce to float so the stored constant is always a scalar float.
    self.value = float(value)
|
def __call__(self, *args):
    """
    Evaluate the constant function.  The first input must exist and should be
    an array; returns an array of identical shape to args[0] (if it exists).
    """
    # With no arguments there is nothing to broadcast against.
    if not args:
        return self.value
    first = args[0]
    if _isscalar(first):
        return self.value
    # Broadcast the constant to the shape of the first argument.
    return self.value * np.ones(first.shape)
| -3,629,823,588,914,564,600
|
Evaluate the constant function. The first input must exist and should be an array.
Returns an array of identical shape to args[0] (if it exists).
|
HARK/interpolation.py
|
__call__
|
cohenimhuji/HARK
|
python
|
def __call__(self, *args):
    """
    Evaluate the constant function.  The first input must exist and should be
    an array; returns an array of identical shape to args[0] (if it exists).
    """
    # With no arguments there is nothing to broadcast against.
    if not args:
        return self.value
    first = args[0]
    if _isscalar(first):
        return self.value
    # Broadcast the constant to the shape of the first argument.
    return self.value * np.ones(first.shape)
|
def _der(self, *args):
    """
    Evaluate the derivative of the function, which is identically zero.  The
    first input must exist and should be an array; returns an array of zeros
    of identical shape to args[0] (if it exists).
    """
    if not args:
        return 0.0
    first = args[0]
    if _isscalar(first):
        return 0.0
    # The derivative of a constant is zero everywhere.
    return np.zeros(first.shape)
| -1,138,811,285,084,507,900
|
Evaluate the derivative of the function. The first input must exist and should be an array.
Returns an array of identical shape to args[0] (if it exists). This is an array of zeros.
|
HARK/interpolation.py
|
_der
|
cohenimhuji/HARK
|
python
|
def _der(self, *args):
    """
    Evaluate the derivative of the function, which is identically zero.  The
    first input must exist and should be an array; returns an array of zeros
    of identical shape to args[0] (if it exists).
    """
    if not args:
        return 0.0
    first = args[0]
    if _isscalar(first):
        return 0.0
    # The derivative of a constant is zero everywhere.
    return np.zeros(first.shape)
|
def __init__(self, x_list, y_list, intercept_limit=None, slope_limit=None, lower_extrap=False):
    """
    The interpolation constructor to make a new linear spline interpolation.

    Parameters
    ----------
    x_list : np.array
        List of x values composing the grid.
    y_list : np.array
        List of y values, representing f(x) at the points in x_list.
    intercept_limit : float
        Intercept of limiting linear function.
    slope_limit : float
        Slope of limiting linear function.
    lower_extrap : boolean
        Indicator for whether lower extrapolation is allowed.  False means
        f(x) = NaN for x < min(x_list); True means linear extrapolation.

    Returns
    -------
    new instance of LinearInterp

    NOTE: When no input is given for the limiting linear function, linear
    extrapolation is used above the highest gridpoint.
    """
    self.x_list = np.array(x_list)
    self.y_list = np.array(y_list)
    self.lower_extrap = lower_extrap
    self.x_n = self.x_list.size
    # Decay extrapolation is active only when BOTH limiting parameters are given.
    use_decay = (intercept_limit is not None) and (slope_limit is not None)
    self.decay_extrap = use_decay
    if use_decay:
        # Slope of the topmost grid segment.
        top_slope = (y_list[-1] - y_list[-2]) / (x_list[-1] - x_list[-2])
        # Vertical gap between the limiting line and f at the top gridpoint.
        level_gap = (intercept_limit + slope_limit * x_list[-1]) - y_list[-1]
        self.decay_extrap_A = level_gap
        self.decay_extrap_B = -(slope_limit - top_slope) / level_gap
        self.intercept_limit = intercept_limit
        self.slope_limit = slope_limit
| 52,940,877,021,593,310
|
The interpolation constructor to make a new linear spline interpolation.
Parameters
----------
x_list : np.array
List of x values composing the grid.
y_list : np.array
List of y values, representing f(x) at the points in x_list.
intercept_limit : float
Intercept of limiting linear function.
slope_limit : float
Slope of limiting linear function.
lower_extrap : boolean
Indicator for whether lower extrapolation is allowed. False means
f(x) = NaN for x < min(x_list); True means linear extrapolation.
Returns
-------
new instance of LinearInterp
NOTE: When no input is given for the limiting linear function, linear
extrapolation is used above the highest gridpoint.
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, x_list, y_list, intercept_limit=None, slope_limit=None, lower_extrap=False):
    """
    The interpolation constructor to make a new linear spline interpolation.

    Parameters
    ----------
    x_list : np.array
        List of x values composing the grid.
    y_list : np.array
        List of y values, representing f(x) at the points in x_list.
    intercept_limit : float
        Intercept of limiting linear function.
    slope_limit : float
        Slope of limiting linear function.
    lower_extrap : boolean
        Indicator for whether lower extrapolation is allowed.  False means
        f(x) = NaN for x < min(x_list); True means linear extrapolation.

    Returns
    -------
    new instance of LinearInterp

    NOTE: When no input is given for the limiting linear function, linear
    extrapolation is used above the highest gridpoint.
    """
    self.x_list = np.array(x_list)
    self.y_list = np.array(y_list)
    self.lower_extrap = lower_extrap
    self.x_n = self.x_list.size
    # Decay extrapolation is active only when BOTH limiting parameters are given.
    use_decay = (intercept_limit is not None) and (slope_limit is not None)
    self.decay_extrap = use_decay
    if use_decay:
        # Slope of the topmost grid segment.
        top_slope = (y_list[-1] - y_list[-2]) / (x_list[-1] - x_list[-2])
        # Vertical gap between the limiting line and f at the top gridpoint.
        level_gap = (intercept_limit + slope_limit * x_list[-1]) - y_list[-1]
        self.decay_extrap_A = level_gap
        self.decay_extrap_B = -(slope_limit - top_slope) / level_gap
        self.intercept_limit = intercept_limit
        self.slope_limit = slope_limit
|
def _evalOrDer(self, x, _eval, _Der):
    """
    Returns the level and/or first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).

    Parameters
    ----------
    x : np.array
        Set of points where we want to evaluate the interpolated function
        and/or its derivative.  NOTE(review): the boolean-mask assignments
        below index into y/dydx, which assumes x is an array -- confirm
        callers never pass a bare scalar here.
    _eval : boolean
        Indicator for whether to evaluate the level of the interpolated function.
    _Der : boolean
        Indicator for whether to evaluate the derivative of the interpolated function.

    Returns
    -------
    list
        A list including the level and/or derivative of the interpolated
        function where requested (level first, then derivative).
    """
    # Upper-gridpoint index of the bracketing segment for each x.
    # searchsorted over x_list[:-1] yields values in [0, x_n-1]; the maximum
    # with 1 clips to [1, x_n-1], so out-of-range points reuse the end segments.
    i = np.maximum(np.searchsorted(self.x_list[:(- 1)], x), 1)
    # Fractional position within the segment; may fall outside [0,1] when
    # extrapolating below/above the grid.
    alpha = ((x - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
    if _eval:
        y = (((1.0 - alpha) * self.y_list[(i - 1)]) + (alpha * self.y_list[i]))
    if _Der:
        dydx = ((self.y_list[i] - self.y_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
    if (not self.lower_extrap):
        # Lower extrapolation disallowed: overwrite below-grid points with NaN.
        below_lower_bound = (x < self.x_list[0])
        if _eval:
            y[below_lower_bound] = np.nan
        if _Der:
            dydx[below_lower_bound] = np.nan
    if self.decay_extrap:
        # Above the grid, decay exponentially from the linear extrapolation
        # toward the limiting line intercept_limit + slope_limit * x.
        above_upper_bound = (x > self.x_list[(- 1)])
        x_temp = (x[above_upper_bound] - self.x_list[(- 1)])
        if _eval:
            y[above_upper_bound] = ((self.intercept_limit + (self.slope_limit * x[above_upper_bound])) - (self.decay_extrap_A * np.exp(((- self.decay_extrap_B) * x_temp))))
        if _Der:
            dydx[above_upper_bound] = (self.slope_limit + ((self.decay_extrap_B * self.decay_extrap_A) * np.exp(((- self.decay_extrap_B) * x_temp))))
    # Assemble only the requested outputs, level first.
    output = []
    if _eval:
        output += [y]
    if _Der:
        output += [dydx]
    return output
| 6,101,460,031,458,400,000
|
Returns the level and/or first derivative of the function at each value in
x. Only called internally by HARKinterpolator1D.eval_and_der (etc).
Parameters
----------
x_list : scalar or np.array
Set of points where we want to evaluate the interpolated function and/or its derivative.
_eval : boolean
Indicator for whether to evaluate the level of the interpolated function.
_Der : boolean
Indicator for whether to evaluate the derivative of the interpolated function.
Returns
-------
A list including the level and/or derivative of the interpolated function where requested.
|
HARK/interpolation.py
|
_evalOrDer
|
cohenimhuji/HARK
|
python
|
def _evalOrDer(self, x, _eval, _Der):
    """
    Returns the level and/or first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).

    Parameters
    ----------
    x : np.array
        Set of points where we want to evaluate the interpolated function
        and/or its derivative.  NOTE(review): the boolean-mask assignments
        below index into y/dydx, which assumes x is an array -- confirm
        callers never pass a bare scalar here.
    _eval : boolean
        Indicator for whether to evaluate the level of the interpolated function.
    _Der : boolean
        Indicator for whether to evaluate the derivative of the interpolated function.

    Returns
    -------
    list
        A list including the level and/or derivative of the interpolated
        function where requested (level first, then derivative).
    """
    # Upper-gridpoint index of the bracketing segment for each x.
    # searchsorted over x_list[:-1] yields values in [0, x_n-1]; the maximum
    # with 1 clips to [1, x_n-1], so out-of-range points reuse the end segments.
    i = np.maximum(np.searchsorted(self.x_list[:(- 1)], x), 1)
    # Fractional position within the segment; may fall outside [0,1] when
    # extrapolating below/above the grid.
    alpha = ((x - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
    if _eval:
        y = (((1.0 - alpha) * self.y_list[(i - 1)]) + (alpha * self.y_list[i]))
    if _Der:
        dydx = ((self.y_list[i] - self.y_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
    if (not self.lower_extrap):
        # Lower extrapolation disallowed: overwrite below-grid points with NaN.
        below_lower_bound = (x < self.x_list[0])
        if _eval:
            y[below_lower_bound] = np.nan
        if _Der:
            dydx[below_lower_bound] = np.nan
    if self.decay_extrap:
        # Above the grid, decay exponentially from the linear extrapolation
        # toward the limiting line intercept_limit + slope_limit * x.
        above_upper_bound = (x > self.x_list[(- 1)])
        x_temp = (x[above_upper_bound] - self.x_list[(- 1)])
        if _eval:
            y[above_upper_bound] = ((self.intercept_limit + (self.slope_limit * x[above_upper_bound])) - (self.decay_extrap_A * np.exp(((- self.decay_extrap_B) * x_temp))))
        if _Der:
            dydx[above_upper_bound] = (self.slope_limit + ((self.decay_extrap_B * self.decay_extrap_A) * np.exp(((- self.decay_extrap_B) * x_temp))))
    # Assemble only the requested outputs, level first.
    output = []
    if _eval:
        output += [y]
    if _Der:
        output += [dydx]
    return output
|
def _evaluate(self, x, return_indices=False):
    """
    Returns the level of the interpolated function at each value in x.  Only
    called internally by HARKinterpolator1D.__call__ (etc).
    """
    # Request the level only; _evalOrDer returns a one-element list.
    (levels,) = self._evalOrDer(x, True, False)
    return levels
| -5,831,651,768,712,529,000
|
Returns the level of the interpolated function at each value in x. Only
called internally by HARKinterpolator1D.__call__ (etc).
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x, return_indices=False):
    """
    Returns the level of the interpolated function at each value in x.  Only
    called internally by HARKinterpolator1D.__call__ (etc).
    """
    # Request the level only; _evalOrDer returns a one-element list.
    (levels,) = self._evalOrDer(x, True, False)
    return levels
|
def _der(self, x):
    """
    Returns the first derivative of the interpolated function at each value
    in x.  Only called internally by HARKinterpolator1D.derivative (etc).
    """
    # Request the derivative only; _evalOrDer returns a one-element list.
    (slopes,) = self._evalOrDer(x, False, True)
    return slopes
| -3,117,603,032,125,851,600
|
Returns the first derivative of the interpolated function at each value
in x. Only called internally by HARKinterpolator1D.derivative (etc).
|
HARK/interpolation.py
|
_der
|
cohenimhuji/HARK
|
python
|
def _der(self, x):
    """
    Returns the first derivative of the interpolated function at each value
    in x.  Only called internally by HARKinterpolator1D.derivative (etc).
    """
    # Request the derivative only; _evalOrDer returns a one-element list.
    (slopes,) = self._evalOrDer(x, False, True)
    return slopes
|
def _evalAndDer(self, x):
    """
    Returns the level and first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).
    """
    # _evalOrDer returns [level, derivative]; expose it as a tuple.
    return tuple(self._evalOrDer(x, True, True))
| 8,009,502,929,567,022,000
|
Returns the level and first derivative of the function at each value in
x. Only called internally by HARKinterpolator1D.eval_and_der (etc).
|
HARK/interpolation.py
|
_evalAndDer
|
cohenimhuji/HARK
|
python
|
def _evalAndDer(self, x):
    """
    Returns the level and first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).
    """
    # _evalOrDer returns [level, derivative]; expose it as a tuple.
    return tuple(self._evalOrDer(x, True, True))
|
def __init__(self, x_list, y_list, dydx_list, intercept_limit=None, slope_limit=None, lower_extrap=False):
    """
    The interpolation constructor to make a new cubic spline interpolation.

    Parameters
    ----------
    x_list : np.array
        List of x values composing the grid.
    y_list : np.array
        List of y values, representing f(x) at the points in x_list.
    dydx_list : np.array
        List of dydx values, representing f'(x) at the points in x_list
    intercept_limit : float
        Intercept of limiting linear function.
    slope_limit : float
        Slope of limiting linear function.
    lower_extrap : boolean
        Indicator for whether lower extrapolation is allowed.  False means
        f(x) = NaN for x < min(x_list); True means linear extrapolation.

    Returns
    -------
    new instance of CubicInterp

    NOTE: When no input is given for the limiting linear function, linear
    extrapolation is used above the highest gridpoint.
    """
    self.x_list = np.asarray(x_list)
    self.y_list = np.asarray(y_list)
    self.dydx_list = np.asarray(dydx_list)
    self.n = len(x_list)
    # Row 0 of coeffs handles x below the grid: linear continuation from the
    # bottom gridpoint when lower extrapolation is allowed, NaN otherwise.
    if lower_extrap:
        self.coeffs = [[y_list[0], dydx_list[0], 0, 0]]
    else:
        self.coeffs = [[np.nan, np.nan, np.nan, np.nan]]
    # Rows 1..n-1: Hermite cubic coefficients for each grid segment, written
    # in terms of the normalized position alpha in [0,1] within the segment.
    for i in range((self.n - 1)):
        x0 = x_list[i]
        y0 = y_list[i]
        x1 = x_list[(i + 1)]
        y1 = y_list[(i + 1)]
        Span = (x1 - x0)
        # Endpoint slopes rescaled to the segment's unit interval.
        dydx0 = (dydx_list[i] * Span)
        dydx1 = (dydx_list[(i + 1)] * Span)
        temp = [y0, dydx0, (((3 * (y1 - y0)) - (2 * dydx0)) - dydx1), (((2 * (y0 - y1)) + dydx0) + dydx1)]
        self.coeffs.append(temp)
    # Default limiting line: linear extrapolation from the top gridpoint.
    if ((slope_limit is None) and (intercept_limit is None)):
        slope_limit = dydx_list[(- 1)]
        intercept_limit = (y_list[(- 1)] - (slope_limit * x_list[(- 1)]))
    # NOTE(review): x1 and y1 here are leftovers from the final loop iteration
    # (the top gridpoint), so this implicitly assumes n >= 2 -- confirm.
    gap = (((slope_limit * x1) + intercept_limit) - y1)
    slope = (slope_limit - dydx_list[(self.n - 1)])
    # Final row: parameters of the exponential-decay approach toward the
    # limiting line above the grid; degenerate cases fall back to the line.
    if ((gap != 0) and (slope <= 0)):
        temp = [intercept_limit, slope_limit, gap, (slope / gap)]
    elif (slope > 0):
        temp = [intercept_limit, slope_limit, 0, 0]
    else:
        temp = [intercept_limit, slope_limit, gap, 0]
    self.coeffs.append(temp)
    self.coeffs = np.array(self.coeffs)
| 909,126,979,905,643,600
|
The interpolation constructor to make a new cubic spline interpolation.
Parameters
----------
x_list : np.array
List of x values composing the grid.
y_list : np.array
List of y values, representing f(x) at the points in x_list.
dydx_list : np.array
List of dydx values, representing f'(x) at the points in x_list
intercept_limit : float
Intercept of limiting linear function.
slope_limit : float
Slope of limiting linear function.
lower_extrap : boolean
Indicator for whether lower extrapolation is allowed. False means
f(x) = NaN for x < min(x_list); True means linear extrapolation.
Returns
-------
new instance of CubicInterp
NOTE: When no input is given for the limiting linear function, linear
extrapolation is used above the highest gridpoint.
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, x_list, y_list, dydx_list, intercept_limit=None, slope_limit=None, lower_extrap=False):
    """
    The interpolation constructor to make a new cubic spline interpolation.

    Parameters
    ----------
    x_list : np.array
        List of x values composing the grid.
    y_list : np.array
        List of y values, representing f(x) at the points in x_list.
    dydx_list : np.array
        List of dydx values, representing f'(x) at the points in x_list
    intercept_limit : float
        Intercept of limiting linear function.
    slope_limit : float
        Slope of limiting linear function.
    lower_extrap : boolean
        Indicator for whether lower extrapolation is allowed.  False means
        f(x) = NaN for x < min(x_list); True means linear extrapolation.

    Returns
    -------
    new instance of CubicInterp

    NOTE: When no input is given for the limiting linear function, linear
    extrapolation is used above the highest gridpoint.
    """
    self.x_list = np.asarray(x_list)
    self.y_list = np.asarray(y_list)
    self.dydx_list = np.asarray(dydx_list)
    self.n = len(x_list)
    # Row 0 of coeffs handles x below the grid: linear continuation from the
    # bottom gridpoint when lower extrapolation is allowed, NaN otherwise.
    if lower_extrap:
        self.coeffs = [[y_list[0], dydx_list[0], 0, 0]]
    else:
        self.coeffs = [[np.nan, np.nan, np.nan, np.nan]]
    # Rows 1..n-1: Hermite cubic coefficients for each grid segment, written
    # in terms of the normalized position alpha in [0,1] within the segment.
    for i in range((self.n - 1)):
        x0 = x_list[i]
        y0 = y_list[i]
        x1 = x_list[(i + 1)]
        y1 = y_list[(i + 1)]
        Span = (x1 - x0)
        # Endpoint slopes rescaled to the segment's unit interval.
        dydx0 = (dydx_list[i] * Span)
        dydx1 = (dydx_list[(i + 1)] * Span)
        temp = [y0, dydx0, (((3 * (y1 - y0)) - (2 * dydx0)) - dydx1), (((2 * (y0 - y1)) + dydx0) + dydx1)]
        self.coeffs.append(temp)
    # Default limiting line: linear extrapolation from the top gridpoint.
    if ((slope_limit is None) and (intercept_limit is None)):
        slope_limit = dydx_list[(- 1)]
        intercept_limit = (y_list[(- 1)] - (slope_limit * x_list[(- 1)]))
    # NOTE(review): x1 and y1 here are leftovers from the final loop iteration
    # (the top gridpoint), so this implicitly assumes n >= 2 -- confirm.
    gap = (((slope_limit * x1) + intercept_limit) - y1)
    slope = (slope_limit - dydx_list[(self.n - 1)])
    # Final row: parameters of the exponential-decay approach toward the
    # limiting line above the grid; degenerate cases fall back to the line.
    if ((gap != 0) and (slope <= 0)):
        temp = [intercept_limit, slope_limit, gap, (slope / gap)]
    elif (slope > 0):
        temp = [intercept_limit, slope_limit, 0, 0]
    else:
        temp = [intercept_limit, slope_limit, gap, 0]
    self.coeffs.append(temp)
    self.coeffs = np.array(self.coeffs)
|
def _evaluate(self, x):
    """
    Returns the level of the interpolated function at each value in x.  Only
    called internally by HARKinterpolator1D.__call__ (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above
        # it, the segment index otherwise.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid: linear (or NaN) continuation from row 0.
            y = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x - self.x_list[0])))
        elif (pos < self.n):
            # Interior: evaluate the segment's cubic in Horner form at the
            # normalized position alpha.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            y = (self.coeffs[(pos, 0)] + (alpha * (self.coeffs[(pos, 1)] + (alpha * (self.coeffs[(pos, 2)] + (alpha * self.coeffs[(pos, 3)]))))))
        else:
            # Above the grid: limiting line minus an exponentially decaying gap.
            alpha = (x - self.x_list[(self.n - 1)])
            y = ((self.coeffs[(pos, 0)] + (x * self.coeffs[(pos, 1)])) - (self.coeffs[(pos, 2)] * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        y = np.zeros(m)
        if (y.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized Horner evaluation per segment.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            y[in_bnds] = (coeffs_in[:, 0] + (alpha * (coeffs_in[:, 1] + (alpha * (coeffs_in[:, 2] + (alpha * coeffs_in[:, 3]))))))
            # Below-grid points: linear (or NaN) continuation from row 0.
            y[out_bot] = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x[out_bot] - self.x_list[0])))
            # Above-grid points: limiting line minus decaying gap (row n).
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            y[out_top] = ((self.coeffs[(self.n, 0)] + (x[out_top] * self.coeffs[(self.n, 1)])) - (self.coeffs[(self.n, 2)] * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return y
| -5,248,127,314,382,000,000
|
Returns the level of the interpolated function at each value in x. Only
called internally by HARKinterpolator1D.__call__ (etc).
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x):
    """
    Returns the level of the interpolated function at each value in x.  Only
    called internally by HARKinterpolator1D.__call__ (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above
        # it, the segment index otherwise.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid: linear (or NaN) continuation from row 0.
            y = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x - self.x_list[0])))
        elif (pos < self.n):
            # Interior: evaluate the segment's cubic in Horner form at the
            # normalized position alpha.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            y = (self.coeffs[(pos, 0)] + (alpha * (self.coeffs[(pos, 1)] + (alpha * (self.coeffs[(pos, 2)] + (alpha * self.coeffs[(pos, 3)]))))))
        else:
            # Above the grid: limiting line minus an exponentially decaying gap.
            alpha = (x - self.x_list[(self.n - 1)])
            y = ((self.coeffs[(pos, 0)] + (x * self.coeffs[(pos, 1)])) - (self.coeffs[(pos, 2)] * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        y = np.zeros(m)
        if (y.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized Horner evaluation per segment.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            y[in_bnds] = (coeffs_in[:, 0] + (alpha * (coeffs_in[:, 1] + (alpha * (coeffs_in[:, 2] + (alpha * coeffs_in[:, 3]))))))
            # Below-grid points: linear (or NaN) continuation from row 0.
            y[out_bot] = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x[out_bot] - self.x_list[0])))
            # Above-grid points: limiting line minus decaying gap (row n).
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            y[out_top] = ((self.coeffs[(self.n, 0)] + (x[out_top] * self.coeffs[(self.n, 1)])) - (self.coeffs[(self.n, 2)] * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return y
|
def _der(self, x):
    """
    Returns the first derivative of the interpolated function at each value
    in x.  Only called internally by HARKinterpolator1D.derivative (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above it.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid the continuation is linear: constant slope.
            dydx = self.coeffs[(0, 1)]
        elif (pos < self.n):
            # Interior: derivative of the segment cubic w.r.t. alpha, divided
            # by the segment width to convert back to d/dx.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            dydx = ((self.coeffs[(pos, 1)] + (alpha * ((2 * self.coeffs[(pos, 2)]) + ((alpha * 3) * self.coeffs[(pos, 3)])))) / (self.x_list[pos] - self.x_list[(pos - 1)]))
        else:
            # Above the grid: slope of the limiting line plus the derivative
            # of the decaying gap term.
            alpha = (x - self.x_list[(self.n - 1)])
            dydx = (self.coeffs[(pos, 1)] - ((self.coeffs[(pos, 2)] * self.coeffs[(pos, 3)]) * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        dydx = np.zeros(m)
        if (dydx.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized cubic-derivative evaluation.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            dydx[in_bnds] = ((coeffs_in[:, 1] + (alpha * ((2 * coeffs_in[:, 2]) + ((alpha * 3) * coeffs_in[:, 3])))) / (self.x_list[i] - self.x_list[(i - 1)]))
            # Below-grid points: constant linear slope from row 0.
            dydx[out_bot] = self.coeffs[(0, 1)]
            # Above-grid points: limiting slope plus decay-term derivative.
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            dydx[out_top] = (self.coeffs[(self.n, 1)] - ((self.coeffs[(self.n, 2)] * self.coeffs[(self.n, 3)]) * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return dydx
| -1,783,155,871,987,838,500
|
Returns the first derivative of the interpolated function at each value
in x. Only called internally by HARKinterpolator1D.derivative (etc).
|
HARK/interpolation.py
|
_der
|
cohenimhuji/HARK
|
python
|
def _der(self, x):
    """
    Returns the first derivative of the interpolated function at each value
    in x.  Only called internally by HARKinterpolator1D.derivative (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above it.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid the continuation is linear: constant slope.
            dydx = self.coeffs[(0, 1)]
        elif (pos < self.n):
            # Interior: derivative of the segment cubic w.r.t. alpha, divided
            # by the segment width to convert back to d/dx.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            dydx = ((self.coeffs[(pos, 1)] + (alpha * ((2 * self.coeffs[(pos, 2)]) + ((alpha * 3) * self.coeffs[(pos, 3)])))) / (self.x_list[pos] - self.x_list[(pos - 1)]))
        else:
            # Above the grid: slope of the limiting line plus the derivative
            # of the decaying gap term.
            alpha = (x - self.x_list[(self.n - 1)])
            dydx = (self.coeffs[(pos, 1)] - ((self.coeffs[(pos, 2)] * self.coeffs[(pos, 3)]) * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        dydx = np.zeros(m)
        if (dydx.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized cubic-derivative evaluation.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            dydx[in_bnds] = ((coeffs_in[:, 1] + (alpha * ((2 * coeffs_in[:, 2]) + ((alpha * 3) * coeffs_in[:, 3])))) / (self.x_list[i] - self.x_list[(i - 1)]))
            # Below-grid points: constant linear slope from row 0.
            dydx[out_bot] = self.coeffs[(0, 1)]
            # Above-grid points: limiting slope plus decay-term derivative.
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            dydx[out_top] = (self.coeffs[(self.n, 1)] - ((self.coeffs[(self.n, 2)] * self.coeffs[(self.n, 3)]) * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return dydx
|
def _evalAndDer(self, x):
    """
    Returns the level and first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above it.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid: linear (or NaN) continuation, constant slope.
            y = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x - self.x_list[0])))
            dydx = self.coeffs[(0, 1)]
        elif (pos < self.n):
            # Interior: segment cubic (Horner form) and its derivative,
            # rescaled from alpha-space back to x-space.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            y = (self.coeffs[(pos, 0)] + (alpha * (self.coeffs[(pos, 1)] + (alpha * (self.coeffs[(pos, 2)] + (alpha * self.coeffs[(pos, 3)]))))))
            dydx = ((self.coeffs[(pos, 1)] + (alpha * ((2 * self.coeffs[(pos, 2)]) + ((alpha * 3) * self.coeffs[(pos, 3)])))) / (self.x_list[pos] - self.x_list[(pos - 1)]))
        else:
            # Above the grid: limiting line minus decaying gap, and its slope.
            alpha = (x - self.x_list[(self.n - 1)])
            y = ((self.coeffs[(pos, 0)] + (x * self.coeffs[(pos, 1)])) - (self.coeffs[(pos, 2)] * np.exp((alpha * self.coeffs[(pos, 3)]))))
            dydx = (self.coeffs[(pos, 1)] - ((self.coeffs[(pos, 2)] * self.coeffs[(pos, 3)]) * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        y = np.zeros(m)
        dydx = np.zeros(m)
        if (y.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized level and derivative evaluation.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            y[in_bnds] = (coeffs_in[:, 0] + (alpha * (coeffs_in[:, 1] + (alpha * (coeffs_in[:, 2] + (alpha * coeffs_in[:, 3]))))))
            dydx[in_bnds] = ((coeffs_in[:, 1] + (alpha * ((2 * coeffs_in[:, 2]) + ((alpha * 3) * coeffs_in[:, 3])))) / (self.x_list[i] - self.x_list[(i - 1)]))
            # Below-grid points: linear continuation from row 0.
            y[out_bot] = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x[out_bot] - self.x_list[0])))
            dydx[out_bot] = self.coeffs[(0, 1)]
            # Above-grid points: limiting line minus decaying gap (row n).
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            y[out_top] = ((self.coeffs[(self.n, 0)] + (x[out_top] * self.coeffs[(self.n, 1)])) - (self.coeffs[(self.n, 2)] * np.exp((alpha * self.coeffs[(self.n, 3)]))))
            dydx[out_top] = (self.coeffs[(self.n, 1)] - ((self.coeffs[(self.n, 2)] * self.coeffs[(self.n, 3)]) * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return (y, dydx)
| -2,541,531,060,344,856,000
|
Returns the level and first derivative of the function at each value in
x. Only called internally by HARKinterpolator1D.eval_and_der (etc).
|
HARK/interpolation.py
|
_evalAndDer
|
cohenimhuji/HARK
|
python
|
def _evalAndDer(self, x):
    """
    Returns the level and first derivative of the function at each value in
    x.  Only called internally by HARKinterpolator1D.eval_and_der (etc).
    """
    if _isscalar(x):
        # pos = coefficient row governing x: 0 below the grid, self.n above it.
        pos = np.searchsorted(self.x_list, x)
        if (pos == 0):
            # Below the grid: linear (or NaN) continuation, constant slope.
            y = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x - self.x_list[0])))
            dydx = self.coeffs[(0, 1)]
        elif (pos < self.n):
            # Interior: segment cubic (Horner form) and its derivative,
            # rescaled from alpha-space back to x-space.
            alpha = ((x - self.x_list[(pos - 1)]) / (self.x_list[pos] - self.x_list[(pos - 1)]))
            y = (self.coeffs[(pos, 0)] + (alpha * (self.coeffs[(pos, 1)] + (alpha * (self.coeffs[(pos, 2)] + (alpha * self.coeffs[(pos, 3)]))))))
            dydx = ((self.coeffs[(pos, 1)] + (alpha * ((2 * self.coeffs[(pos, 2)]) + ((alpha * 3) * self.coeffs[(pos, 3)])))) / (self.x_list[pos] - self.x_list[(pos - 1)]))
        else:
            # Above the grid: limiting line minus decaying gap, and its slope.
            alpha = (x - self.x_list[(self.n - 1)])
            y = ((self.coeffs[(pos, 0)] + (x * self.coeffs[(pos, 1)])) - (self.coeffs[(pos, 2)] * np.exp((alpha * self.coeffs[(pos, 3)]))))
            dydx = (self.coeffs[(pos, 1)] - ((self.coeffs[(pos, 2)] * self.coeffs[(pos, 3)]) * np.exp((alpha * self.coeffs[(pos, 3)]))))
    else:
        m = len(x)
        pos = np.searchsorted(self.x_list, x)
        y = np.zeros(m)
        dydx = np.zeros(m)
        if (y.size > 0):
            # Partition the points into below-grid, above-grid, and interior.
            out_bot = (pos == 0)
            out_top = (pos == self.n)
            in_bnds = np.logical_not(np.logical_or(out_bot, out_top))
            # Interior points: vectorized level and derivative evaluation.
            i = pos[in_bnds]
            coeffs_in = self.coeffs[i, :]
            alpha = ((x[in_bnds] - self.x_list[(i - 1)]) / (self.x_list[i] - self.x_list[(i - 1)]))
            y[in_bnds] = (coeffs_in[:, 0] + (alpha * (coeffs_in[:, 1] + (alpha * (coeffs_in[:, 2] + (alpha * coeffs_in[:, 3]))))))
            dydx[in_bnds] = ((coeffs_in[:, 1] + (alpha * ((2 * coeffs_in[:, 2]) + ((alpha * 3) * coeffs_in[:, 3])))) / (self.x_list[i] - self.x_list[(i - 1)]))
            # Below-grid points: linear continuation from row 0.
            y[out_bot] = (self.coeffs[(0, 0)] + (self.coeffs[(0, 1)] * (x[out_bot] - self.x_list[0])))
            dydx[out_bot] = self.coeffs[(0, 1)]
            # Above-grid points: limiting line minus decaying gap (row n).
            alpha = (x[out_top] - self.x_list[(self.n - 1)])
            y[out_top] = ((self.coeffs[(self.n, 0)] + (x[out_top] * self.coeffs[(self.n, 1)])) - (self.coeffs[(self.n, 2)] * np.exp((alpha * self.coeffs[(self.n, 3)]))))
            dydx[out_top] = (self.coeffs[(self.n, 1)] - ((self.coeffs[(self.n, 2)] * self.coeffs[(self.n, 3)]) * np.exp((alpha * self.coeffs[(self.n, 3)]))))
    return (y, dydx)
|
def __init__(self, f_values, x_list, y_list, xSearchFunc=None, ySearchFunc=None):
    """
    Constructor to make a new bilinear interpolation.

    Parameters
    ----------
    f_values : numpy.array
        An array of size (x_n,y_n) such that f_values[i,j] = f(x_list[i],y_list[j])
    x_list : numpy.array
        An array of x values, with length designated x_n.
    y_list : numpy.array
        An array of y values, with length designated y_n.
    xSearchFunc : function
        An optional function that returns the reference location for x values:
        indices = xSearchFunc(x_list,x). Default is np.searchsorted
    ySearchFunc : function
        An optional function that returns the reference location for y values:
        indices = ySearchFunc(y_list,y). Default is np.searchsorted

    Returns
    -------
    new instance of BilinearInterp
    """
    self.f_values = f_values
    self.x_list = x_list
    self.y_list = y_list
    self.x_n = x_list.size
    self.y_n = y_list.size
    # Fall back to binary search when no custom locator function is supplied.
    self.xSearchFunc = np.searchsorted if xSearchFunc is None else xSearchFunc
    self.ySearchFunc = np.searchsorted if ySearchFunc is None else ySearchFunc
| -8,735,612,400,203,041,000
|
Constructor to make a new bilinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (x_n,y_n) such that f_values[i,j] = f(x_list[i],y_list[j])
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
Returns
-------
new instance of BilinearInterp
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, f_values, x_list, y_list, xSearchFunc=None, ySearchFunc=None):
    """
    Constructor to make a new bilinear interpolation.

    Parameters
    ----------
    f_values : numpy.array
        An array of size (x_n,y_n) such that f_values[i,j] = f(x_list[i],y_list[j])
    x_list : numpy.array
        An array of x values, with length designated x_n.
    y_list : numpy.array
        An array of y values, with length designated y_n.
    xSearchFunc : function
        An optional function that returns the reference location for x values:
        indices = xSearchFunc(x_list,x). Default is np.searchsorted
    ySearchFunc : function
        An optional function that returns the reference location for y values:
        indices = ySearchFunc(y_list,y). Default is np.searchsorted

    Returns
    -------
    new instance of BilinearInterp
    """
    self.f_values = f_values
    self.x_list = x_list
    self.y_list = y_list
    self.x_n = x_list.size
    self.y_n = y_list.size
    # Fall back to binary search when no custom locator function is supplied.
    self.xSearchFunc = np.searchsorted if xSearchFunc is None else xSearchFunc
    self.ySearchFunc = np.searchsorted if ySearchFunc is None else ySearchFunc
|
def _evaluate(self, x, y):
    """
    Returns the level of the interpolated function at each value in x,y.
    Only called internally by HARKinterpolator2D.__call__ (etc).
    """
    if _isscalar(x):
        # Scalar query: clamp the upper bracketing index into [1, n-1] so
        # that pos-1 and pos both index valid grid points; queries outside
        # the grid extrapolate linearly from the edge cell.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative position of the query within its bracketing grid cell;
    # falls outside [0, 1] when extrapolating.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Bilinear combination of the four corner values of the cell.
    f = ((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos)])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1))])) + ((alpha * beta) * self.f_values[(x_pos, y_pos)]))
    return f
| 6,744,523,181,384,585,000
|
Returns the level of the interpolated function at each value in x,y.
Only called internally by HARKinterpolator2D.__call__ (etc).
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x, y):
    """
    Returns the level of the interpolated function at each value in x,y.
    Only called internally by HARKinterpolator2D.__call__ (etc).
    """
    if _isscalar(x):
        # Scalar query: clamp the upper bracketing index into [1, n-1] so
        # that pos-1 and pos both index valid grid points; queries outside
        # the grid extrapolate linearly from the edge cell.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative position of the query within its bracketing grid cell;
    # falls outside [0, 1] when extrapolating.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Bilinear combination of the four corner values of the cell.
    f = ((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos)])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1))])) + ((alpha * beta) * self.f_values[(x_pos, y_pos)]))
    return f
|
def _derX(self, x, y):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] so that
        # pos-1 and pos are both valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative y-position within the bracketing cell.
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Interpolate in y at the two bracketing x nodes, then take the slope
    # between them (finite difference over the cell width in x).
    dfdx = (((((1 - beta) * self.f_values[(x_pos, (y_pos - 1))]) + (beta * self.f_values[(x_pos, y_pos)])) - (((1 - beta) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (beta * self.f_values[((x_pos - 1), y_pos)]))) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    return dfdx
| 3,078,626,608,488,745,000
|
Returns the derivative with respect to x of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, x, y):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] so that
        # pos-1 and pos are both valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative y-position within the bracketing cell.
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Interpolate in y at the two bracketing x nodes, then take the slope
    # between them (finite difference over the cell width in x).
    dfdx = (((((1 - beta) * self.f_values[(x_pos, (y_pos - 1))]) + (beta * self.f_values[(x_pos, y_pos)])) - (((1 - beta) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (beta * self.f_values[((x_pos - 1), y_pos)]))) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    return dfdx
|
def _derY(self, x, y):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] so that
        # pos-1 and pos are both valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative x-position within the bracketing cell.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    # Interpolate in x at the two bracketing y nodes, then take the slope
    # between them (finite difference over the cell width in y).
    dfdy = (((((1 - alpha) * self.f_values[((x_pos - 1), y_pos)]) + (alpha * self.f_values[(x_pos, y_pos)])) - (((1 - alpha) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (alpha * self.f_values[(x_pos, (y_pos - 1))]))) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    return dfdy
| -4,398,777,141,352,737,300
|
Returns the derivative with respect to y of the interpolated function
at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, x, y):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] so that
        # pos-1 and pos are both valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
    # Relative x-position within the bracketing cell.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    # Interpolate in x at the two bracketing y nodes, then take the slope
    # between them (finite difference over the cell width in y).
    dfdy = (((((1 - alpha) * self.f_values[((x_pos - 1), y_pos)]) + (alpha * self.f_values[(x_pos, y_pos)])) - (((1 - alpha) * self.f_values[((x_pos - 1), (y_pos - 1))]) + (alpha * self.f_values[(x_pos, (y_pos - 1))]))) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    return dfdy
|
def __init__(self, f_values, x_list, y_list, z_list, xSearchFunc=None, ySearchFunc=None, zSearchFunc=None):
'\n Constructor to make a new trilinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (x_n,y_n,z_n) such that f_values[i,j,k] =\n f(x_list[i],y_list[j],z_list[k])\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). Default is np.searchsorted\n\n Returns\n -------\n new instance of TrilinearInterp\n '
self.f_values = f_values
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if (xSearchFunc is None):
xSearchFunc = np.searchsorted
if (ySearchFunc is None):
ySearchFunc = np.searchsorted
if (zSearchFunc is None):
zSearchFunc = np.searchsorted
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
| 7,669,639,221,495,992,000
|
Constructor to make a new trilinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (x_n,y_n,z_n) such that f_values[i,j,k] =
f(x_list[i],y_list[j],z_list[k])
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
z_list : numpy.array
An array of z values, with length designated z_n.
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
zSearchFunc : function
An optional function that returns the reference location for z values:
indices = zSearchFunc(z_list,z). Default is np.searchsorted
Returns
-------
new instance of TrilinearInterp
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, f_values, x_list, y_list, z_list, xSearchFunc=None, ySearchFunc=None, zSearchFunc=None):
'\n Constructor to make a new trilinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (x_n,y_n,z_n) such that f_values[i,j,k] =\n f(x_list[i],y_list[j],z_list[k])\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). Default is np.searchsorted\n\n Returns\n -------\n new instance of TrilinearInterp\n '
self.f_values = f_values
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if (xSearchFunc is None):
xSearchFunc = np.searchsorted
if (ySearchFunc is None):
ySearchFunc = np.searchsorted
if (zSearchFunc is None):
zSearchFunc = np.searchsorted
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
|
def _evaluate(self, x, y, z):
    """
    Returns the level of the interpolated function at each value in x,y,z.
    Only called internally by HARKinterpolator3D.__call__ (etc).
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices; queries outside
        # the grid extrapolate linearly from the edge cell.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along each axis;
    # falls outside [0, 1] when extrapolating.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Trilinear combination of the eight corner values of the cell.
    f = (((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + (((alpha * beta) * gamma) * self.f_values[(x_pos, y_pos, z_pos)]))
    return f
| -3,764,285,584,818,235,400
|
Returns the level of the interpolated function at each value in x,y,z.
Only called internally by HARKinterpolator3D.__call__ (etc).
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x, y, z):
    """
    Returns the level of the interpolated function at each value in x,y,z.
    Only called internally by HARKinterpolator3D.__call__ (etc).
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices; queries outside
        # the grid extrapolate linearly from the edge cell.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along each axis;
    # falls outside [0, 1] when extrapolating.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Trilinear combination of the eight corner values of the cell.
    f = (((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + (((alpha * beta) * gamma) * self.f_values[(x_pos, y_pos, z_pos)]))
    return f
|
def _derX(self, x, y, z):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along y and z.
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Bilinearly interpolate in (y, z) at the two bracketing x nodes, then
    # take the slope between them (finite difference over the x cell width).
    dfdx = ((((((((1 - beta) * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))]) + (((1 - beta) * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + ((beta * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + ((beta * gamma) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - beta) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - beta) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((beta * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((beta * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)]))) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    return dfdx
| 5,696,914,352,870,816,000
|
Returns the derivative with respect to x of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, x, y, z):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along y and z.
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Bilinearly interpolate in (y, z) at the two bracketing x nodes, then
    # take the slope between them (finite difference over the x cell width).
    dfdx = ((((((((1 - beta) * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))]) + (((1 - beta) * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + ((beta * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + ((beta * gamma) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - beta) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - beta) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((beta * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((beta * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)]))) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    return dfdx
|
def _derY(self, x, y, z):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along x and z.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Bilinearly interpolate in (x, z) at the two bracketing y nodes, then
    # take the slope between them (finite difference over the y cell width).
    dfdy = ((((((((1 - alpha) * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))]) + (((1 - alpha) * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + ((alpha * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + ((alpha * gamma) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - alpha) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - alpha) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((alpha * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + ((alpha * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)]))) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    return dfdy
| -5,693,452,147,213,393,000
|
Returns the derivative with respect to y of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, x, y, z):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along x and z.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    gamma = ((z - self.z_list[(z_pos - 1)]) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    # Bilinearly interpolate in (x, z) at the two bracketing y nodes, then
    # take the slope between them (finite difference over the y cell width).
    dfdy = ((((((((1 - alpha) * (1 - gamma)) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))]) + (((1 - alpha) * gamma) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + ((alpha * (1 - gamma)) * self.f_values[(x_pos, y_pos, (z_pos - 1))])) + ((alpha * gamma) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - alpha) * (1 - gamma)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - alpha) * gamma) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)])) + ((alpha * (1 - gamma)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + ((alpha * gamma) * self.f_values[(x_pos, (y_pos - 1), z_pos)]))) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    return dfdy
|
def _derZ(self, x, y, z):
    """
    Returns the derivative with respect to z of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along x and y.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Bilinearly interpolate in (x, y) at the two bracketing z nodes, then
    # take the slope between them (finite difference over the z cell width).
    dfdz = ((((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + ((alpha * beta) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + ((alpha * beta) * self.f_values[(x_pos, y_pos, (z_pos - 1))]))) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    return dfdz
| -3,054,681,140,601,009,000
|
Returns the derivative with respect to z of the interpolated function
at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
|
HARK/interpolation.py
|
_derZ
|
cohenimhuji/HARK
|
python
|
def _derZ(self, x, y, z):
    """
    Returns the derivative with respect to z of the interpolated function
    at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
    """
    if _isscalar(x):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Relative position within the bracketing cell along x and y.
    alpha = ((x - self.x_list[(x_pos - 1)]) / (self.x_list[x_pos] - self.x_list[(x_pos - 1)]))
    beta = ((y - self.y_list[(y_pos - 1)]) / (self.y_list[y_pos] - self.y_list[(y_pos - 1)]))
    # Bilinearly interpolate in (x, y) at the two bracketing z nodes, then
    # take the slope between them (finite difference over the z cell width).
    dfdz = ((((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1), z_pos)]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos, z_pos)])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1), z_pos)])) + ((alpha * beta) * self.f_values[(x_pos, y_pos, z_pos)])) - ((((((1 - alpha) * (1 - beta)) * self.f_values[((x_pos - 1), (y_pos - 1), (z_pos - 1))]) + (((1 - alpha) * beta) * self.f_values[((x_pos - 1), y_pos, (z_pos - 1))])) + ((alpha * (1 - beta)) * self.f_values[(x_pos, (y_pos - 1), (z_pos - 1))])) + ((alpha * beta) * self.f_values[(x_pos, y_pos, (z_pos - 1))]))) / (self.z_list[z_pos] - self.z_list[(z_pos - 1)]))
    return dfdz
|
def __init__(self, f_values, w_list, x_list, y_list, z_list, wSearchFunc=None, xSearchFunc=None, ySearchFunc=None, zSearchFunc=None):
'\n Constructor to make a new quadlinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (w_n,x_n,y_n,z_n) such that f_values[i,j,k,l] =\n f(w_list[i],x_list[j],y_list[k],z_list[l])\n w_list : numpy.array\n An array of x values, with length designated w_n.\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n wSearchFunc : function\n An optional function that returns the reference location for w values:\n indices = wSearchFunc(w_list,w). Default is np.searchsorted\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). Default is np.searchsorted\n\n Returns\n -------\n new instance of QuadlinearInterp\n '
self.f_values = f_values
self.w_list = w_list
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.w_n = w_list.size
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if (wSearchFunc is None):
wSearchFunc = np.searchsorted
if (xSearchFunc is None):
xSearchFunc = np.searchsorted
if (ySearchFunc is None):
ySearchFunc = np.searchsorted
if (zSearchFunc is None):
zSearchFunc = np.searchsorted
self.wSearchFunc = wSearchFunc
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
| 6,327,270,494,203,699,000
|
Constructor to make a new quadlinear interpolation.
Parameters
----------
f_values : numpy.array
An array of size (w_n,x_n,y_n,z_n) such that f_values[i,j,k,l] =
f(w_list[i],x_list[j],y_list[k],z_list[l])
w_list : numpy.array
        An array of w values, with length designated w_n.
x_list : numpy.array
An array of x values, with length designated x_n.
y_list : numpy.array
An array of y values, with length designated y_n.
z_list : numpy.array
An array of z values, with length designated z_n.
wSearchFunc : function
An optional function that returns the reference location for w values:
indices = wSearchFunc(w_list,w). Default is np.searchsorted
xSearchFunc : function
An optional function that returns the reference location for x values:
indices = xSearchFunc(x_list,x). Default is np.searchsorted
ySearchFunc : function
An optional function that returns the reference location for y values:
indices = ySearchFunc(y_list,y). Default is np.searchsorted
zSearchFunc : function
An optional function that returns the reference location for z values:
indices = zSearchFunc(z_list,z). Default is np.searchsorted
Returns
-------
new instance of QuadlinearInterp
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, f_values, w_list, x_list, y_list, z_list, wSearchFunc=None, xSearchFunc=None, ySearchFunc=None, zSearchFunc=None):
'\n Constructor to make a new quadlinear interpolation.\n\n Parameters\n ----------\n f_values : numpy.array\n An array of size (w_n,x_n,y_n,z_n) such that f_values[i,j,k,l] =\n f(w_list[i],x_list[j],y_list[k],z_list[l])\n w_list : numpy.array\n An array of x values, with length designated w_n.\n x_list : numpy.array\n An array of x values, with length designated x_n.\n y_list : numpy.array\n An array of y values, with length designated y_n.\n z_list : numpy.array\n An array of z values, with length designated z_n.\n wSearchFunc : function\n An optional function that returns the reference location for w values:\n indices = wSearchFunc(w_list,w). Default is np.searchsorted\n xSearchFunc : function\n An optional function that returns the reference location for x values:\n indices = xSearchFunc(x_list,x). Default is np.searchsorted\n ySearchFunc : function\n An optional function that returns the reference location for y values:\n indices = ySearchFunc(y_list,y). Default is np.searchsorted\n zSearchFunc : function\n An optional function that returns the reference location for z values:\n indices = zSearchFunc(z_list,z). Default is np.searchsorted\n\n Returns\n -------\n new instance of QuadlinearInterp\n '
self.f_values = f_values
self.w_list = w_list
self.x_list = x_list
self.y_list = y_list
self.z_list = z_list
self.w_n = w_list.size
self.x_n = x_list.size
self.y_n = y_list.size
self.z_n = z_list.size
if (wSearchFunc is None):
wSearchFunc = np.searchsorted
if (xSearchFunc is None):
xSearchFunc = np.searchsorted
if (ySearchFunc is None):
ySearchFunc = np.searchsorted
if (zSearchFunc is None):
zSearchFunc = np.searchsorted
self.wSearchFunc = wSearchFunc
self.xSearchFunc = xSearchFunc
self.ySearchFunc = ySearchFunc
self.zSearchFunc = zSearchFunc
|
def _evaluate(self, w, x, y, z):
    """
    Returns the level of the interpolated function at each value in w,x,y,z.
    Only called internally by HARKinterpolator4D.__call__ (etc).
    """
    if _isscalar(w):
        # Scalar query: clamp the bracketing index into [1, n-1] in each
        # dimension so pos-1 and pos are valid grid indices; queries outside
        # the grid extrapolate linearly from the edge cell.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array query: same clamping, applied elementwise in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Short aliases for the bracketing indices, to keep the big
    # interpolation expression below readable.
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative position within the bracketing cell along each axis;
    # falls outside [0, 1] when extrapolating.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Quadlinear combination of the sixteen corner values of the cell.
    f = (((1 - alpha) * (((1 - beta) * ((((((1 - gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((gamma * delta) * self.f_values[((i - 1), (j - 1), k, l)]))) + (beta * ((((((1 - gamma) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((gamma * delta) * self.f_values[((i - 1), j, k, l)]))))) + (alpha * (((1 - beta) * ((((((1 - gamma) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + ((gamma * delta) * self.f_values[(i, (j - 1), k, l)]))) + (beta * ((((((1 - gamma) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[(i, j, (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + ((gamma * delta) * self.f_values[(i, j, k, l)]))))))
    return f
| 3,892,936,717,146,668,500
|
Returns the level of the interpolated function at each value in x,y,z.
Only called internally by HARKinterpolator4D.__call__ (etc).
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, w, x, y, z):
    """
    Returns the level of the interpolated function at each value in w,x,y,z.
    Only called internally by HARKinterpolator4D.__call__ (etc).
    """
    if _isscalar(w):
        # Scalar inputs: find the bracketing grid index on each axis and clip
        # it into [1, n-1] so that both neighbors (pos-1, pos) exist.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: same clipping, done elementwise in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    # Short aliases for the upper index of the bracketing cell on each axis.
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative position of each query point within its grid cell on each
    # axis; lies in [0,1] on-grid (outside that range when extrapolating).
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Quadrilinear interpolation: a weight-product sum over the 16 corners
    # of the 4D cell, nested as w (alpha), x (beta), y (gamma), z (delta).
    f = (((1 - alpha) * (((1 - beta) * ((((((1 - gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((gamma * delta) * self.f_values[((i - 1), (j - 1), k, l)]))) + (beta * ((((((1 - gamma) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((gamma * delta) * self.f_values[((i - 1), j, k, l)]))))) + (alpha * (((1 - beta) * ((((((1 - gamma) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + ((gamma * delta) * self.f_values[(i, (j - 1), k, l)]))) + (beta * ((((((1 - gamma) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))]) + (((1 - gamma) * delta) * self.f_values[(i, j, (k - 1), l)])) + ((gamma * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + ((gamma * delta) * self.f_values[(i, j, k, l)]))))))
    return f
|
def _derW(self, w, x, y, z):
    """
    Returns the derivative with respect to w of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeW.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the x, y, z axes. No alpha (w weight) is
    # needed: the interpolant is linear in w, so its w-derivative is the
    # slope between the two w-faces of the cell.
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (x,y,z) on the upper-w face minus the same
    # on the lower-w face, divided by the width of the w interval.
    dfdw = (((((((((((((1 - beta) * (1 - gamma)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))]) + ((((1 - beta) * (1 - gamma)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + ((((1 - beta) * gamma) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + ((((1 - beta) * gamma) * delta) * self.f_values[(i, (j - 1), k, l)])) + (((beta * (1 - gamma)) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((beta * (1 - gamma)) * delta) * self.f_values[(i, j, (k - 1), l)])) + (((beta * gamma) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((beta * gamma) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - beta) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - beta) * (1 - gamma)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - beta) * gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - beta) * gamma) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + (((beta * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + (((beta * (1 - gamma)) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + (((beta * gamma) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + (((beta * gamma) * delta) * self.f_values[((i - 1), j, k, l)]))) / (self.w_list[i] - self.w_list[(i - 1)]))
    return dfdw
| -9,105,297,813,256,432,000
|
Returns the derivative with respect to w of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeW.
|
HARK/interpolation.py
|
_derW
|
cohenimhuji/HARK
|
python
|
def _derW(self, w, x, y, z):
    """
    Returns the derivative with respect to w of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeW.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the x, y, z axes. No alpha (w weight) is
    # needed: the interpolant is linear in w, so its w-derivative is the
    # slope between the two w-faces of the cell.
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (x,y,z) on the upper-w face minus the same
    # on the lower-w face, divided by the width of the w interval.
    dfdw = (((((((((((((1 - beta) * (1 - gamma)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))]) + ((((1 - beta) * (1 - gamma)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + ((((1 - beta) * gamma) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + ((((1 - beta) * gamma) * delta) * self.f_values[(i, (j - 1), k, l)])) + (((beta * (1 - gamma)) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((beta * (1 - gamma)) * delta) * self.f_values[(i, j, (k - 1), l)])) + (((beta * gamma) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((beta * gamma) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - beta) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - beta) * (1 - gamma)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - beta) * gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - beta) * gamma) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + (((beta * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + (((beta * (1 - gamma)) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + (((beta * gamma) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + (((beta * gamma) * delta) * self.f_values[((i - 1), j, k, l)]))) / (self.w_list[i] - self.w_list[(i - 1)]))
    return dfdw
|
def _derX(self, w, x, y, z):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeX.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, y, z axes. No beta (x weight) is
    # needed: the interpolant is linear in x, so its x-derivative is the
    # slope between the two x-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (w,y,z) on the upper-x face minus the same
    # on the lower-x face, divided by the width of the x interval.
    dfdx = (((((((((((((1 - alpha) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - gamma)) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + ((((1 - alpha) * gamma) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((((1 - alpha) * gamma) * delta) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - gamma)) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * (1 - gamma)) * delta) * self.f_values[(i, j, (k - 1), l)])) + (((alpha * gamma) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((alpha * gamma) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - gamma)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - alpha) * gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - alpha) * gamma) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + (((alpha * (1 - gamma)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - gamma)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * gamma) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * gamma) * delta) * self.f_values[(i, (j - 1), k, l)]))) / (self.x_list[j] - self.x_list[(j - 1)]))
    return dfdx
| -5,354,617,837,738,358,000
|
Returns the derivative with respect to x of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeX.
|
HARK/interpolation.py
|
_derX
|
cohenimhuji/HARK
|
python
|
def _derX(self, w, x, y, z):
    """
    Returns the derivative with respect to x of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeX.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, y, z axes. No beta (x weight) is
    # needed: the interpolant is linear in x, so its x-derivative is the
    # slope between the two x-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (w,y,z) on the upper-x face minus the same
    # on the lower-x face, divided by the width of the x interval.
    dfdx = (((((((((((((1 - alpha) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - gamma)) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + ((((1 - alpha) * gamma) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((((1 - alpha) * gamma) * delta) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - gamma)) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * (1 - gamma)) * delta) * self.f_values[(i, j, (k - 1), l)])) + (((alpha * gamma) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((alpha * gamma) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - gamma)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - gamma)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - alpha) * gamma) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - alpha) * gamma) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + (((alpha * (1 - gamma)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - gamma)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * gamma) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * gamma) * delta) * self.f_values[(i, (j - 1), k, l)]))) / (self.x_list[j] - self.x_list[(j - 1)]))
    return dfdx
|
def _derY(self, w, x, y, z):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeY.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, x, z axes. No gamma (y weight) is
    # needed: the interpolant is linear in y, so its y-derivative is the
    # slope between the two y-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (w,x,z) on the upper-y face minus the same
    # on the lower-y face, divided by the width of the y interval.
    dfdy = (((((((((((((1 - alpha) * (1 - beta)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))]) + ((((1 - alpha) * (1 - beta)) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + ((((1 - alpha) * beta) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((((1 - alpha) * beta) * delta) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - beta)) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * (1 - beta)) * delta) * self.f_values[(i, (j - 1), k, l)])) + (((alpha * beta) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((alpha * beta) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - beta)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - beta)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - alpha) * beta) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + ((((1 - alpha) * beta) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + (((alpha * (1 - beta)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - beta)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * beta) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * beta) * delta) * self.f_values[(i, j, (k - 1), l)]))) / (self.y_list[k] - self.y_list[(k - 1)]))
    return dfdy
| 5,392,812,195,969,430,000
|
Returns the derivative with respect to y of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeY.
|
HARK/interpolation.py
|
_derY
|
cohenimhuji/HARK
|
python
|
def _derY(self, w, x, y, z):
    """
    Returns the derivative with respect to y of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeY.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, x, z axes. No gamma (y weight) is
    # needed: the interpolant is linear in y, so its y-derivative is the
    # slope between the two y-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    delta = ((z - self.z_list[(l - 1)]) / (self.z_list[l] - self.z_list[(l - 1)]))
    # Trilinear interpolation in (w,x,z) on the upper-y face minus the same
    # on the lower-y face, divided by the width of the y interval.
    dfdy = (((((((((((((1 - alpha) * (1 - beta)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), k, (l - 1))]) + ((((1 - alpha) * (1 - beta)) * delta) * self.f_values[((i - 1), (j - 1), k, l)])) + ((((1 - alpha) * beta) * (1 - delta)) * self.f_values[((i - 1), j, k, (l - 1))])) + ((((1 - alpha) * beta) * delta) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - beta)) * (1 - delta)) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * (1 - beta)) * delta) * self.f_values[(i, (j - 1), k, l)])) + (((alpha * beta) * (1 - delta)) * self.f_values[(i, j, k, (l - 1))])) + (((alpha * beta) * delta) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - beta)) * (1 - delta)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - beta)) * delta) * self.f_values[((i - 1), (j - 1), (k - 1), l)])) + ((((1 - alpha) * beta) * (1 - delta)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + ((((1 - alpha) * beta) * delta) * self.f_values[((i - 1), j, (k - 1), l)])) + (((alpha * (1 - beta)) * (1 - delta)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - beta)) * delta) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * beta) * (1 - delta)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * beta) * delta) * self.f_values[(i, j, (k - 1), l)]))) / (self.y_list[k] - self.y_list[(k - 1)]))
    return dfdy
|
def _derZ(self, w, x, y, z):
    """
    Returns the derivative with respect to z of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeZ.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, x, y axes. No delta (z weight) is
    # needed: the interpolant is linear in z, so its z-derivative is the
    # slope between the two z-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    # Trilinear interpolation in (w,x,y) on the upper-z face minus the same
    # on the lower-z face, divided by the width of the z interval.
    dfdz = (((((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((i - 1), (j - 1), (k - 1), l)]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((i - 1), (j - 1), k, l)])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((i - 1), j, (k - 1), l)])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(i, (j - 1), k, l)])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(i, j, (k - 1), l)])) + (((alpha * beta) * gamma) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((i - 1), j, k, (l - 1))])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * beta) * gamma) * self.f_values[(i, j, k, (l - 1))]))) / (self.z_list[l] - self.z_list[(l - 1)]))
    return dfdz
| 5,830,951,684,613,317,000
|
Returns the derivative with respect to z of the interpolated function
at each value in w,x,y,z. Only called internally by HARKinterpolator4D.derivativeZ.
|
HARK/interpolation.py
|
_derZ
|
cohenimhuji/HARK
|
python
|
def _derZ(self, w, x, y, z):
    """
    Returns the derivative with respect to z of the interpolated function
    at each value in w,x,y,z. Only called internally by
    HARKinterpolator4D.derivativeZ.
    """
    if _isscalar(w):
        # Scalar inputs: clip each bracketing index into [1, n-1] so both
        # cell corners (pos-1, pos) exist on every axis.
        w_pos = max(min(self.wSearchFunc(self.w_list, w), (self.w_n - 1)), 1)
        x_pos = max(min(self.xSearchFunc(self.x_list, x), (self.x_n - 1)), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), (self.y_n - 1)), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), (self.z_n - 1)), 1)
    else:
        # Array inputs: elementwise clipping in place.
        w_pos = self.wSearchFunc(self.w_list, w)
        w_pos[(w_pos < 1)] = 1
        w_pos[(w_pos > (self.w_n - 1))] = (self.w_n - 1)
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[(x_pos < 1)] = 1
        x_pos[(x_pos > (self.x_n - 1))] = (self.x_n - 1)
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[(y_pos < 1)] = 1
        y_pos[(y_pos > (self.y_n - 1))] = (self.y_n - 1)
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[(z_pos < 1)] = 1
        z_pos[(z_pos > (self.z_n - 1))] = (self.z_n - 1)
    i = w_pos
    j = x_pos
    k = y_pos
    l = z_pos
    # Relative cell positions on the w, x, y axes. No delta (z weight) is
    # needed: the interpolant is linear in z, so its z-derivative is the
    # slope between the two z-faces of the cell.
    alpha = ((w - self.w_list[(i - 1)]) / (self.w_list[i] - self.w_list[(i - 1)]))
    beta = ((x - self.x_list[(j - 1)]) / (self.x_list[j] - self.x_list[(j - 1)]))
    gamma = ((y - self.y_list[(k - 1)]) / (self.y_list[k] - self.y_list[(k - 1)]))
    # Trilinear interpolation in (w,x,y) on the upper-z face minus the same
    # on the lower-z face, divided by the width of the z interval.
    dfdz = (((((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((i - 1), (j - 1), (k - 1), l)]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((i - 1), (j - 1), k, l)])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((i - 1), j, (k - 1), l)])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((i - 1), j, k, l)])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(i, (j - 1), (k - 1), l)])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(i, (j - 1), k, l)])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(i, j, (k - 1), l)])) + (((alpha * beta) * gamma) * self.f_values[(i, j, k, l)])) - (((((((((((1 - alpha) * (1 - beta)) * (1 - gamma)) * self.f_values[((i - 1), (j - 1), (k - 1), (l - 1))]) + ((((1 - alpha) * (1 - beta)) * gamma) * self.f_values[((i - 1), (j - 1), k, (l - 1))])) + ((((1 - alpha) * beta) * (1 - gamma)) * self.f_values[((i - 1), j, (k - 1), (l - 1))])) + ((((1 - alpha) * beta) * gamma) * self.f_values[((i - 1), j, k, (l - 1))])) + (((alpha * (1 - beta)) * (1 - gamma)) * self.f_values[(i, (j - 1), (k - 1), (l - 1))])) + (((alpha * (1 - beta)) * gamma) * self.f_values[(i, (j - 1), k, (l - 1))])) + (((alpha * beta) * (1 - gamma)) * self.f_values[(i, j, (k - 1), (l - 1))])) + (((alpha * beta) * gamma) * self.f_values[(i, j, k, (l - 1))]))) / (self.z_list[l] - self.z_list[(l - 1)]))
    return dfdz
|
def __init__(self, *functions):
    """
    Constructor to make a new lower envelope interpolation.

    Parameters
    ----------
    *functions : function
        Any number of real functions; often instances of HARKinterpolator1D

    Returns
    -------
    new instance of LowerEnvelope
    """
    # Materialize the varargs tuple as a list and record how many there are.
    self.functions = list(functions)
    self.funcCount = len(self.functions)
| 98,166,836,494,282,300
|
Constructor to make a new lower envelope iterpolation.
Parameters
----------
*functions : function
Any number of real functions; often instances of HARKinterpolator1D
Returns
-------
new instance of LowerEnvelope
|
HARK/interpolation.py
|
__init__
|
cohenimhuji/HARK
|
python
|
def __init__(self, *functions):
    """
    Constructor to make a new lower envelope interpolation.

    Parameters
    ----------
    *functions : function
        Any number of real functions; often instances of HARKinterpolator1D

    Returns
    -------
    new instance of LowerEnvelope
    """
    # Copy the varargs into a list and cache its length for _evaluate.
    self.functions = [fn for fn in functions]
    self.funcCount = len(self.functions)
|
def _evaluate(self, x):
    """
    Returns the level of the function at each value in x as the minimum among
    all of the functions. Only called internally by HARKinterpolator1D.__call__.
    """
    if _isscalar(x):
        # Scalar case: evaluate every component and take the NaN-ignoring min.
        return np.nanmin([func(x) for func in self.functions])
    # Array case: fill one column per component function, then take the
    # NaN-ignoring minimum across columns for each point.
    fx = np.zeros((len(x), self.funcCount))
    for col, func in enumerate(self.functions):
        fx[:, col] = func(x)
    return np.nanmin(fx, axis=1)
| 6,890,667,517,272,024,000
|
Returns the level of the function at each value in x as the minimum among
all of the functions. Only called internally by HARKinterpolator1D.__call__.
|
HARK/interpolation.py
|
_evaluate
|
cohenimhuji/HARK
|
python
|
def _evaluate(self, x):
    """
    Returns the level of the function at each value in x as the minimum among
    all of the functions. Only called internally by HARKinterpolator1D.__call__.
    """
    if _isscalar(x):
        # Scalar input: min over all component functions, ignoring NaNs.
        y = np.nanmin([func(x) for func in self.functions])
    else:
        # Vector input: evaluate each component into its own column, then
        # reduce across components with a NaN-ignoring minimum.
        vals = np.zeros((len(x), self.funcCount))
        for idx in range(self.funcCount):
            vals[:, idx] = self.functions[idx](x)
        y = np.nanmin(vals, axis=1)
    return y
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.