body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
aaf693093d79067f0e1d02d510dd9083564cfc736ba1b4344e4c5edac01ce3ad
@freeform_tags.setter def freeform_tags(self, freeform_tags): '\n Sets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :param freeform_tags: The freeform_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, str)\n ' self._freeform_tags = freeform_tags
Sets the freeform_tags of this CreateContainerScanTargetDetails. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` :param freeform_tags: The freeform_tags of this CreateContainerScanTargetDetails. :type: dict(str, str)
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
freeform_tags
ezequielramos/oci-python-sdk
249
python
@freeform_tags.setter def freeform_tags(self, freeform_tags): '\n Sets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :param freeform_tags: The freeform_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, str)\n ' self._freeform_tags = freeform_tags
@freeform_tags.setter def freeform_tags(self, freeform_tags): '\n Sets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :param freeform_tags: The freeform_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, str)\n ' self._freeform_tags = freeform_tags<|docstring|>Sets the freeform_tags of this CreateContainerScanTargetDetails. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` :param freeform_tags: The freeform_tags of this CreateContainerScanTargetDetails. :type: dict(str, str)<|endoftext|>
691524ea69549ec7d1b171aea8967e0dbe2a9c48dbdaa47a4b1638f03c557100
@property def defined_tags(self): '\n Gets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :return: The defined_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, dict(str, object))\n ' return self._defined_tags
Gets the defined_tags of this CreateContainerScanTargetDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace": {"bar-key": "value"}}` :return: The defined_tags of this CreateContainerScanTargetDetails. :rtype: dict(str, dict(str, object))
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
defined_tags
ezequielramos/oci-python-sdk
249
python
@property def defined_tags(self): '\n Gets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :return: The defined_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, dict(str, object))\n ' return self._defined_tags
@property def defined_tags(self): '\n Gets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :return: The defined_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, dict(str, object))\n ' return self._defined_tags<|docstring|>Gets the defined_tags of this CreateContainerScanTargetDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace": {"bar-key": "value"}}` :return: The defined_tags of this CreateContainerScanTargetDetails. :rtype: dict(str, dict(str, object))<|endoftext|>
06ed297ec3f3423a20aeb07043e7e390ac325520cbda087fc0449536e8b95b65
@defined_tags.setter def defined_tags(self, defined_tags): '\n Sets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :param defined_tags: The defined_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, dict(str, object))\n ' self._defined_tags = defined_tags
Sets the defined_tags of this CreateContainerScanTargetDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace": {"bar-key": "value"}}` :param defined_tags: The defined_tags of this CreateContainerScanTargetDetails. :type: dict(str, dict(str, object))
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
defined_tags
ezequielramos/oci-python-sdk
249
python
@defined_tags.setter def defined_tags(self, defined_tags): '\n Sets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :param defined_tags: The defined_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, dict(str, object))\n ' self._defined_tags = defined_tags
@defined_tags.setter def defined_tags(self, defined_tags): '\n Sets the defined_tags of this CreateContainerScanTargetDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n Example: `{"foo-namespace": {"bar-key": "value"}}`\n\n\n :param defined_tags: The defined_tags of this CreateContainerScanTargetDetails.\n :type: dict(str, dict(str, object))\n ' self._defined_tags = defined_tags<|docstring|>Sets the defined_tags of this CreateContainerScanTargetDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace": {"bar-key": "value"}}` :param defined_tags: The defined_tags of this CreateContainerScanTargetDetails. :type: dict(str, dict(str, object))<|endoftext|>
2de6c79a9468aa6b39eb62b28dbbdc2fa9e1a4f4254210f9e83c69c7d7b6b2d8
def init(self): ' Init module ' log.info('Initializing module Artifacts') from .api.buckets import Buckets from .api.artifacts import Artifacts from .api.artifact import Artifact add_resource_to_api(self.context.api, Buckets, '/artifact/<int:project_id>') add_resource_to_api(self.context.api, Artifacts, '/artifact/<int:project_id>/<string:bucket>') add_resource_to_api(self.context.api, Artifact, '/artifact/<int:project_id>/<string:bucket>/<string:filename>') add_resource_to_api(self.context.api, Artifact, '/artifacts/<int:project_id>/<string:bucket>/<string:filename>', endpoint='artifact_old') add_resource_to_api(self.context.api, ArtifactsForSecurityResults, '/artifact/security/<int:run_id>') add_resource_to_api(self.context.api, ArtifactDownload, '/artifact/security/<int:run_id>/<string:filename>')
Init module
module.py
init
Aspect13/artifacts
0
python
def init(self): ' ' log.info('Initializing module Artifacts') from .api.buckets import Buckets from .api.artifacts import Artifacts from .api.artifact import Artifact add_resource_to_api(self.context.api, Buckets, '/artifact/<int:project_id>') add_resource_to_api(self.context.api, Artifacts, '/artifact/<int:project_id>/<string:bucket>') add_resource_to_api(self.context.api, Artifact, '/artifact/<int:project_id>/<string:bucket>/<string:filename>') add_resource_to_api(self.context.api, Artifact, '/artifacts/<int:project_id>/<string:bucket>/<string:filename>', endpoint='artifact_old') add_resource_to_api(self.context.api, ArtifactsForSecurityResults, '/artifact/security/<int:run_id>') add_resource_to_api(self.context.api, ArtifactDownload, '/artifact/security/<int:run_id>/<string:filename>')
def init(self): ' ' log.info('Initializing module Artifacts') from .api.buckets import Buckets from .api.artifacts import Artifacts from .api.artifact import Artifact add_resource_to_api(self.context.api, Buckets, '/artifact/<int:project_id>') add_resource_to_api(self.context.api, Artifacts, '/artifact/<int:project_id>/<string:bucket>') add_resource_to_api(self.context.api, Artifact, '/artifact/<int:project_id>/<string:bucket>/<string:filename>') add_resource_to_api(self.context.api, Artifact, '/artifacts/<int:project_id>/<string:bucket>/<string:filename>', endpoint='artifact_old') add_resource_to_api(self.context.api, ArtifactsForSecurityResults, '/artifact/security/<int:run_id>') add_resource_to_api(self.context.api, ArtifactDownload, '/artifact/security/<int:run_id>/<string:filename>')<|docstring|>Init module<|endoftext|>
f7b25c6acd8d405673ce88696d8c1d60093b847f3f12bdc90810aa7b1ec16d88
def deinit(self): ' De-init module ' log.info('De-initializing module Artifacts')
De-init module
module.py
deinit
Aspect13/artifacts
0
python
def deinit(self): ' ' log.info('De-initializing module Artifacts')
def deinit(self): ' ' log.info('De-initializing module Artifacts')<|docstring|>De-init module<|endoftext|>
5002e781d8605f230313c19f2454fd13e738ad869b37f363e68147b2ad883deb
@app.post('/report') def generate_report(user_input: UserInput): '\n Generate Readability Report.\n ' user_input = user_input.user_input (scores, stats) = compute_score(user_input) return {'scores': scores, 'stats': stats}
Generate Readability Report.
main.py
generate_report
abhijitpai000/text-grader
0
python
@app.post('/report') def generate_report(user_input: UserInput): '\n \n ' user_input = user_input.user_input (scores, stats) = compute_score(user_input) return {'scores': scores, 'stats': stats}
@app.post('/report') def generate_report(user_input: UserInput): '\n \n ' user_input = user_input.user_input (scores, stats) = compute_score(user_input) return {'scores': scores, 'stats': stats}<|docstring|>Generate Readability Report.<|endoftext|>
ff33836205d2113cab6f2bd072f786cc6e73a98c3c51c3d392f216f923b8bfe0
def close(self): 'Used to close and clean up any underlying resources.' raise NotImplementedError()
Used to close and clean up any underlying resources.
aerostatsd/client/base.py
close
Aerobotics/pystatsd
0
python
def close(self): raise NotImplementedError()
def close(self): raise NotImplementedError()<|docstring|>Used to close and clean up any underlying resources.<|endoftext|>
6e5512b241328fd69ef5bbce82b2783efb84a8dd019c1d06d2ede775d0d6d5d7
def bind(self, tags): "\n Sets the context of the client. This binds the tags\n to the client so they're present when metrics are\n written out. Useful in reducing repeating tags in every\n function call\n " self._context = tags
Sets the context of the client. This binds the tags to the client so they're present when metrics are written out. Useful in reducing repeating tags in every function call
aerostatsd/client/base.py
bind
Aerobotics/pystatsd
0
python
def bind(self, tags): "\n Sets the context of the client. This binds the tags\n to the client so they're present when metrics are\n written out. Useful in reducing repeating tags in every\n function call\n " self._context = tags
def bind(self, tags): "\n Sets the context of the client. This binds the tags\n to the client so they're present when metrics are\n written out. Useful in reducing repeating tags in every\n function call\n " self._context = tags<|docstring|>Sets the context of the client. This binds the tags to the client so they're present when metrics are written out. Useful in reducing repeating tags in every function call<|endoftext|>
e3382f793b1a08f4f49c7ffb19a173df0b98dfd01f94c239b313dfeb6843bcfd
def _build_stat_name(self, name, tags): '\n Builds a stat name in the format Graphite expects. Tags are appended\n in the series name\n\n ```\n my.series.name;tag1=value1;tag2=value2\n ```\n \n If tags have been bound to the client through a call to `bind`, they\n will be included in the stat name\n ' if ((tags is None) and (self._context is None)): return name if ((tags is None) and (self._context is not None)): tags = self._context elif ((tags is not None) and (self._context is None)): tags = tags else: tags.update(self._context) tags = ';'.join((f'{k}={v}' for (k, v) in tags.items())) stat_name = f'{name};{tags}' return stat_name
Builds a stat name in the format Graphite expects. Tags are appended in the series name ``` my.series.name;tag1=value1;tag2=value2 ``` If tags have been bound to the client through a call to `bind`, they will be included in the stat name
aerostatsd/client/base.py
_build_stat_name
Aerobotics/pystatsd
0
python
def _build_stat_name(self, name, tags): '\n Builds a stat name in the format Graphite expects. Tags are appended\n in the series name\n\n ```\n my.series.name;tag1=value1;tag2=value2\n ```\n \n If tags have been bound to the client through a call to `bind`, they\n will be included in the stat name\n ' if ((tags is None) and (self._context is None)): return name if ((tags is None) and (self._context is not None)): tags = self._context elif ((tags is not None) and (self._context is None)): tags = tags else: tags.update(self._context) tags = ';'.join((f'{k}={v}' for (k, v) in tags.items())) stat_name = f'{name};{tags}' return stat_name
def _build_stat_name(self, name, tags): '\n Builds a stat name in the format Graphite expects. Tags are appended\n in the series name\n\n ```\n my.series.name;tag1=value1;tag2=value2\n ```\n \n If tags have been bound to the client through a call to `bind`, they\n will be included in the stat name\n ' if ((tags is None) and (self._context is None)): return name if ((tags is None) and (self._context is not None)): tags = self._context elif ((tags is not None) and (self._context is None)): tags = tags else: tags.update(self._context) tags = ';'.join((f'{k}={v}' for (k, v) in tags.items())) stat_name = f'{name};{tags}' return stat_name<|docstring|>Builds a stat name in the format Graphite expects. Tags are appended in the series name ``` my.series.name;tag1=value1;tag2=value2 ``` If tags have been bound to the client through a call to `bind`, they will be included in the stat name<|endoftext|>
b09351bf03c9aed73fe097067805c613379f77dc3afe0ad19f2bb30b16784fe1
def timing(self, stat, delta, rate=1, tags=None): '\n Send new timing information.\n\n `delta` can be either a number of milliseconds or a timedelta.\n ' if isinstance(delta, timedelta): delta = (delta.total_seconds() * 1000.0) self._send_stat(stat, tags, ('%0.6f|ms' % delta), rate)
Send new timing information. `delta` can be either a number of milliseconds or a timedelta.
aerostatsd/client/base.py
timing
Aerobotics/pystatsd
0
python
def timing(self, stat, delta, rate=1, tags=None): '\n Send new timing information.\n\n `delta` can be either a number of milliseconds or a timedelta.\n ' if isinstance(delta, timedelta): delta = (delta.total_seconds() * 1000.0) self._send_stat(stat, tags, ('%0.6f|ms' % delta), rate)
def timing(self, stat, delta, rate=1, tags=None): '\n Send new timing information.\n\n `delta` can be either a number of milliseconds or a timedelta.\n ' if isinstance(delta, timedelta): delta = (delta.total_seconds() * 1000.0) self._send_stat(stat, tags, ('%0.6f|ms' % delta), rate)<|docstring|>Send new timing information. `delta` can be either a number of milliseconds or a timedelta.<|endoftext|>
ceee56ca825eb6dab463595230f47c6a6afbfabbc20adb97cc490aaca20ab89a
def incr(self, stat, count=1, rate=1, tags=None): 'Increment a stat by `count`.' self._send_stat(stat, tags, ('%s|c' % count), rate)
Increment a stat by `count`.
aerostatsd/client/base.py
incr
Aerobotics/pystatsd
0
python
def incr(self, stat, count=1, rate=1, tags=None): self._send_stat(stat, tags, ('%s|c' % count), rate)
def incr(self, stat, count=1, rate=1, tags=None): self._send_stat(stat, tags, ('%s|c' % count), rate)<|docstring|>Increment a stat by `count`.<|endoftext|>
4282c4ac4e67d529de2937429726a08cdf3959ce5c61e40208508297d99fa9bd
def decr(self, stat, count=1, rate=1, tags=None): 'Decrement a stat by `count`.' self.incr(stat, (- count), rate, tags)
Decrement a stat by `count`.
aerostatsd/client/base.py
decr
Aerobotics/pystatsd
0
python
def decr(self, stat, count=1, rate=1, tags=None): self.incr(stat, (- count), rate, tags)
def decr(self, stat, count=1, rate=1, tags=None): self.incr(stat, (- count), rate, tags)<|docstring|>Decrement a stat by `count`.<|endoftext|>
182bc0bf2c82699b18a3c94e09cbce96f9714d3baf70023c0dbcffc62b4d4c71
def gauge(self, stat, value, rate=1, delta=False, tags=None): 'Set a gauge value.' if ((value < 0) and (not delta)): if (rate < 1): if (random.random() > rate): return with self.pipeline() as pipe: pipe._send_stat(stat, tags, '0|g', 1) pipe._send_stat(stat, tags, ('%s|g' % value), 1) else: prefix = ('+' if (delta and (value >= 0)) else '') self._send_stat(stat, tags, ('%s%s|g' % (prefix, value)), rate)
Set a gauge value.
aerostatsd/client/base.py
gauge
Aerobotics/pystatsd
0
python
def gauge(self, stat, value, rate=1, delta=False, tags=None): if ((value < 0) and (not delta)): if (rate < 1): if (random.random() > rate): return with self.pipeline() as pipe: pipe._send_stat(stat, tags, '0|g', 1) pipe._send_stat(stat, tags, ('%s|g' % value), 1) else: prefix = ('+' if (delta and (value >= 0)) else ) self._send_stat(stat, tags, ('%s%s|g' % (prefix, value)), rate)
def gauge(self, stat, value, rate=1, delta=False, tags=None): if ((value < 0) and (not delta)): if (rate < 1): if (random.random() > rate): return with self.pipeline() as pipe: pipe._send_stat(stat, tags, '0|g', 1) pipe._send_stat(stat, tags, ('%s|g' % value), 1) else: prefix = ('+' if (delta and (value >= 0)) else ) self._send_stat(stat, tags, ('%s%s|g' % (prefix, value)), rate)<|docstring|>Set a gauge value.<|endoftext|>
28278be76e21264e0e443c9cbbf9a15c5a566c23fa9f5d15120392fbf1898e7d
def set(self, stat, value, rate=1, tags=None): 'Set a set value.' self._send_stat(stat, tags, ('%s|s' % value), rate)
Set a set value.
aerostatsd/client/base.py
set
Aerobotics/pystatsd
0
python
def set(self, stat, value, rate=1, tags=None): self._send_stat(stat, tags, ('%s|s' % value), rate)
def set(self, stat, value, rate=1, tags=None): self._send_stat(stat, tags, ('%s|s' % value), rate)<|docstring|>Set a set value.<|endoftext|>
3bc92f3a29222dc166ad2ba4865f6a63d97e687a7476dc03078fda198116d49e
def __init__(self, code, message=''): '\n @param code: The error code, must be an integer.\n @param message: Custom message, must be brief and complete the error code.\n ' self.code = code self.message = message
@param code: The error code, must be an integer. @param message: Custom message, must be brief and complete the error code.
centralreport/cr/errors.py
__init__
haiyangd/CentralReport
8
python
def __init__(self, code, message=): '\n @param code: The error code, must be an integer.\n @param message: Custom message, must be brief and complete the error code.\n ' self.code = code self.message = message
def __init__(self, code, message=): '\n @param code: The error code, must be an integer.\n @param message: Custom message, must be brief and complete the error code.\n ' self.code = code self.message = message<|docstring|>@param code: The error code, must be an integer. @param message: Custom message, must be brief and complete the error code.<|endoftext|>
0a9aeea980c68b5431c33d541321c2147a4ad25b618e555536c38de2aa045e7d
def mean_field_logits(logits, covariance_matrix=None, mean_field_factor=1.0): 'Adjust the model logits so its softmax approximates the posterior mean [1].\n\n [1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal\n Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.\n https://arxiv.org/abs/2006.07584\n\n Arguments:\n logits: A float tensor of shape (batch_size, num_classes).\n covariance_matrix: The covariance matrix of shape (batch_size, batch_size).\n If None then it assumes the covariance_matrix is an identity matrix.\n mean_field_factor: The scale factor for mean-field approximation, used to\n adjust the influence of posterior variance in posterior mean\n approximation. If covariance_matrix=None then it is used as the\n temperature parameter for temperature scaling.\n\n Returns:\n Tensor of adjusted logits, shape (batch_size, num_classes).\n ' if ((mean_field_factor is None) or (mean_field_factor < 0)): return logits if (covariance_matrix is None): variances = 1.0 else: variances = tf.linalg.diag_part(covariance_matrix) logits_scale = tf.sqrt((1.0 + (variances * mean_field_factor))) if (len(logits.shape) > 1): logits_scale = tf.expand_dims(logits_scale, axis=(- 1)) return (logits / logits_scale)
Adjust the model logits so its softmax approximates the posterior mean [1]. [1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal Jackknife. _arXiv preprint arXiv:2006.07584_, 2020. https://arxiv.org/abs/2006.07584 Arguments: logits: A float tensor of shape (batch_size, num_classes). covariance_matrix: The covariance matrix of shape (batch_size, batch_size). If None then it assumes the covariance_matrix is an identity matrix. mean_field_factor: The scale factor for mean-field approximation, used to adjust the influence of posterior variance in posterior mean approximation. If covariance_matrix=None then it is used as the temperature parameter for temperature scaling. Returns: Tensor of adjusted logits, shape (batch_size, num_classes).
official/nlp/modeling/layers/gaussian_process.py
mean_field_logits
enmanuelbt92/models
1
python
def mean_field_logits(logits, covariance_matrix=None, mean_field_factor=1.0): 'Adjust the model logits so its softmax approximates the posterior mean [1].\n\n [1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal\n Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.\n https://arxiv.org/abs/2006.07584\n\n Arguments:\n logits: A float tensor of shape (batch_size, num_classes).\n covariance_matrix: The covariance matrix of shape (batch_size, batch_size).\n If None then it assumes the covariance_matrix is an identity matrix.\n mean_field_factor: The scale factor for mean-field approximation, used to\n adjust the influence of posterior variance in posterior mean\n approximation. If covariance_matrix=None then it is used as the\n temperature parameter for temperature scaling.\n\n Returns:\n Tensor of adjusted logits, shape (batch_size, num_classes).\n ' if ((mean_field_factor is None) or (mean_field_factor < 0)): return logits if (covariance_matrix is None): variances = 1.0 else: variances = tf.linalg.diag_part(covariance_matrix) logits_scale = tf.sqrt((1.0 + (variances * mean_field_factor))) if (len(logits.shape) > 1): logits_scale = tf.expand_dims(logits_scale, axis=(- 1)) return (logits / logits_scale)
def mean_field_logits(logits, covariance_matrix=None, mean_field_factor=1.0): 'Adjust the model logits so its softmax approximates the posterior mean [1].\n\n [1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal\n Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.\n https://arxiv.org/abs/2006.07584\n\n Arguments:\n logits: A float tensor of shape (batch_size, num_classes).\n covariance_matrix: The covariance matrix of shape (batch_size, batch_size).\n If None then it assumes the covariance_matrix is an identity matrix.\n mean_field_factor: The scale factor for mean-field approximation, used to\n adjust the influence of posterior variance in posterior mean\n approximation. If covariance_matrix=None then it is used as the\n temperature parameter for temperature scaling.\n\n Returns:\n Tensor of adjusted logits, shape (batch_size, num_classes).\n ' if ((mean_field_factor is None) or (mean_field_factor < 0)): return logits if (covariance_matrix is None): variances = 1.0 else: variances = tf.linalg.diag_part(covariance_matrix) logits_scale = tf.sqrt((1.0 + (variances * mean_field_factor))) if (len(logits.shape) > 1): logits_scale = tf.expand_dims(logits_scale, axis=(- 1)) return (logits / logits_scale)<|docstring|>Adjust the model logits so its softmax approximates the posterior mean [1]. [1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal Jackknife. _arXiv preprint arXiv:2006.07584_, 2020. https://arxiv.org/abs/2006.07584 Arguments: logits: A float tensor of shape (batch_size, num_classes). covariance_matrix: The covariance matrix of shape (batch_size, batch_size). If None then it assumes the covariance_matrix is an identity matrix. mean_field_factor: The scale factor for mean-field approximation, used to adjust the influence of posterior variance in posterior mean approximation. If covariance_matrix=None then it is used as the temperature parameter for temperature scaling. 
Returns: Tensor of adjusted logits, shape (batch_size, num_classes).<|endoftext|>
9cf3f730f42fafb3c1fc6aeac5de4d267667485b903659d6f4dac2ec3bf46246
def __init__(self, units, num_inducing=1024, gp_kernel_type='gaussian', gp_kernel_scale=1.0, gp_output_bias=0.0, normalize_input=False, gp_kernel_scale_trainable=False, gp_output_bias_trainable=False, gp_cov_momentum=0.999, gp_cov_ridge_penalty=1.0, scale_random_features=True, use_custom_random_features=True, custom_random_features_initializer=None, custom_random_features_activation=None, l2_regularization=1e-06, gp_cov_likelihood='gaussian', return_gp_cov=True, return_random_features=False, dtype=None, name='random_feature_gaussian_process', **gp_output_kwargs): "Initializes a random-feature Gaussian process layer instance.\n\n Args:\n units: (int) Number of output units.\n num_inducing: (int) Number of random Fourier features used for\n approximating the Gaussian process.\n gp_kernel_type: (string) The type of kernel function to use for Gaussian\n process. Currently default to 'gaussian' which is the Gaussian RBF\n kernel.\n gp_kernel_scale: (float) The length-scale parameter of the a\n shift-invariant kernel function, i.e., for RBF kernel:\n exp(-|x1 - x2|**2 / gp_kernel_scale).\n gp_output_bias: (float) Scalar initial value for the bias vector.\n normalize_input: (bool) Whether to normalize the input to Gaussian\n process.\n gp_kernel_scale_trainable: (bool) Whether the length scale variable is\n trainable.\n gp_output_bias_trainable: (bool) Whether the bias is trainable.\n gp_cov_momentum: (float) A discount factor used to compute the moving\n average for posterior covariance matrix.\n gp_cov_ridge_penalty: (float) Initial Ridge penalty to posterior\n covariance matrix.\n scale_random_features: (bool) Whether to scale the random feature\n by sqrt(2. / num_inducing).\n use_custom_random_features: (bool) Whether to use custom random\n features implemented using tf.keras.layers.Dense.\n custom_random_features_initializer: (str or callable) Initializer for\n the random features. 
Default to random normal which approximates a RBF\n kernel function if activation function is cos.\n custom_random_features_activation: (callable) Activation function for the\n random feature layer. Default to cosine which approximates a RBF\n kernel function.\n l2_regularization: (float) The strength of l2 regularization on the output\n weights.\n gp_cov_likelihood: (string) Likelihood to use for computing Laplace\n approximation for covariance matrix. Default to `gaussian`.\n return_gp_cov: (bool) Whether to also return GP covariance matrix.\n If False then no covariance learning is performed.\n return_random_features: (bool) Whether to also return random features.\n dtype: (tf.DType) Input data type.\n name: (string) Layer name.\n **gp_output_kwargs: Additional keyword arguments to dense output layer.\n " super(RandomFeatureGaussianProcess, self).__init__(name=name, dtype=dtype) self.units = units self.num_inducing = num_inducing self.normalize_input = normalize_input self.gp_input_scale = (1.0 / tf.sqrt(gp_kernel_scale)) self.gp_feature_scale = tf.sqrt((2.0 / float(num_inducing))) self.scale_random_features = scale_random_features self.return_random_features = return_random_features self.return_gp_cov = return_gp_cov self.gp_kernel_type = gp_kernel_type self.gp_kernel_scale = gp_kernel_scale self.gp_output_bias = gp_output_bias self.gp_kernel_scale_trainable = gp_kernel_scale_trainable self.gp_output_bias_trainable = gp_output_bias_trainable self.use_custom_random_features = use_custom_random_features self.custom_random_features_initializer = custom_random_features_initializer self.custom_random_features_activation = custom_random_features_activation self.l2_regularization = l2_regularization self.gp_output_kwargs = gp_output_kwargs self.gp_cov_momentum = gp_cov_momentum self.gp_cov_ridge_penalty = gp_cov_ridge_penalty self.gp_cov_likelihood = gp_cov_likelihood if self.use_custom_random_features: self.random_features_bias_initializer = 
tf.random_uniform_initializer(minval=0.0, maxval=(2.0 * math.pi)) if (self.custom_random_features_initializer is None): self.custom_random_features_initializer = tf.keras.initializers.RandomNormal(stddev=1.0) if (self.custom_random_features_activation is None): self.custom_random_features_activation = tf.math.cos
def __init__(self,
             units,
             num_inducing=1024,
             gp_kernel_type='gaussian',
             gp_kernel_scale=1.0,
             gp_output_bias=0.0,
             normalize_input=False,
             gp_kernel_scale_trainable=False,
             gp_output_bias_trainable=False,
             gp_cov_momentum=0.999,
             gp_cov_ridge_penalty=1.0,
             scale_random_features=True,
             use_custom_random_features=True,
             custom_random_features_initializer=None,
             custom_random_features_activation=None,
             l2_regularization=1e-06,
             gp_cov_likelihood='gaussian',
             return_gp_cov=True,
             return_random_features=False,
             dtype=None,
             name='random_feature_gaussian_process',
             **gp_output_kwargs):
  """Initializes a random-feature Gaussian process layer instance.

  Args:
    units: (int) Number of output units.
    num_inducing: (int) Number of random Fourier features used for
      approximating the Gaussian process.
    gp_kernel_type: (string) The type of kernel function to use for the
      Gaussian process. Currently defaults to 'gaussian', the Gaussian RBF
      kernel.
    gp_kernel_scale: (float) The length-scale parameter of a shift-invariant
      kernel function, i.e., for the RBF kernel:
      exp(-|x1 - x2|**2 / gp_kernel_scale).
    gp_output_bias: (float) Scalar initial value for the bias vector.
    normalize_input: (bool) Whether to normalize the input to the Gaussian
      process.
    gp_kernel_scale_trainable: (bool) Whether the length-scale variable is
      trainable.
    gp_output_bias_trainable: (bool) Whether the bias is trainable.
    gp_cov_momentum: (float) A discount factor used to compute the moving
      average for the posterior covariance matrix.
    gp_cov_ridge_penalty: (float) Initial ridge penalty to the posterior
      covariance matrix.
    scale_random_features: (bool) Whether to scale the random features by
      sqrt(2. / num_inducing).
    use_custom_random_features: (bool) Whether to use custom random features
      implemented using tf.keras.layers.Dense.
    custom_random_features_initializer: (str or callable) Initializer for the
      random features. Defaults to random normal, which approximates an RBF
      kernel function if the activation function is cos.
    custom_random_features_activation: (callable) Activation function for the
      random feature layer. Defaults to cosine, which approximates an RBF
      kernel function.
    l2_regularization: (float) The strength of l2 regularization on the
      output weights.
    gp_cov_likelihood: (string) Likelihood to use for computing the Laplace
      approximation of the covariance matrix. Defaults to `gaussian`.
    return_gp_cov: (bool) Whether to also return the GP covariance matrix.
      If False then no covariance learning is performed.
    return_random_features: (bool) Whether to also return random features.
    dtype: (tf.DType) Input data type.
    name: (string) Layer name.
    **gp_output_kwargs: Additional keyword arguments to the dense output
      layer.
  """
  super(RandomFeatureGaussianProcess, self).__init__(name=name, dtype=dtype)
  self.units = units
  self.num_inducing = num_inducing
  self.normalize_input = normalize_input
  # Input scaling implements the kernel length-scale: dividing inputs by
  # sqrt(gp_kernel_scale) is equivalent to using the scaled RBF kernel.
  self.gp_input_scale = (1.0 / tf.sqrt(gp_kernel_scale))
  # Standard sqrt(2 / D) normalizer from the random Fourier feature
  # approximation (Rahimi & Recht).
  self.gp_feature_scale = tf.sqrt((2.0 / float(num_inducing)))
  self.scale_random_features = scale_random_features
  self.return_random_features = return_random_features
  self.return_gp_cov = return_gp_cov
  self.gp_kernel_type = gp_kernel_type
  self.gp_kernel_scale = gp_kernel_scale
  self.gp_output_bias = gp_output_bias
  self.gp_kernel_scale_trainable = gp_kernel_scale_trainable
  self.gp_output_bias_trainable = gp_output_bias_trainable
  self.use_custom_random_features = use_custom_random_features
  self.custom_random_features_initializer = custom_random_features_initializer
  self.custom_random_features_activation = custom_random_features_activation
  self.l2_regularization = l2_regularization
  self.gp_output_kwargs = gp_output_kwargs
  self.gp_cov_momentum = gp_cov_momentum
  self.gp_cov_ridge_penalty = gp_cov_ridge_penalty
  self.gp_cov_likelihood = gp_cov_likelihood
  if self.use_custom_random_features:
    # Bias sampled uniformly from [0, 2*pi); together with a random-normal
    # kernel and cos activation this yields the classic RFF approximation
    # of an RBF kernel.
    self.random_features_bias_initializer = tf.random_uniform_initializer(minval=0.0, maxval=(2.0 * math.pi))
    if (self.custom_random_features_initializer is None):
      self.custom_random_features_initializer = tf.keras.initializers.RandomNormal(stddev=1.0)
    if (self.custom_random_features_activation is None):
      self.custom_random_features_activation = tf.math.cos
def _make_random_feature_layer(self, name):
  """Builds the random-feature layer matching the configured kernel type."""
  if not self.use_custom_random_features:
    # Delegate to Keras' built-in random Fourier feature implementation.
    return tf.keras.layers.experimental.RandomFourierFeatures(
        output_dim=self.num_inducing,
        kernel_initializer=self.gp_kernel_type,
        scale=self.gp_kernel_scale,
        trainable=self.gp_kernel_scale_trainable,
        dtype=self.dtype,
        name=name)
  if self.gp_kernel_type.lower() == 'linear':
    # A linear kernel needs no random projection; pass inputs through.
    return tf.keras.layers.Lambda(lambda x: x, name=name)
  # Frozen dense projection whose initializer/activation approximate a
  # shift-invariant kernel.
  return tf.keras.layers.Dense(
      units=self.num_inducing,
      use_bias=True,
      activation=self.custom_random_features_activation,
      kernel_initializer=self.custom_random_features_initializer,
      bias_initializer=self.random_features_bias_initializer,
      trainable=False,
      name=name)
def reset_covariance_matrix(self):
  """Resets the covariance matrix of the GP layer.

  Useful for re-estimating the model's covariance matrix from scratch at
  the beginning of a new epoch.
  """
  # Delegate to the covariance layer, which owns the precision variable.
  self._gp_cov_layer.reset_precision_matrix()
def make_precision_matrix_update_op(self, gp_feature, logits, precision_matrix):
  """Defines the update op for the precision matrix of the feature weights.

  Args:
    gp_feature: (tf.Tensor) GP random features, shape (batch_size,
      gp_hidden_size).
    logits: (tf.Tensor) Pre-activation model output. Required (and must be
      univariate) when the likelihood is not Gaussian; may be None otherwise.
    precision_matrix: (tf.Variable) The precision matrix variable to update.

  Returns:
    The assign op that writes the updated precision matrix to the variable.

  Raises:
    ValueError: If `logits` is None, or is not univariate, while a
      non-Gaussian likelihood is configured.
  """
  if self.likelihood != 'gaussian':
    if logits is None:
      raise ValueError(
          f'"logits" cannot be None when likelihood={self.likelihood}')
    if logits.shape[-1] != 1:
      # Fixed: original message was missing the separator between sentences
      # ("...univariate logits.Got logits dimension...").
      raise ValueError(
          f'likelihood={self.likelihood} only support univariate logits. '
          f'Got logits dimension: {logits.shape[-1]}')

  batch_size = tf.shape(gp_feature)[0]
  batch_size = tf.cast(batch_size, dtype=gp_feature.dtype)

  # Laplace approximation: weight each feature row by the likelihood's
  # second-order term (e.g. p * (1 - p) for binary logistic regression).
  if self.likelihood == 'binary_logistic':
    prob = tf.sigmoid(logits)
    prob_multiplier = prob * (1.0 - prob)
  elif self.likelihood == 'poisson':
    prob_multiplier = tf.exp(logits)
  else:
    prob_multiplier = 1.0

  gp_feature_adjusted = tf.sqrt(prob_multiplier) * gp_feature
  precision_matrix_minibatch = tf.matmul(
      gp_feature_adjusted, gp_feature_adjusted, transpose_a=True)
  if self.momentum > 0:
    # Momentum-based moving average of the batch-normalized minibatch
    # precision matrices.
    precision_matrix_minibatch = precision_matrix_minibatch / batch_size
    precision_matrix_new = (
        self.momentum * precision_matrix +
        (1.0 - self.momentum) * precision_matrix_minibatch)
  else:
    # Exact accumulation across all minibatches (infinite memory).
    precision_matrix_new = precision_matrix + precision_matrix_minibatch
  return precision_matrix.assign(precision_matrix_new)
def reset_precision_matrix(self):
  """Restores the precision matrix to its initial value.

  Useful for re-estimating the model's covariance matrix from scratch at
  the beginning of a new epoch.
  """
  # Queue the assignment as a layer update so it runs with other updates.
  reset_op = self.precision_matrix.assign(self.initial_precision_matrix)
  self.add_update(reset_op)
def compute_predictive_covariance(self, gp_feature):
  """Computes the posterior predictive covariance for a batch.

  Approximates the Gaussian process posterior using random features. Given
  training random features Phi_tr (num_train, num_hidden) and testing random
  features Phi_ts (batch_size, num_hidden), the predictive covariance matrix
  is (assuming a Gaussian likelihood):

    s * Phi_ts @ inv(t(Phi_tr) @ Phi_tr + s * I) @ t(Phi_ts),

  where s is the ridge factor stabilizing the inverse and I is the
  (num_hidden, num_hidden) identity.

  Args:
    gp_feature: (tf.Tensor) Random features of the test batch, shape
      (batch_size, gp_hidden_size).

  Returns:
    (tf.Tensor) Predictive covariance matrix, shape (batch_size, batch_size).
  """
  # Invert the accumulated precision matrix to obtain the feature-space
  # covariance.
  feature_cov = tf.linalg.inv(self.precision_matrix)
  # Right factor Cov @ Phi^T, scaled by the ridge penalty s.
  scaled_product = self.ridge_penalty * tf.matmul(
      feature_cov, gp_feature, transpose_b=True)
  return tf.matmul(gp_feature, scaled_product)
def call(self, inputs, logits=None, training=None):
  """Minibatch-updates the GP's posterior precision matrix estimate.

  Args:
    inputs: (tf.Tensor) GP random features, shape (batch_size,
      gp_hidden_size).
    logits: (tf.Tensor) Pre-activation output from the model. Needed for
      the Laplace approximation under a non-Gaussian likelihood.
    training: (tf.bool) Whether the layer is in training mode. If so, the
      gp_weight covariance is updated using the incoming features.

  Returns:
    (tf.Tensor) GP posterior predictive covariance,
    shape (batch_size, batch_size).
  """
  training = self._get_training_value(training)
  if training:
    # Fold this minibatch into the running precision-matrix estimate.
    update_op = self.make_precision_matrix_update_op(
        gp_feature=inputs,
        logits=logits,
        precision_matrix=self.precision_matrix)
    self.add_update(update_op)
    # During training, return a placeholder identity covariance.
    return tf.eye(tf.shape(inputs)[0], dtype=self.dtype)
  # At inference time, compute the actual posterior covariance.
  return self.compute_predictive_covariance(gp_feature=inputs)
def door_state_to_string(door_status):
    """Returns the normalized value that determine_door_state represents."""
    names = {
        LockDoorStatus.OPEN: 'dooropen',
        LockDoorStatus.CLOSED: 'doorclosed',
    }
    name = names.get(door_status)
    if name is None:
        # Any other status (e.g. unknown) is not representable.
        raise ValueError
    return name
@lock_status.setter
def lock_status(self, var):
    """Update the lock status (usually from the activity log)."""
    if var in LockStatus:
        self._lock_status = var
    else:
        raise ValueError
@lock_status_datetime.setter
def lock_status_datetime(self, var):
    """Update the lock status datetime (usually from the activity log)."""
    # Accepts date and datetime (datetime subclasses date).
    if isinstance(var, datetime.date):
        self._lock_status_datetime = var
    else:
        raise ValueError
@door_state.setter
def door_state(self, var):
    """Update the door state (usually from the activity log)."""
    if var not in LockDoorStatus:
        raise ValueError
    self._door_state = var
    is_known_state = var != LockDoorStatus.UNKNOWN
    if is_known_state:
        # A concrete open/closed reading implies the lock has DoorSense.
        self._doorsense = True
@door_state_datetime.setter
def door_state_datetime(self, var):
    """Update the door state datetime (usually from the activity log)."""
    # Accepts date and datetime (datetime subclasses date).
    if isinstance(var, datetime.date):
        self._door_state_datetime = var
    else:
        raise ValueError
4e5baa37e00b201886d26894e9723ce1c15ba0029f8c9378cee712341b1a9105
def set_online(self, state): 'Called when the lock comes back online or goes offline.' if (not self._bridge): return self._bridge.set_online(state)
Called when the lock comes back online or goes offline.
yalexs/lock.py
set_online
bdraco/yalexs
8
python
def set_online(self, state): if (not self._bridge): return self._bridge.set_online(state)
def set_online(self, state): if (not self._bridge): return self._bridge.set_online(state)<|docstring|>Called when the lock comes back online or goes offline.<|endoftext|>
b3a9050b8108c142cef2b1c46acb87c0cc9edef1203ac55703bb10c6765ff7cf
def get_user(self, user_id): 'Lookup user data by id.' return self._data.get('users', {}).get(user_id)
Lookup user data by id.
yalexs/lock.py
get_user
bdraco/yalexs
8
python
def get_user(self, user_id): return self._data.get('users', {}).get(user_id)
def get_user(self, user_id): return self._data.get('users', {}).get(user_id)<|docstring|>Lookup user data by id.<|endoftext|>
7b017bf6e5c8bee6e97552a86f3331440dcfbfc4ff9eaf38550a0c22d4d04129
def register_dids(self, dids): '\n Register the provided dids to be cleaned up on teardown\n ' self.created_dids.extend(dids)
Register the provided dids to be cleaned up on teardown
lib/rucio/tests/temp_factories.py
register_dids
ChristophAmes/rucio
0
python
def register_dids(self, dids): '\n \n ' self.created_dids.extend(dids)
def register_dids(self, dids): '\n \n ' self.created_dids.extend(dids)<|docstring|>Register the provided dids to be cleaned up on teardown<|endoftext|>
879ace14c1c11d578ea5ec647a82d2693b9c2bcf66d3830d5f689d3ddd2c06c3
def waterfall(results, ax=None, size=(18.5, 10.5), y_limits=None, scale_y='log10', offset_y=None, start_indices=None, reference=None, colors=None, legends=None): "\n Plot waterfall plot.\n\n Parameters\n ----------\n\n results: pypesto.Result or list\n Optimization result obtained by 'optimize.py' or list of those\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n Figure size (width, height) in inches. Is only applied when no ax\n object is specified\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices: list or int\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n reference: list, optional\n List of reference points for optimization results, containing et\n least a function value fval\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. 
If not set, clustering is done\n and colors are assigned automatically\n\n legends: list or str\n Labels for line plots, one label per result object\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if isinstance(start_indices, int): start_indices = list(range(start_indices)) (results, colors, legends) = process_result_list(results, colors, legends) if ((offset_y is None) and (len(results) > 1) and (scale_y == 'log10')): offset_y = process_offset_for_list(results, scale_y) max_len_fvals = np.array([0]) for (j, result) in enumerate(results): (fvals, offset_y) = get_fvals(result, scale_y, offset_y, start_indices) max_len_fvals = np.max([max_len_fvals, len(fvals)]) ax = waterfall_lowlevel(fvals=fvals, scale_y=scale_y, offset_y=offset_y, ax=ax, size=size, colors=colors[j], legend_text=legends[j]) ref = create_references(references=reference) ax = handle_options(ax, max_len_fvals, ref, y_limits, offset_y) return ax
Plot waterfall plot. Parameters ---------- results: pypesto.Result or list Optimization result obtained by 'optimize.py' or list of those ax: matplotlib.Axes, optional Axes object to use. size: tuple, optional Figure size (width, height) in inches. Is only applied when no ax object is specified y_limits: float or ndarray, optional maximum value to be plotted on the y-axis, or y-limits scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale start_indices: list or int list of integers specifying the multistart to be plotted or int specifying up to which start index should be plotted reference: list, optional List of reference points for optimization results, containing et least a function value fval colors: list, or RGBA, optional list of colors, or single color color or list of colors for plotting. If not set, clustering is done and colors are assigned automatically legends: list or str Labels for line plots, one label per result object Returns ------- ax: matplotlib.Axes The plot axes.
pypesto/visualize/waterfall.py
waterfall
beimbusch/pyPESTO
0
python
def waterfall(results, ax=None, size=(18.5, 10.5), y_limits=None, scale_y='log10', offset_y=None, start_indices=None, reference=None, colors=None, legends=None): "\n Plot waterfall plot.\n\n Parameters\n ----------\n\n results: pypesto.Result or list\n Optimization result obtained by 'optimize.py' or list of those\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n Figure size (width, height) in inches. Is only applied when no ax\n object is specified\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices: list or int\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n reference: list, optional\n List of reference points for optimization results, containing et\n least a function value fval\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. 
If not set, clustering is done\n and colors are assigned automatically\n\n legends: list or str\n Labels for line plots, one label per result object\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if isinstance(start_indices, int): start_indices = list(range(start_indices)) (results, colors, legends) = process_result_list(results, colors, legends) if ((offset_y is None) and (len(results) > 1) and (scale_y == 'log10')): offset_y = process_offset_for_list(results, scale_y) max_len_fvals = np.array([0]) for (j, result) in enumerate(results): (fvals, offset_y) = get_fvals(result, scale_y, offset_y, start_indices) max_len_fvals = np.max([max_len_fvals, len(fvals)]) ax = waterfall_lowlevel(fvals=fvals, scale_y=scale_y, offset_y=offset_y, ax=ax, size=size, colors=colors[j], legend_text=legends[j]) ref = create_references(references=reference) ax = handle_options(ax, max_len_fvals, ref, y_limits, offset_y) return ax
def waterfall(results, ax=None, size=(18.5, 10.5), y_limits=None, scale_y='log10', offset_y=None, start_indices=None, reference=None, colors=None, legends=None): "\n Plot waterfall plot.\n\n Parameters\n ----------\n\n results: pypesto.Result or list\n Optimization result obtained by 'optimize.py' or list of those\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n Figure size (width, height) in inches. Is only applied when no ax\n object is specified\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices: list or int\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n reference: list, optional\n List of reference points for optimization results, containing et\n least a function value fval\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. 
If not set, clustering is done\n and colors are assigned automatically\n\n legends: list or str\n Labels for line plots, one label per result object\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if isinstance(start_indices, int): start_indices = list(range(start_indices)) (results, colors, legends) = process_result_list(results, colors, legends) if ((offset_y is None) and (len(results) > 1) and (scale_y == 'log10')): offset_y = process_offset_for_list(results, scale_y) max_len_fvals = np.array([0]) for (j, result) in enumerate(results): (fvals, offset_y) = get_fvals(result, scale_y, offset_y, start_indices) max_len_fvals = np.max([max_len_fvals, len(fvals)]) ax = waterfall_lowlevel(fvals=fvals, scale_y=scale_y, offset_y=offset_y, ax=ax, size=size, colors=colors[j], legend_text=legends[j]) ref = create_references(references=reference) ax = handle_options(ax, max_len_fvals, ref, y_limits, offset_y) return ax<|docstring|>Plot waterfall plot. Parameters ---------- results: pypesto.Result or list Optimization result obtained by 'optimize.py' or list of those ax: matplotlib.Axes, optional Axes object to use. size: tuple, optional Figure size (width, height) in inches. Is only applied when no ax object is specified y_limits: float or ndarray, optional maximum value to be plotted on the y-axis, or y-limits scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale start_indices: list or int list of integers specifying the multistart to be plotted or int specifying up to which start index should be plotted reference: list, optional List of reference points for optimization results, containing et least a function value fval colors: list, or RGBA, optional list of colors, or single color color or list of colors for plotting. 
If not set, clustering is done and colors are assigned automatically legends: list or str Labels for line plots, one label per result object Returns ------- ax: matplotlib.Axes The plot axes.<|endoftext|>
bcf17f160c4b5583eea055452425ce16248298b4e54e17c3fa97abd45dfc3d64
def waterfall_lowlevel(fvals, scale_y='log10', offset_y=0.0, ax=None, size=(18.5, 10.5), colors=None, legend_text=None): "\n Plot waterfall plot using list of function values.\n\n Parameters\n ----------\n\n fvals: numeric list or array\n Including values need to be plotted.\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n see waterfall\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. If not set, clustering is done\n and colors are assigned automatically\n\n legend_text: str\n Label for line plots\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if (ax is None): ax = plt.subplots()[1] fig = plt.gcf() fig.set_size_inches(*size) fvals = np.array(fvals) (_, fvals) = delete_nan_inf(fvals) n_fvals = len(fvals) start_ind = range(n_fvals) colors = assign_colors(fvals, colors=colors) indices = sorted(range(n_fvals), key=(lambda j: fvals[j])) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) if (scale_y == 'log10'): ax.semilogy(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) else: ax.plot(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) for j in range(n_fvals): j_fval = indices[j] color = colors[j_fval] fval = fvals[j_fval] if (j == 0): tmp_legend = legend_text else: tmp_legend = None (y_min, y_max) = ax.get_ylim() if (scale_y == 'log10'): ax.semilogy(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((np.log10(y_max) - np.log10(y_min)) < 1.0): y_mean = (0.5 * (np.log10(y_min) + np.log10(y_max))) plt.ylim(((10.0 ** (y_mean - 0.5)), (10.0 ** (y_mean + 0.5)))) else: ax.plot(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((y_max - y_min) < 1.0): y_mean = (0.5 * (y_min + y_max)) plt.ylim(((y_mean - 0.5), (y_mean + 0.5))) ax.set_xlabel('Ordered optimizer run') if (offset_y == 0.0): 
ax.set_ylabel('Function value') else: ax.set_ylabel('Offsetted function value (relative to best start)') ax.set_title('Waterfall plot') if (legend_text is not None): ax.legend() return ax
Plot waterfall plot using list of function values. Parameters ---------- fvals: numeric list or array Including values need to be plotted. scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale ax: matplotlib.Axes, optional Axes object to use. size: tuple, optional see waterfall colors: list, or RGBA, optional list of colors, or single color color or list of colors for plotting. If not set, clustering is done and colors are assigned automatically legend_text: str Label for line plots Returns ------- ax: matplotlib.Axes The plot axes.
pypesto/visualize/waterfall.py
waterfall_lowlevel
beimbusch/pyPESTO
0
python
def waterfall_lowlevel(fvals, scale_y='log10', offset_y=0.0, ax=None, size=(18.5, 10.5), colors=None, legend_text=None): "\n Plot waterfall plot using list of function values.\n\n Parameters\n ----------\n\n fvals: numeric list or array\n Including values need to be plotted.\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n see waterfall\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. If not set, clustering is done\n and colors are assigned automatically\n\n legend_text: str\n Label for line plots\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if (ax is None): ax = plt.subplots()[1] fig = plt.gcf() fig.set_size_inches(*size) fvals = np.array(fvals) (_, fvals) = delete_nan_inf(fvals) n_fvals = len(fvals) start_ind = range(n_fvals) colors = assign_colors(fvals, colors=colors) indices = sorted(range(n_fvals), key=(lambda j: fvals[j])) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) if (scale_y == 'log10'): ax.semilogy(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) else: ax.plot(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) for j in range(n_fvals): j_fval = indices[j] color = colors[j_fval] fval = fvals[j_fval] if (j == 0): tmp_legend = legend_text else: tmp_legend = None (y_min, y_max) = ax.get_ylim() if (scale_y == 'log10'): ax.semilogy(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((np.log10(y_max) - np.log10(y_min)) < 1.0): y_mean = (0.5 * (np.log10(y_min) + np.log10(y_max))) plt.ylim(((10.0 ** (y_mean - 0.5)), (10.0 ** (y_mean + 0.5)))) else: ax.plot(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((y_max - y_min) < 1.0): y_mean = (0.5 * (y_min + y_max)) plt.ylim(((y_mean - 0.5), (y_mean + 0.5))) ax.set_xlabel('Ordered optimizer run') if (offset_y == 0.0): 
ax.set_ylabel('Function value') else: ax.set_ylabel('Offsetted function value (relative to best start)') ax.set_title('Waterfall plot') if (legend_text is not None): ax.legend() return ax
def waterfall_lowlevel(fvals, scale_y='log10', offset_y=0.0, ax=None, size=(18.5, 10.5), colors=None, legend_text=None): "\n Plot waterfall plot using list of function values.\n\n Parameters\n ----------\n\n fvals: numeric list or array\n Including values need to be plotted.\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n size: tuple, optional\n see waterfall\n\n colors: list, or RGBA, optional\n list of colors, or single color\n color or list of colors for plotting. If not set, clustering is done\n and colors are assigned automatically\n\n legend_text: str\n Label for line plots\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n " if (ax is None): ax = plt.subplots()[1] fig = plt.gcf() fig.set_size_inches(*size) fvals = np.array(fvals) (_, fvals) = delete_nan_inf(fvals) n_fvals = len(fvals) start_ind = range(n_fvals) colors = assign_colors(fvals, colors=colors) indices = sorted(range(n_fvals), key=(lambda j: fvals[j])) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) if (scale_y == 'log10'): ax.semilogy(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) else: ax.plot(start_ind, fvals, color=[0.7, 0.7, 0.7, 0.6]) for j in range(n_fvals): j_fval = indices[j] color = colors[j_fval] fval = fvals[j_fval] if (j == 0): tmp_legend = legend_text else: tmp_legend = None (y_min, y_max) = ax.get_ylim() if (scale_y == 'log10'): ax.semilogy(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((np.log10(y_max) - np.log10(y_min)) < 1.0): y_mean = (0.5 * (np.log10(y_min) + np.log10(y_max))) plt.ylim(((10.0 ** (y_mean - 0.5)), (10.0 ** (y_mean + 0.5)))) else: ax.plot(j, fval, color=color, marker='o', label=tmp_legend, alpha=1.0) if ((y_max - y_min) < 1.0): y_mean = (0.5 * (y_min + y_max)) plt.ylim(((y_mean - 0.5), (y_mean + 0.5))) ax.set_xlabel('Ordered optimizer run') if (offset_y == 0.0): 
ax.set_ylabel('Function value') else: ax.set_ylabel('Offsetted function value (relative to best start)') ax.set_title('Waterfall plot') if (legend_text is not None): ax.legend() return ax<|docstring|>Plot waterfall plot using list of function values. Parameters ---------- fvals: numeric list or array Including values need to be plotted. scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale ax: matplotlib.Axes, optional Axes object to use. size: tuple, optional see waterfall colors: list, or RGBA, optional list of colors, or single color color or list of colors for plotting. If not set, clustering is done and colors are assigned automatically legend_text: str Label for line plots Returns ------- ax: matplotlib.Axes The plot axes.<|endoftext|>
9155ad946eaa02607386c60f1dcd33398d9996ed581cafbbe799d25f6118373f
def get_fvals(result: Result, scale_y: str, offset_y: float, start_indices: Optional[Iterable[int]]=None): "\n Get function values to be plotted later from results.\n\n Parameters\n ----------\n\n result: pypesto.Result\n Optimization result obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices:\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n Returns\n -------\n\n fvals: ndarray\n function values\n\n offset_y:\n offset for the y-axis, if this is supposed to be in log10-scale\n " fvals = np.array(result.optimize_result.get_for_key('fval')) if (start_indices is None): start_indices = np.array(range(len(fvals))) else: start_indices = np.array(start_indices) existing_indices = np.array(range(len(fvals))) start_indices = np.intersect1d(start_indices, existing_indices) fvals = fvals[start_indices] min_val = np.nanmin(fvals[(fvals != (- np.inf))]) offset_y = process_offset_y(offset_y, scale_y, float(min_val)) if (offset_y != 0.0): fvals += (offset_y * np.ones(fvals.shape)) return (fvals, offset_y)
Get function values to be plotted later from results. Parameters ---------- result: pypesto.Result Optimization result obtained by 'optimize.py' scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale start_indices: list of integers specifying the multistart to be plotted or int specifying up to which start index should be plotted Returns ------- fvals: ndarray function values offset_y: offset for the y-axis, if this is supposed to be in log10-scale
pypesto/visualize/waterfall.py
get_fvals
beimbusch/pyPESTO
0
python
def get_fvals(result: Result, scale_y: str, offset_y: float, start_indices: Optional[Iterable[int]]=None): "\n Get function values to be plotted later from results.\n\n Parameters\n ----------\n\n result: pypesto.Result\n Optimization result obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices:\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n Returns\n -------\n\n fvals: ndarray\n function values\n\n offset_y:\n offset for the y-axis, if this is supposed to be in log10-scale\n " fvals = np.array(result.optimize_result.get_for_key('fval')) if (start_indices is None): start_indices = np.array(range(len(fvals))) else: start_indices = np.array(start_indices) existing_indices = np.array(range(len(fvals))) start_indices = np.intersect1d(start_indices, existing_indices) fvals = fvals[start_indices] min_val = np.nanmin(fvals[(fvals != (- np.inf))]) offset_y = process_offset_y(offset_y, scale_y, float(min_val)) if (offset_y != 0.0): fvals += (offset_y * np.ones(fvals.shape)) return (fvals, offset_y)
def get_fvals(result: Result, scale_y: str, offset_y: float, start_indices: Optional[Iterable[int]]=None): "\n Get function values to be plotted later from results.\n\n Parameters\n ----------\n\n result: pypesto.Result\n Optimization result obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n start_indices:\n list of integers specifying the multistart to be plotted or\n int specifying up to which start index should be plotted\n\n Returns\n -------\n\n fvals: ndarray\n function values\n\n offset_y:\n offset for the y-axis, if this is supposed to be in log10-scale\n " fvals = np.array(result.optimize_result.get_for_key('fval')) if (start_indices is None): start_indices = np.array(range(len(fvals))) else: start_indices = np.array(start_indices) existing_indices = np.array(range(len(fvals))) start_indices = np.intersect1d(start_indices, existing_indices) fvals = fvals[start_indices] min_val = np.nanmin(fvals[(fvals != (- np.inf))]) offset_y = process_offset_y(offset_y, scale_y, float(min_val)) if (offset_y != 0.0): fvals += (offset_y * np.ones(fvals.shape)) return (fvals, offset_y)<|docstring|>Get function values to be plotted later from results. Parameters ---------- result: pypesto.Result Optimization result obtained by 'optimize.py' scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') offset_y: offset for the y-axis, if it is supposed to be in log10-scale start_indices: list of integers specifying the multistart to be plotted or int specifying up to which start index should be plotted Returns ------- fvals: ndarray function values offset_y: offset for the y-axis, if this is supposed to be in log10-scale<|endoftext|>
db9dc6013b9594f5221d1934f7b37ee77ee965cd170b99fcd50632d1d9f89123
def process_offset_for_list(results: Iterable[Result], scale_y: str) -> float: "\n If we have a list of results, all should use the same offset_y,\n which is computed by this function.\n\n Parameters\n ----------\n\n results: list of pypesto.Result\n list of Optimization results obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n Returns\n -------\n\n offset_y:\n offset for the y-axis\n " fvals = np.concatenate([np.array(result.optimize_result.get_for_key('fval')) for result in results]) min_val = np.nanmin(fvals[np.isfinite(fvals)]) offset_y = process_offset_y(None, scale_y, float(min_val)) return offset_y
If we have a list of results, all should use the same offset_y, which is computed by this function. Parameters ---------- results: list of pypesto.Result list of Optimization results obtained by 'optimize.py' scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') Returns ------- offset_y: offset for the y-axis
pypesto/visualize/waterfall.py
process_offset_for_list
beimbusch/pyPESTO
0
python
def process_offset_for_list(results: Iterable[Result], scale_y: str) -> float: "\n If we have a list of results, all should use the same offset_y,\n which is computed by this function.\n\n Parameters\n ----------\n\n results: list of pypesto.Result\n list of Optimization results obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n Returns\n -------\n\n offset_y:\n offset for the y-axis\n " fvals = np.concatenate([np.array(result.optimize_result.get_for_key('fval')) for result in results]) min_val = np.nanmin(fvals[np.isfinite(fvals)]) offset_y = process_offset_y(None, scale_y, float(min_val)) return offset_y
def process_offset_for_list(results: Iterable[Result], scale_y: str) -> float: "\n If we have a list of results, all should use the same offset_y,\n which is computed by this function.\n\n Parameters\n ----------\n\n results: list of pypesto.Result\n list of Optimization results obtained by 'optimize.py'\n\n scale_y: str, optional\n May be logarithmic or linear ('log10' or 'lin')\n\n Returns\n -------\n\n offset_y:\n offset for the y-axis\n " fvals = np.concatenate([np.array(result.optimize_result.get_for_key('fval')) for result in results]) min_val = np.nanmin(fvals[np.isfinite(fvals)]) offset_y = process_offset_y(None, scale_y, float(min_val)) return offset_y<|docstring|>If we have a list of results, all should use the same offset_y, which is computed by this function. Parameters ---------- results: list of pypesto.Result list of Optimization results obtained by 'optimize.py' scale_y: str, optional May be logarithmic or linear ('log10' or 'lin') Returns ------- offset_y: offset for the y-axis<|endoftext|>
1d4cd784d2072d1f0765319edca6ec241f2214cae9d5a1e3d95e0dfb6dd531e3
def handle_options(ax, max_len_fvals, ref, y_limits, offset_y): '\n Get the limits for the y-axis, plots the reference points, will do\n more at a later time point. This function is there to apply whatever\n kind of post-plotting transformations to the axis object.\n\n Parameters\n ----------\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n max_len_fvals: int\n maximum number of points\n\n ref: list, optional\n List of reference points for optimization results, containing at\n least a function value fval\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n ' for i_ref in ref: ax.plot([0, (max_len_fvals - 1)], [(i_ref.fval + offset_y), (i_ref.fval + offset_y)], '--', color=i_ref.color, label=i_ref.legend) if (i_ref.legend is not None): ax.legend() ax = process_y_limits(ax, y_limits) return ax
Get the limits for the y-axis, plots the reference points, will do more at a later time point. This function is there to apply whatever kind of post-plotting transformations to the axis object. Parameters ---------- ax: matplotlib.Axes, optional Axes object to use. max_len_fvals: int maximum number of points ref: list, optional List of reference points for optimization results, containing at least a function value fval y_limits: float or ndarray, optional maximum value to be plotted on the y-axis, or y-limits offset_y: offset for the y-axis, if it is supposed to be in log10-scale Returns ------- ax: matplotlib.Axes The plot axes.
pypesto/visualize/waterfall.py
handle_options
beimbusch/pyPESTO
0
python
def handle_options(ax, max_len_fvals, ref, y_limits, offset_y): '\n Get the limits for the y-axis, plots the reference points, will do\n more at a later time point. This function is there to apply whatever\n kind of post-plotting transformations to the axis object.\n\n Parameters\n ----------\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n max_len_fvals: int\n maximum number of points\n\n ref: list, optional\n List of reference points for optimization results, containing at\n least a function value fval\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n ' for i_ref in ref: ax.plot([0, (max_len_fvals - 1)], [(i_ref.fval + offset_y), (i_ref.fval + offset_y)], '--', color=i_ref.color, label=i_ref.legend) if (i_ref.legend is not None): ax.legend() ax = process_y_limits(ax, y_limits) return ax
def handle_options(ax, max_len_fvals, ref, y_limits, offset_y): '\n Get the limits for the y-axis, plots the reference points, will do\n more at a later time point. This function is there to apply whatever\n kind of post-plotting transformations to the axis object.\n\n Parameters\n ----------\n\n ax: matplotlib.Axes, optional\n Axes object to use.\n\n max_len_fvals: int\n maximum number of points\n\n ref: list, optional\n List of reference points for optimization results, containing at\n least a function value fval\n\n y_limits: float or ndarray, optional\n maximum value to be plotted on the y-axis, or y-limits\n\n offset_y:\n offset for the y-axis, if it is supposed to be in log10-scale\n\n Returns\n -------\n\n ax: matplotlib.Axes\n The plot axes.\n ' for i_ref in ref: ax.plot([0, (max_len_fvals - 1)], [(i_ref.fval + offset_y), (i_ref.fval + offset_y)], '--', color=i_ref.color, label=i_ref.legend) if (i_ref.legend is not None): ax.legend() ax = process_y_limits(ax, y_limits) return ax<|docstring|>Get the limits for the y-axis, plots the reference points, will do more at a later time point. This function is there to apply whatever kind of post-plotting transformations to the axis object. Parameters ---------- ax: matplotlib.Axes, optional Axes object to use. max_len_fvals: int maximum number of points ref: list, optional List of reference points for optimization results, containing at least a function value fval y_limits: float or ndarray, optional maximum value to be plotted on the y-axis, or y-limits offset_y: offset for the y-axis, if it is supposed to be in log10-scale Returns ------- ax: matplotlib.Axes The plot axes.<|endoftext|>
8865bc58e4c4b5c6d6018b4a77355506114a1c2fe9262996d074d2b435eb2c25
def __init__(self, limit=None, uri=None): "initialise instance of Deployment(ICPObj).\n\n It will be used to set up a sparql query, and get all metadata of Deployment from ICOS CP.\n\n Optionally we could limit the number of output:\n - limit the amount of returned results\n\n and/or select Deployment:\n - with ICOS CP 'uri'\n\n Example:\n Deployment(limit=5)\n\n :param limit: number of returned results\n :param uri: ICOS CP URI\n " super().__init__() self._uri = uri self._limit = limit self._inherit = {**self.attr} if isinstance(_attr, dict): self._attr = _attr self.attr = {**self._attr, **self._inherit} for prop in self.attr: self._addSubProperties(prop) if isinstance(_equivalentClass, list): self._equivalentClass = _equivalentClass self._object = 'http://www.w3.org/ns/ssn/Deployment' self._objtype = None if (self._object is not None): self.objtype = self._getObjectType() (filename, line_number, function_name, text) = traceback.extract_stack()[(- 2)] self._instance_name = text[:text.find('=')].strip()
initialise instance of Deployment(ICPObj). It will be used to set up a sparql query, and get all metadata of Deployment from ICOS CP. Optionally we could limit the number of output: - limit the amount of returned results and/or select Deployment: - with ICOS CP 'uri' Example: Deployment(limit=5) :param limit: number of returned results :param uri: ICOS CP URI
icp2edd/icpobj/ssn/deployment.py
__init__
BjerknesClimateDataCentre/ICOS-CP2ERDDAP
0
python
def __init__(self, limit=None, uri=None): "initialise instance of Deployment(ICPObj).\n\n It will be used to set up a sparql query, and get all metadata of Deployment from ICOS CP.\n\n Optionally we could limit the number of output:\n - limit the amount of returned results\n\n and/or select Deployment:\n - with ICOS CP 'uri'\n\n Example:\n Deployment(limit=5)\n\n :param limit: number of returned results\n :param uri: ICOS CP URI\n " super().__init__() self._uri = uri self._limit = limit self._inherit = {**self.attr} if isinstance(_attr, dict): self._attr = _attr self.attr = {**self._attr, **self._inherit} for prop in self.attr: self._addSubProperties(prop) if isinstance(_equivalentClass, list): self._equivalentClass = _equivalentClass self._object = 'http://www.w3.org/ns/ssn/Deployment' self._objtype = None if (self._object is not None): self.objtype = self._getObjectType() (filename, line_number, function_name, text) = traceback.extract_stack()[(- 2)] self._instance_name = text[:text.find('=')].strip()
def __init__(self, limit=None, uri=None): "initialise instance of Deployment(ICPObj).\n\n It will be used to set up a sparql query, and get all metadata of Deployment from ICOS CP.\n\n Optionally we could limit the number of output:\n - limit the amount of returned results\n\n and/or select Deployment:\n - with ICOS CP 'uri'\n\n Example:\n Deployment(limit=5)\n\n :param limit: number of returned results\n :param uri: ICOS CP URI\n " super().__init__() self._uri = uri self._limit = limit self._inherit = {**self.attr} if isinstance(_attr, dict): self._attr = _attr self.attr = {**self._attr, **self._inherit} for prop in self.attr: self._addSubProperties(prop) if isinstance(_equivalentClass, list): self._equivalentClass = _equivalentClass self._object = 'http://www.w3.org/ns/ssn/Deployment' self._objtype = None if (self._object is not None): self.objtype = self._getObjectType() (filename, line_number, function_name, text) = traceback.extract_stack()[(- 2)] self._instance_name = text[:text.find('=')].strip()<|docstring|>initialise instance of Deployment(ICPObj). It will be used to set up a sparql query, and get all metadata of Deployment from ICOS CP. Optionally we could limit the number of output: - limit the amount of returned results and/or select Deployment: - with ICOS CP 'uri' Example: Deployment(limit=5) :param limit: number of returned results :param uri: ICOS CP URI<|endoftext|>
18e25de58afe8c417859cc27650a8ff062c332057f7ab62cbc721685ecf948a0
def get_queues(self, account_id, filters=None): ' Get all Queues for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues'), filters)
Get all Queues for an Account. :param account_id: ID of Account to get Queues for. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type filters: dict, None :rtype: dict
pykazoo/queues.py
get_queues
diarmuidw/PyKazoo
3
python
def get_queues(self, account_id, filters=None): ' Get all Queues for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues'), filters)
def get_queues(self, account_id, filters=None): ' Get all Queues for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues'), filters)<|docstring|>Get all Queues for an Account. :param account_id: ID of Account to get Queues for. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type filters: dict, None :rtype: dict<|endoftext|>
b512a4250d5ea5e90ccf8ce9689d7a1e9b28778fc1fe26934b866e34e18d2c7d
def get_queue(self, account_id, queue_id, filters=None): ' Get a specific Queue for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param queue_id: ID of the Queue to get.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), filters)
Get a specific Queue for an Account. :param account_id: ID of Account to get Queues for. :param queue_id: ID of the Queue to get. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :type filters: dict, None :rtype: dict
pykazoo/queues.py
get_queue
diarmuidw/PyKazoo
3
python
def get_queue(self, account_id, queue_id, filters=None): ' Get a specific Queue for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param queue_id: ID of the Queue to get.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), filters)
def get_queue(self, account_id, queue_id, filters=None): ' Get a specific Queue for an Account.\n\n :param account_id: ID of Account to get Queues for.\n :param queue_id: ID of the Queue to get.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), filters)<|docstring|>Get a specific Queue for an Account. :param account_id: ID of Account to get Queues for. :param queue_id: ID of the Queue to get. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :type filters: dict, None :rtype: dict<|endoftext|>
268e80668ea429363db992bca544977202f576718f2f3d973f671d98d1f9f621
def create_queue(self, account_id, data): ' Create a Queue\n\n :param account_id: ID of Account to create device for.\n :param data: Kazoo Device data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.put((('accounts/' + str(account_id)) + '/queues'), data)
Create a Queue :param account_id: ID of Account to create device for. :param data: Kazoo Device data (see official API Docs). :return: Kazoo Data (see official API docs). :type account_id: str :type data: dict :rtype: dict
pykazoo/queues.py
create_queue
diarmuidw/PyKazoo
3
python
def create_queue(self, account_id, data): ' Create a Queue\n\n :param account_id: ID of Account to create device for.\n :param data: Kazoo Device data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.put((('accounts/' + str(account_id)) + '/queues'), data)
def create_queue(self, account_id, data): ' Create a Queue\n\n :param account_id: ID of Account to create device for.\n :param data: Kazoo Device data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.put((('accounts/' + str(account_id)) + '/queues'), data)<|docstring|>Create a Queue :param account_id: ID of Account to create device for. :param data: Kazoo Device data (see official API Docs). :return: Kazoo Data (see official API docs). :type account_id: str :type data: dict :rtype: dict<|endoftext|>
8bafffe9aa224e5f9c7635ba5a0e67ef208249b7d53d87bdc08ce53130547cc1
def update_queue(self, account_id, queue_id, data): ' Updates a Queue\n\n :param account_id: ID of Account to update Queue for.\n :param queue_id: ID of Queue to update.\n :param data: Kazoo Account data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.post(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), data)
Updates a Queue :param account_id: ID of Account to update Queue for. :param queue_id: ID of Queue to update. :param data: Kazoo Account data (see official API Docs). :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :type data: dict :rtype: dict
pykazoo/queues.py
update_queue
diarmuidw/PyKazoo
3
python
def update_queue(self, account_id, queue_id, data): ' Updates a Queue\n\n :param account_id: ID of Account to update Queue for.\n :param queue_id: ID of Queue to update.\n :param data: Kazoo Account data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.post(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), data)
def update_queue(self, account_id, queue_id, data): ' Updates a Queue\n\n :param account_id: ID of Account to update Queue for.\n :param queue_id: ID of Queue to update.\n :param data: Kazoo Account data (see official API Docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :type data: dict\n :rtype: dict\n ' return self.rest_request.post(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)), data)<|docstring|>Updates a Queue :param account_id: ID of Account to update Queue for. :param queue_id: ID of Queue to update. :param data: Kazoo Account data (see official API Docs). :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :type data: dict :rtype: dict<|endoftext|>
4c0cbb3f08720b9e20b021fb79966e487fb619e4e11a18b68a8bb6edd5b42f81
def delete_queue(self, account_id, queue_id): ' Deletes a Queue\n\n :param account_id: ID of Account to delete Queue from.\n :param queue_id: ID of Queue to delete.\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :rtype: dict\n ' return self.rest_request.delete(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)))
Deletes a Queue :param account_id: ID of Account to delete Queue from. :param queue_id: ID of Queue to delete. :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :rtype: dict
pykazoo/queues.py
delete_queue
diarmuidw/PyKazoo
3
python
def delete_queue(self, account_id, queue_id): ' Deletes a Queue\n\n :param account_id: ID of Account to delete Queue from.\n :param queue_id: ID of Queue to delete.\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :rtype: dict\n ' return self.rest_request.delete(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)))
def delete_queue(self, account_id, queue_id): ' Deletes a Queue\n\n :param account_id: ID of Account to delete Queue from.\n :param queue_id: ID of Queue to delete.\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type queue_id: str\n :rtype: dict\n ' return self.rest_request.delete(((('accounts/' + str(account_id)) + '/queues/') + str(queue_id)))<|docstring|>Deletes a Queue :param account_id: ID of Account to delete Queue from. :param queue_id: ID of Queue to delete. :return: Kazoo Data (see official API docs). :type account_id: str :type queue_id: str :rtype: dict<|endoftext|>
f1d278e8610e06cd78847870cd3dda2eb528be554119ecb01dbff55a80674d9f
def get_queues_stats(self, account_id, filters=None): ' Gets Devices Status\n\n :param account_id: ID of Account to get Queues stats for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues/stats'), filters)
Gets Devices Status :param account_id: ID of Account to get Queues stats for. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type filters: dict, None :rtype: dict
pykazoo/queues.py
get_queues_stats
diarmuidw/PyKazoo
3
python
def get_queues_stats(self, account_id, filters=None): ' Gets Devices Status\n\n :param account_id: ID of Account to get Queues stats for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues/stats'), filters)
def get_queues_stats(self, account_id, filters=None): ' Gets Devices Status\n\n :param account_id: ID of Account to get Queues stats for.\n :param filters: Kazoo Filter Parameters (see official API docs).\n :return: Kazoo Data (see official API docs).\n :type account_id: str\n :type filters: dict, None\n :rtype: dict\n ' return self.rest_request.get((('accounts/' + str(account_id)) + '/queues/stats'), filters)<|docstring|>Gets Devices Status :param account_id: ID of Account to get Queues stats for. :param filters: Kazoo Filter Parameters (see official API docs). :return: Kazoo Data (see official API docs). :type account_id: str :type filters: dict, None :rtype: dict<|endoftext|>
7380d7edb5151b55c358d1913c96c3c824914a1d5e6ff0fff2fdaaf32d3e2f65
def open(self): ' Open login url in browser\n\n :returns: this page object instance\n ' self.logger.debug('\nAtempting to open the page') self.driver.get('{}/login'.format(self.config.get('Test', 'url'))) return self
Open login url in browser :returns: this page object instance
pageobjects/search.py
open
bongadub/sprint_review_web_automation
0
python
def open(self): ' Open login url in browser\n\n :returns: this page object instance\n ' self.logger.debug('\nAtempting to open the page') self.driver.get('{}/login'.format(self.config.get('Test', 'url'))) return self
def open(self): ' Open login url in browser\n\n :returns: this page object instance\n ' self.logger.debug('\nAtempting to open the page') self.driver.get('{}/login'.format(self.config.get('Test', 'url'))) return self<|docstring|>Open login url in browser :returns: this page object instance<|endoftext|>
4aabb6ba658b965d8d2bdca89a62828942ce87903fcafaab4513eb0823046b08
def wait_until_loaded(self): ' Wait until login page is loaded\n\n :returns: this page object instance\n ' self.username.wait_until_visible() return self
Wait until login page is loaded :returns: this page object instance
pageobjects/search.py
wait_until_loaded
bongadub/sprint_review_web_automation
0
python
def wait_until_loaded(self): ' Wait until login page is loaded\n\n :returns: this page object instance\n ' self.username.wait_until_visible() return self
def wait_until_loaded(self): ' Wait until login page is loaded\n\n :returns: this page object instance\n ' self.username.wait_until_visible() return self<|docstring|>Wait until login page is loaded :returns: this page object instance<|endoftext|>
2c76787a8560e87bb15bbc05fc7e780a818ae5066c3882dd33aea186048d8a6c
def login(self, user): ' Fill login form and submit it\n\n :param user: dict with username and password values\n :returns: secure area page object instance\n ' try: self.logger.debug("Login with user '%s'", user['username']) self.username.text = user['username'] self.password.text = user['password'] self.logger.debug('\nAtempting to click login button') self.login_button.click() time.sleep(3) return True except NoSuchElementException: self.auto_log('error', 'Element {} does not exist'.format(element)) return None
Fill login form and submit it :param user: dict with username and password values :returns: secure area page object instance
pageobjects/search.py
login
bongadub/sprint_review_web_automation
0
python
def login(self, user): ' Fill login form and submit it\n\n :param user: dict with username and password values\n :returns: secure area page object instance\n ' try: self.logger.debug("Login with user '%s'", user['username']) self.username.text = user['username'] self.password.text = user['password'] self.logger.debug('\nAtempting to click login button') self.login_button.click() time.sleep(3) return True except NoSuchElementException: self.auto_log('error', 'Element {} does not exist'.format(element)) return None
def login(self, user): ' Fill login form and submit it\n\n :param user: dict with username and password values\n :returns: secure area page object instance\n ' try: self.logger.debug("Login with user '%s'", user['username']) self.username.text = user['username'] self.password.text = user['password'] self.logger.debug('\nAtempting to click login button') self.login_button.click() time.sleep(3) return True except NoSuchElementException: self.auto_log('error', 'Element {} does not exist'.format(element)) return None<|docstring|>Fill login form and submit it :param user: dict with username and password values :returns: secure area page object instance<|endoftext|>
78a4e714ade278d31aba679c18d7d1290b2f5dbcf1431f32421f61078c736a6b
def convert_to_bscode(ts_code): '\n ts_code 转换成 bs_code\n ' b = ts_code.split('.') bs_code = '{}.{}'.format(b[1].lower(), b[0]) return bs_code
ts_code 转换成 bs_code
stockquant/odl/tushare/stock_basic.py
convert_to_bscode
dabuc/StockQuant
0
python
def convert_to_bscode(ts_code): '\n \n ' b = ts_code.split('.') bs_code = '{}.{}'.format(b[1].lower(), b[0]) return bs_code
def convert_to_bscode(ts_code): '\n \n ' b = ts_code.split('.') bs_code = '{}.{}'.format(b[1].lower(), b[0]) return bs_code<|docstring|>ts_code 转换成 bs_code<|endoftext|>
320fd9afde1847a000fb14ff36349a6bdaeaa0b8f9fde1c2304f5068923f373e
def get_stock_basic(): '\n 获取TS股票列表\n ' TS_Stock_Basic.clear_table() pro = ts.pro_api(CQ_Config.TUSHARE_TOKEN) fields = 'ts_code,symbol,name,area,industry,fullname,enname,market,exchange,curr_type,list_status, list_date,delist_date,is_hs' rs_L = pro.stock_basic(exchange='', list_status='L', fields=fields) rs_D = pro.stock_basic(exchange='', list_status='D', fields=fields) rs_P = pro.stock_basic(exchange='', list_status='P', fields=fields) result = pd.concat([rs_L, rs_D, rs_P]) result['list_date'] = pd.to_datetime(result['list_date']) result['delist_date'] = pd.to_datetime(result['delist_date']) result['bs_code'] = result['ts_code'].apply(convert_to_bscode) result.to_sql('odl_ts_stock_basic', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)
获取TS股票列表
stockquant/odl/tushare/stock_basic.py
get_stock_basic
dabuc/StockQuant
0
python
def get_stock_basic(): '\n \n ' TS_Stock_Basic.clear_table() pro = ts.pro_api(CQ_Config.TUSHARE_TOKEN) fields = 'ts_code,symbol,name,area,industry,fullname,enname,market,exchange,curr_type,list_status, list_date,delist_date,is_hs' rs_L = pro.stock_basic(exchange=, list_status='L', fields=fields) rs_D = pro.stock_basic(exchange=, list_status='D', fields=fields) rs_P = pro.stock_basic(exchange=, list_status='P', fields=fields) result = pd.concat([rs_L, rs_D, rs_P]) result['list_date'] = pd.to_datetime(result['list_date']) result['delist_date'] = pd.to_datetime(result['delist_date']) result['bs_code'] = result['ts_code'].apply(convert_to_bscode) result.to_sql('odl_ts_stock_basic', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)
def get_stock_basic(): '\n \n ' TS_Stock_Basic.clear_table() pro = ts.pro_api(CQ_Config.TUSHARE_TOKEN) fields = 'ts_code,symbol,name,area,industry,fullname,enname,market,exchange,curr_type,list_status, list_date,delist_date,is_hs' rs_L = pro.stock_basic(exchange=, list_status='L', fields=fields) rs_D = pro.stock_basic(exchange=, list_status='D', fields=fields) rs_P = pro.stock_basic(exchange=, list_status='P', fields=fields) result = pd.concat([rs_L, rs_D, rs_P]) result['list_date'] = pd.to_datetime(result['list_date']) result['delist_date'] = pd.to_datetime(result['delist_date']) result['bs_code'] = result['ts_code'].apply(convert_to_bscode) result.to_sql('odl_ts_stock_basic', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)<|docstring|>获取TS股票列表<|endoftext|>
0f9db79d3a0f10188d27d07c2ed498f3188efc351a2469ac969f7305f4394a54
def run_stargan(input_dir, output_dir, dont_call_stargan_cmd=False, filter_=YOUNG, crop_size='178', stargan_dir=STARGAN_DEFAULT_DIR): 'Applies StarGAN on `frames` and returns modified frames.\n\n Args:\n input_dir - a directory containing frames as input for StarGAN\n output_dir - a directory for StarGAN results\n filter - the selected StarGAN filter (currently not in use\n and hard coded in StarGAN implementation).\n crop_size - the size for (center)-cropping from the input images.\n stargan_dir - the root directory of StarGAN Git repository.\n\n Returns:\n a list of frames after applying StarGAN with filter `filter_`.\n ' cmd = ['python', 'main.py', '--mode', 'test', '--dataset', 'VidTIMIT', '--c_dim', '5', '--image_size', '128', '--test_model', '20_3000', '--model_save_path', 'stargan_celebA/models', '--vidtimit_image_path', input_dir, '--result_path', output_dir, '--vidtimit_crop_size', crop_size] if (not dont_call_stargan_cmd): subprocess.check_call(cmd, cwd='C:\\face_swap\\StarGAN') result = [] for prefix in ('fake',): frames_dir = os.path.join(output_dir, ('%s_frames' % prefix)) files = [f for f in os.listdir(frames_dir) if f.endswith('.png')] files = [os.path.join(frames_dir, f) for f in files] assert len(files), ("StarGAN didn't generate any %s frames." % prefix) result.append(files) return result[0]
Applies StarGAN on `frames` and returns modified frames. Args: input_dir - a directory containing frames as input for StarGAN output_dir - a directory for StarGAN results filter - the selected StarGAN filter (currently not in use and hard coded in StarGAN implementation). crop_size - the size for (center)-cropping from the input images. stargan_dir - the root directory of StarGAN Git repository. Returns: a list of frames after applying StarGAN with filter `filter_`.
stargan.py
run_stargan
ofirc/video-domain-transfer
4
python
def run_stargan(input_dir, output_dir, dont_call_stargan_cmd=False, filter_=YOUNG, crop_size='178', stargan_dir=STARGAN_DEFAULT_DIR): 'Applies StarGAN on `frames` and returns modified frames.\n\n Args:\n input_dir - a directory containing frames as input for StarGAN\n output_dir - a directory for StarGAN results\n filter - the selected StarGAN filter (currently not in use\n and hard coded in StarGAN implementation).\n crop_size - the size for (center)-cropping from the input images.\n stargan_dir - the root directory of StarGAN Git repository.\n\n Returns:\n a list of frames after applying StarGAN with filter `filter_`.\n ' cmd = ['python', 'main.py', '--mode', 'test', '--dataset', 'VidTIMIT', '--c_dim', '5', '--image_size', '128', '--test_model', '20_3000', '--model_save_path', 'stargan_celebA/models', '--vidtimit_image_path', input_dir, '--result_path', output_dir, '--vidtimit_crop_size', crop_size] if (not dont_call_stargan_cmd): subprocess.check_call(cmd, cwd='C:\\face_swap\\StarGAN') result = [] for prefix in ('fake',): frames_dir = os.path.join(output_dir, ('%s_frames' % prefix)) files = [f for f in os.listdir(frames_dir) if f.endswith('.png')] files = [os.path.join(frames_dir, f) for f in files] assert len(files), ("StarGAN didn't generate any %s frames." % prefix) result.append(files) return result[0]
def run_stargan(input_dir, output_dir, dont_call_stargan_cmd=False, filter_=YOUNG, crop_size='178', stargan_dir=STARGAN_DEFAULT_DIR): 'Applies StarGAN on `frames` and returns modified frames.\n\n Args:\n input_dir - a directory containing frames as input for StarGAN\n output_dir - a directory for StarGAN results\n filter - the selected StarGAN filter (currently not in use\n and hard coded in StarGAN implementation).\n crop_size - the size for (center)-cropping from the input images.\n stargan_dir - the root directory of StarGAN Git repository.\n\n Returns:\n a list of frames after applying StarGAN with filter `filter_`.\n ' cmd = ['python', 'main.py', '--mode', 'test', '--dataset', 'VidTIMIT', '--c_dim', '5', '--image_size', '128', '--test_model', '20_3000', '--model_save_path', 'stargan_celebA/models', '--vidtimit_image_path', input_dir, '--result_path', output_dir, '--vidtimit_crop_size', crop_size] if (not dont_call_stargan_cmd): subprocess.check_call(cmd, cwd='C:\\face_swap\\StarGAN') result = [] for prefix in ('fake',): frames_dir = os.path.join(output_dir, ('%s_frames' % prefix)) files = [f for f in os.listdir(frames_dir) if f.endswith('.png')] files = [os.path.join(frames_dir, f) for f in files] assert len(files), ("StarGAN didn't generate any %s frames." % prefix) result.append(files) return result[0]<|docstring|>Applies StarGAN on `frames` and returns modified frames. Args: input_dir - a directory containing frames as input for StarGAN output_dir - a directory for StarGAN results filter - the selected StarGAN filter (currently not in use and hard coded in StarGAN implementation). crop_size - the size for (center)-cropping from the input images. stargan_dir - the root directory of StarGAN Git repository. Returns: a list of frames after applying StarGAN with filter `filter_`.<|endoftext|>
173144110417e16c990aa5e6bf860adeda4ace80efad1f9a3dfcb7d386eb3248
def get_size(self): 'Returns (height, width) tuple of slice size.' return self.get_slice('R').shape
Returns (height, width) tuple of slice size.
ginga/RGBMap.py
get_size
sosey/ginga
0
python
def get_size(self): return self.get_slice('R').shape
def get_size(self): return self.get_slice('R').shape<|docstring|>Returns (height, width) tuple of slice size.<|endoftext|>
ebd4129a5ee5b986b2108b2fca5f6685e915523ca4a18a124748f934f3b546dd
def set_cmap(self, cmap, callback=True): '\n Set the color map used by this RGBMapper.\n\n `cmap` specifies a ColorMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.cmap = cmap self.calc_cmap() self.recalc(callback=callback)
Set the color map used by this RGBMapper. `cmap` specifies a ColorMap object. If `callback` is True, then any callbacks associated with this change will be invoked.
ginga/RGBMap.py
set_cmap
sosey/ginga
0
python
def set_cmap(self, cmap, callback=True): '\n Set the color map used by this RGBMapper.\n\n `cmap` specifies a ColorMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.cmap = cmap self.calc_cmap() self.recalc(callback=callback)
def set_cmap(self, cmap, callback=True): '\n Set the color map used by this RGBMapper.\n\n `cmap` specifies a ColorMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.cmap = cmap self.calc_cmap() self.recalc(callback=callback)<|docstring|>Set the color map used by this RGBMapper. `cmap` specifies a ColorMap object. If `callback` is True, then any callbacks associated with this change will be invoked.<|endoftext|>
2f3f4f7df9ed9eb11995945f1c88a8ae1d1ec77fb6f7ada2390c9690d540e828
def get_cmap(self): '\n Return the color map used by this RGBMapper.\n ' return self.cmap
Return the color map used by this RGBMapper.
ginga/RGBMap.py
get_cmap
sosey/ginga
0
python
def get_cmap(self): '\n \n ' return self.cmap
def get_cmap(self): '\n \n ' return self.cmap<|docstring|>Return the color map used by this RGBMapper.<|endoftext|>
3dcb73205a2531c49ca2ce0d10e14ac7702c7bd6bffaa102b7ab56f20a2db20a
def get_rgb(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' return tuple(self.arr[index])
Return a tuple of (R, G, B) values in the 0-255 range associated mapped by the value of `index`.
ginga/RGBMap.py
get_rgb
sosey/ginga
0
python
def get_rgb(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' return tuple(self.arr[index])
def get_rgb(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' return tuple(self.arr[index])<|docstring|>Return a tuple of (R, G, B) values in the 0-255 range associated mapped by the value of `index`.<|endoftext|>
dbb8b7315d6e1d3384f6398eca2766ac9b1ce42ea55a113e7333f7d9dbdbe8cc
def get_rgbval(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' assert ((index >= 0) and (index < 256)), RGBMapError('Index must be in range 0-255 !') index = self.sarr[index].clip(0, 255) return (self.arr[0][index], self.arr[1][index], self.arr[2][index])
Return a tuple of (R, G, B) values in the 0-255 range associated mapped by the value of `index`.
ginga/RGBMap.py
get_rgbval
sosey/ginga
0
python
def get_rgbval(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' assert ((index >= 0) and (index < 256)), RGBMapError('Index must be in range 0-255 !') index = self.sarr[index].clip(0, 255) return (self.arr[0][index], self.arr[1][index], self.arr[2][index])
def get_rgbval(self, index): '\n Return a tuple of (R, G, B) values in the 0-255 range associated\n mapped by the value of `index`.\n ' assert ((index >= 0) and (index < 256)), RGBMapError('Index must be in range 0-255 !') index = self.sarr[index].clip(0, 255) return (self.arr[0][index], self.arr[1][index], self.arr[2][index])<|docstring|>Return a tuple of (R, G, B) values in the 0-255 range associated mapped by the value of `index`.<|endoftext|>
8bf3e35e0f80b575f3b6365ccb31078b1824c0a7f40d7a2bf73cd3609c768410
def set_imap(self, imap, callback=True): '\n Set the intensity map used by this RGBMapper.\n\n `imap` specifies an IntensityMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.imap = imap self.calc_imap() self.recalc(callback=callback)
Set the intensity map used by this RGBMapper. `imap` specifies an IntensityMap object. If `callback` is True, then any callbacks associated with this change will be invoked.
ginga/RGBMap.py
set_imap
sosey/ginga
0
python
def set_imap(self, imap, callback=True): '\n Set the intensity map used by this RGBMapper.\n\n `imap` specifies an IntensityMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.imap = imap self.calc_imap() self.recalc(callback=callback)
def set_imap(self, imap, callback=True): '\n Set the intensity map used by this RGBMapper.\n\n `imap` specifies an IntensityMap object. If `callback` is True, then\n any callbacks associated with this change will be invoked.\n ' self.imap = imap self.calc_imap() self.recalc(callback=callback)<|docstring|>Set the intensity map used by this RGBMapper. `imap` specifies an IntensityMap object. If `callback` is True, then any callbacks associated with this change will be invoked.<|endoftext|>
a34869bff80f3a9d579cdcfae6495d1ad082a370898cdc2b95584aa3516a5996
def get_imap(self): '\n Return the intensity map used by this RGBMapper.\n ' return self.imap
Return the intensity map used by this RGBMapper.
ginga/RGBMap.py
get_imap
sosey/ginga
0
python
def get_imap(self): '\n \n ' return self.imap
def get_imap(self): '\n \n ' return self.imap<|docstring|>Return the intensity map used by this RGBMapper.<|endoftext|>
c44bd5b5d858e593ade2b64f1f1ffd3111167f7de3747e926bc1ca32c0c962c0
def get_dist(self): '\n Return the color distribution used by this RGBMapper.\n ' return self.dist
Return the color distribution used by this RGBMapper.
ginga/RGBMap.py
get_dist
sosey/ginga
0
python
def get_dist(self): '\n \n ' return self.dist
def get_dist(self): '\n \n ' return self.dist<|docstring|>Return the color distribution used by this RGBMapper.<|endoftext|>
2c12eb68917a4b2477c89735a1af2a72466a802d42dca7d09f66c54da37f59d0
def scaleNshift(self, scale_pct, shift_pct, callback=True): 'Stretch and/or shrink the color map via altering the shift map.\n ' self.reset_sarr(callback=False) scale = max(scale_pct, 0.05) work = self._stretch(self.sarr, scale) n = len(work) if (n < 256): m = (((256 - n) // 2) + 1) barr = numpy.array(([0] * m)) tarr = numpy.array(([255] * m)) work = numpy.concatenate([barr, work, tarr]) work = work[:256] n = (len(work) // 2) work = work[(n - 128):(n + 128)].astype('uint') assert (len(work) == 256), RGBMapError('scaled shift map is != 256') work = self._shift(work, shift_pct) assert (len(work) == 256), RGBMapError('shifted shift map is != 256') self.sarr = work if callback: self.make_callback('changed')
Stretch and/or shrink the color map via altering the shift map.
ginga/RGBMap.py
scaleNshift
sosey/ginga
0
python
def scaleNshift(self, scale_pct, shift_pct, callback=True): '\n ' self.reset_sarr(callback=False) scale = max(scale_pct, 0.05) work = self._stretch(self.sarr, scale) n = len(work) if (n < 256): m = (((256 - n) // 2) + 1) barr = numpy.array(([0] * m)) tarr = numpy.array(([255] * m)) work = numpy.concatenate([barr, work, tarr]) work = work[:256] n = (len(work) // 2) work = work[(n - 128):(n + 128)].astype('uint') assert (len(work) == 256), RGBMapError('scaled shift map is != 256') work = self._shift(work, shift_pct) assert (len(work) == 256), RGBMapError('shifted shift map is != 256') self.sarr = work if callback: self.make_callback('changed')
def scaleNshift(self, scale_pct, shift_pct, callback=True): '\n ' self.reset_sarr(callback=False) scale = max(scale_pct, 0.05) work = self._stretch(self.sarr, scale) n = len(work) if (n < 256): m = (((256 - n) // 2) + 1) barr = numpy.array(([0] * m)) tarr = numpy.array(([255] * m)) work = numpy.concatenate([barr, work, tarr]) work = work[:256] n = (len(work) // 2) work = work[(n - 128):(n + 128)].astype('uint') assert (len(work) == 256), RGBMapError('scaled shift map is != 256') work = self._shift(work, shift_pct) assert (len(work) == 256), RGBMapError('shifted shift map is != 256') self.sarr = work if callback: self.make_callback('changed')<|docstring|>Stretch and/or shrink the color map via altering the shift map.<|endoftext|>
fafc8eb5d34b470b7cbe0e2de2050f7cf93bc44d8421890512db6ebdb0a109e2
def _rfr_print_(self, opts='v'): 'Easy print of RooFitResult\n >>> result = ...\n >>> print result \n ' if (not valid_pointer(self)): return 'Invalid RooFitResult' return self.print_multiline(content=1, verbose=True)
Easy print of RooFitResult >>> result = ... >>> print result
ostap/fitting/roofitresult.py
_rfr_print_
Pro100Tema/ostap
0
python
def _rfr_print_(self, opts='v'): 'Easy print of RooFitResult\n >>> result = ...\n >>> print result \n ' if (not valid_pointer(self)): return 'Invalid RooFitResult' return self.print_multiline(content=1, verbose=True)
def _rfr_print_(self, opts='v'): 'Easy print of RooFitResult\n >>> result = ...\n >>> print result \n ' if (not valid_pointer(self)): return 'Invalid RooFitResult' return self.print_multiline(content=1, verbose=True)<|docstring|>Easy print of RooFitResult >>> result = ... >>> print result<|endoftext|>
1cf4c4f2724024b8e3e3ad9a985dd4d051e7146997586e61833ae8081c3d1b71
def _rfr_params_(self, float_only=True): "GetParameters from RooFitResult:\n >>> result = ...\n >>> params = results\n >>> p0 = params()['A'][0] ## get the value\n >>> p0s = params()['A'][1] ## get the parameter itself \n " pars = self.floatParsFinal() pars_ = {} for p in pars: pars_[p.GetName()] = (p.as_VE(), p) if (not float_only): fixed = self.constPars() for p in fixed: pars_[p.GetName()] = (p.as_VE(), p) return pars_
GetParameters from RooFitResult: >>> result = ... >>> params = results >>> p0 = params()['A'][0] ## get the value >>> p0s = params()['A'][1] ## get the parameter itself
ostap/fitting/roofitresult.py
_rfr_params_
Pro100Tema/ostap
0
python
def _rfr_params_(self, float_only=True): "GetParameters from RooFitResult:\n >>> result = ...\n >>> params = results\n >>> p0 = params()['A'][0] ## get the value\n >>> p0s = params()['A'][1] ## get the parameter itself \n " pars = self.floatParsFinal() pars_ = {} for p in pars: pars_[p.GetName()] = (p.as_VE(), p) if (not float_only): fixed = self.constPars() for p in fixed: pars_[p.GetName()] = (p.as_VE(), p) return pars_
def _rfr_params_(self, float_only=True): "GetParameters from RooFitResult:\n >>> result = ...\n >>> params = results\n >>> p0 = params()['A'][0] ## get the value\n >>> p0s = params()['A'][1] ## get the parameter itself \n " pars = self.floatParsFinal() pars_ = {} for p in pars: pars_[p.GetName()] = (p.as_VE(), p) if (not float_only): fixed = self.constPars() for p in fixed: pars_[p.GetName()] = (p.as_VE(), p) return pars_<|docstring|>GetParameters from RooFitResult: >>> result = ... >>> params = results >>> p0 = params()['A'][0] ## get the value >>> p0s = params()['A'][1] ## get the parameter itself<|endoftext|>
4634bf1c2bbb88a7067dd617c922bcd6ca61feae6d970f71bcc86372f3ed412c
def _rfr_param_(self, pname, float_only=False): "Get Parameter from RooFitResult by name \n >>> result = ...\n >>> signal = results.param('Signal')\n >>> print signal\n " if (not isinstance(pname, str)): if hasattr(pname, 'GetName'): pname = pname.GetName() elif hasattr(pname, 'getName'): pname = pname.getName() elif hasattr(pname, 'name'): pname = pname.name() p = self.parameters(float_only)[pname] return p
Get Parameter from RooFitResult by name >>> result = ... >>> signal = results.param('Signal') >>> print signal
ostap/fitting/roofitresult.py
_rfr_param_
Pro100Tema/ostap
0
python
def _rfr_param_(self, pname, float_only=False): "Get Parameter from RooFitResult by name \n >>> result = ...\n >>> signal = results.param('Signal')\n >>> print signal\n " if (not isinstance(pname, str)): if hasattr(pname, 'GetName'): pname = pname.GetName() elif hasattr(pname, 'getName'): pname = pname.getName() elif hasattr(pname, 'name'): pname = pname.name() p = self.parameters(float_only)[pname] return p
def _rfr_param_(self, pname, float_only=False): "Get Parameter from RooFitResult by name \n >>> result = ...\n >>> signal = results.param('Signal')\n >>> print signal\n " if (not isinstance(pname, str)): if hasattr(pname, 'GetName'): pname = pname.GetName() elif hasattr(pname, 'getName'): pname = pname.getName() elif hasattr(pname, 'name'): pname = pname.name() p = self.parameters(float_only)[pname] return p<|docstring|>Get Parameter from RooFitResult by name >>> result = ... >>> signal = results.param('Signal') >>> print signal<|endoftext|>
d42dd01c20f6099286c66d67057fdf2d8aeec2a245616e4862a844ecd57265a1
def _rfr_iter_(self): 'Iterator over fit results :\n >>> fit_result = ...\n >>> for i in fit_results : print i \n ' pars = self.floatParsFinal() for p in pars: (yield p) fixed = self.constPars() for f in fixed: (yield f)
Iterator over fit results : >>> fit_result = ... >>> for i in fit_results : print i
ostap/fitting/roofitresult.py
_rfr_iter_
Pro100Tema/ostap
0
python
def _rfr_iter_(self): 'Iterator over fit results :\n >>> fit_result = ...\n >>> for i in fit_results : print i \n ' pars = self.floatParsFinal() for p in pars: (yield p) fixed = self.constPars() for f in fixed: (yield f)
def _rfr_iter_(self): 'Iterator over fit results :\n >>> fit_result = ...\n >>> for i in fit_results : print i \n ' pars = self.floatParsFinal() for p in pars: (yield p) fixed = self.constPars() for f in fixed: (yield f)<|docstring|>Iterator over fit results : >>> fit_result = ... >>> for i in fit_results : print i<|endoftext|>
6f0cf4742d585eb9390ebcdd3ecdadea8344a5e5d1cfee2c99ac23b7ea2e1d2b
def _rfr_iteritems_(self, float_only=False): 'Iterator over fit items:\n >>> fit_result = ...\n >>> for name,var in fit_results.iteritems() :\n ... print name,var.as_VE() \n ' pars = self.floatParsFinal() for p in pars: (yield (p.GetName(), p)) if (not float_only): fixed = self.constPars() for f in fixed: (yield (f.GetName(), f))
Iterator over fit items: >>> fit_result = ... >>> for name,var in fit_results.iteritems() : ... print name,var.as_VE()
ostap/fitting/roofitresult.py
_rfr_iteritems_
Pro100Tema/ostap
0
python
def _rfr_iteritems_(self, float_only=False): 'Iterator over fit items:\n >>> fit_result = ...\n >>> for name,var in fit_results.iteritems() :\n ... print name,var.as_VE() \n ' pars = self.floatParsFinal() for p in pars: (yield (p.GetName(), p)) if (not float_only): fixed = self.constPars() for f in fixed: (yield (f.GetName(), f))
def _rfr_iteritems_(self, float_only=False): 'Iterator over fit items:\n >>> fit_result = ...\n >>> for name,var in fit_results.iteritems() :\n ... print name,var.as_VE() \n ' pars = self.floatParsFinal() for p in pars: (yield (p.GetName(), p)) if (not float_only): fixed = self.constPars() for f in fixed: (yield (f.GetName(), f))<|docstring|>Iterator over fit items: >>> fit_result = ... >>> for name,var in fit_results.iteritems() : ... print name,var.as_VE()<|endoftext|>
b77330507d29c793c971fd2769d2611542cf2f90bb4ef926785924b48d12d154
def _rfr_corr_(self, var1, var2): "Get correlation coefficient for two parameters \n >>> result = ...\n >>> corr = results.corr('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 return self.correlation(var1, var2)
Get correlation coefficient for two parameters >>> result = ... >>> corr = results.corr('Signal', 'Background') >>> print corr
ostap/fitting/roofitresult.py
_rfr_corr_
Pro100Tema/ostap
0
python
def _rfr_corr_(self, var1, var2): "Get correlation coefficient for two parameters \n >>> result = ...\n >>> corr = results.corr('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 return self.correlation(var1, var2)
def _rfr_corr_(self, var1, var2): "Get correlation coefficient for two parameters \n >>> result = ...\n >>> corr = results.corr('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 return self.correlation(var1, var2)<|docstring|>Get correlation coefficient for two parameters >>> result = ... >>> corr = results.corr('Signal', 'Background') >>> print corr<|endoftext|>
9ba21fa1aece99f84faa06554869b0e82ed513f65aa873df3b6312330a6b82fa
def _rfr_cov_matrix_(self, var1, var2, *vars): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov_matrix('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] args = ROOT.RooArgList(var1, var2) for v in vars: if isinstance(v, str): v = self.param(v)[1] args.add(v) cm = self.reducedCovarianceMatrix(args) N = cm.GetNrows() import ostap.math.linalg m = Ostap.Math.SymMatrix(N)() for i in range(N): for j in range(i, N): m[(i, j)] = cm(i, j) return m
Get covariance (sub) matrix >>> result = ... >>> cov = results.cov_matrix('Signal', 'Background') >>> print corr
ostap/fitting/roofitresult.py
_rfr_cov_matrix_
Pro100Tema/ostap
0
python
def _rfr_cov_matrix_(self, var1, var2, *vars): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov_matrix('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] args = ROOT.RooArgList(var1, var2) for v in vars: if isinstance(v, str): v = self.param(v)[1] args.add(v) cm = self.reducedCovarianceMatrix(args) N = cm.GetNrows() import ostap.math.linalg m = Ostap.Math.SymMatrix(N)() for i in range(N): for j in range(i, N): m[(i, j)] = cm(i, j) return m
def _rfr_cov_matrix_(self, var1, var2, *vars): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov_matrix('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] args = ROOT.RooArgList(var1, var2) for v in vars: if isinstance(v, str): v = self.param(v)[1] args.add(v) cm = self.reducedCovarianceMatrix(args) N = cm.GetNrows() import ostap.math.linalg m = Ostap.Math.SymMatrix(N)() for i in range(N): for j in range(i, N): m[(i, j)] = cm(i, j) return m<|docstring|>Get covariance (sub) matrix >>> result = ... >>> cov = results.cov_matrix('Signal', 'Background') >>> print corr<|endoftext|>
5c3e8ecfdcb7bbfbf3c2de72131987ab4eb91e6bcf7e0f89dacbe247f3d77e40
def _rfr_cov_(self, var1, var2): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 r = self.correlation(var1, var2) v1 = var1.error v2 = var2.error return ((v1 * v2) * r)
Get covariance (sub) matrix >>> result = ... >>> cov = results.cov('Signal', 'Background') >>> print corr
ostap/fitting/roofitresult.py
_rfr_cov_
Pro100Tema/ostap
0
python
def _rfr_cov_(self, var1, var2): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 r = self.correlation(var1, var2) v1 = var1.error v2 = var2.error return ((v1 * v2) * r)
def _rfr_cov_(self, var1, var2): "Get covariance (sub) matrix \n >>> result = ...\n >>> cov = results.cov('Signal', 'Background')\n >>> print corr\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] if (var1 in self.constPars()): return 0.0 if (var2 in self.constPars()): return 0.0 r = self.correlation(var1, var2) v1 = var1.error v2 = var2.error return ((v1 * v2) * r)<|docstring|>Get covariance (sub) matrix >>> result = ... >>> cov = results.cov('Signal', 'Background') >>> print corr<|endoftext|>
bbcc7c818527039a0dc3074070c55c67781e79267334c42345875dfa800f9e6e
def _rfr_getattr_(self, att): 'Get fit-parameter as attribute\n >>> r = ....\n >>> print r.sigma \n ' pars = self.floatParsFinal() for p in pars: if (att == p.GetName()): return p pars = self.constPars() for p in pars: if (att == p.GetName()): return p raise AttributeError(('RooFitResult: invalid attribute %s ' % att))
Get fit-parameter as attribute >>> r = .... >>> print r.sigma
ostap/fitting/roofitresult.py
_rfr_getattr_
Pro100Tema/ostap
0
python
def _rfr_getattr_(self, att): 'Get fit-parameter as attribute\n >>> r = ....\n >>> print r.sigma \n ' pars = self.floatParsFinal() for p in pars: if (att == p.GetName()): return p pars = self.constPars() for p in pars: if (att == p.GetName()): return p raise AttributeError(('RooFitResult: invalid attribute %s ' % att))
def _rfr_getattr_(self, att): 'Get fit-parameter as attribute\n >>> r = ....\n >>> print r.sigma \n ' pars = self.floatParsFinal() for p in pars: if (att == p.GetName()): return p pars = self.constPars() for p in pars: if (att == p.GetName()): return p raise AttributeError(('RooFitResult: invalid attribute %s ' % att))<|docstring|>Get fit-parameter as attribute >>> r = .... >>> print r.sigma<|endoftext|>
1574b7165c89ceb5df2d9623ab1e71d23a0821f1767bf4b01aaf943bfe21c93c
def _rfr_sum_(self, var1, var2, *vars): "Get correct estimate of sum of two or more variables,\n taking into account correlations\n >>> r = ...\n >>> print r.sum( 'S' , 'B' ) ## S+B\n " allvars = ((var1, var2) + vars) n = len(allvars) s = 0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) s += v.value() vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += vc for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue c2 += (2 * self.correlation(vi, vj)) return VE(s, c2)
Get correct estimate of sum of two or more variables, taking into account correlations >>> r = ... >>> print r.sum( 'S' , 'B' ) ## S+B
ostap/fitting/roofitresult.py
_rfr_sum_
Pro100Tema/ostap
0
python
def _rfr_sum_(self, var1, var2, *vars): "Get correct estimate of sum of two or more variables,\n taking into account correlations\n >>> r = ...\n >>> print r.sum( 'S' , 'B' ) ## S+B\n " allvars = ((var1, var2) + vars) n = len(allvars) s = 0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) s += v.value() vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += vc for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue c2 += (2 * self.correlation(vi, vj)) return VE(s, c2)
def _rfr_sum_(self, var1, var2, *vars): "Get correct estimate of sum of two or more variables,\n taking into account correlations\n >>> r = ...\n >>> print r.sum( 'S' , 'B' ) ## S+B\n " allvars = ((var1, var2) + vars) n = len(allvars) s = 0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) s += v.value() vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += vc for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue c2 += (2 * self.correlation(vi, vj)) return VE(s, c2)<|docstring|>Get correct estimate of sum of two or more variables, taking into account correlations >>> r = ... >>> print r.sum( 'S' , 'B' ) ## S+B<|endoftext|>
85746bb9ecb26f3fa27d926af6dcc117297d6e5a7e261e122047fe5e8e5dbfce
def _rfr_multiply_(self, var1, var2, *vars): "Get correct estimate of product of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.multiply( 'S' , 'B' ) ## S*B\n " allvars = ((var1, var2) + vars) n = len(allvars) m = 1.0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) vv = v.value() if (iszero(vv) or iszero(m)): return VE(0.0, 0.0) m *= vv vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += (vc / (vv * vv)) for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue w = vj.value w = VE(w) ww = w.value() c2 += ((2 * self.correlation(vi, vj)) / (vv * ww)) return VE(m, ((c2 * m) * m))
Get correct estimate of product of two variables, taking into account correlations >>> r = ... >>> print r.multiply( 'S' , 'B' ) ## S*B
ostap/fitting/roofitresult.py
_rfr_multiply_
Pro100Tema/ostap
0
python
def _rfr_multiply_(self, var1, var2, *vars): "Get correct estimate of product of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.multiply( 'S' , 'B' ) ## S*B\n " allvars = ((var1, var2) + vars) n = len(allvars) m = 1.0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) vv = v.value() if (iszero(vv) or iszero(m)): return VE(0.0, 0.0) m *= vv vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += (vc / (vv * vv)) for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue w = vj.value w = VE(w) ww = w.value() c2 += ((2 * self.correlation(vi, vj)) / (vv * ww)) return VE(m, ((c2 * m) * m))
def _rfr_multiply_(self, var1, var2, *vars): "Get correct estimate of product of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.multiply( 'S' , 'B' ) ## S*B\n " allvars = ((var1, var2) + vars) n = len(allvars) m = 1.0 c2 = 0 for i in range(n): vi = allvars[i] if isinstance(vi, str): vi = self.param(vi)[1] v = vi.value v = VE(v) vv = v.value() if (iszero(vv) or iszero(m)): return VE(0.0, 0.0) m *= vv vc = v.cov2() if ((0 >= vc) or (vi in self.constPars())): continue c2 += (vc / (vv * vv)) for j in range((i + 1), n): vj = allvars[j] if isinstance(vj, str): vj = self.param(vj)[1] if (vj in self.constPars()): continue w = vj.value w = VE(w) ww = w.value() c2 += ((2 * self.correlation(vi, vj)) / (vv * ww)) return VE(m, ((c2 * m) * m))<|docstring|>Get correct estimate of product of two variables, taking into account correlations >>> r = ... >>> print r.multiply( 'S' , 'B' ) ## S*B<|endoftext|>
f355a82f7a3f118d9bdbf5c3ef9943a0265bc0dbb732844d1677b1276037a5ea
def _rfr_divide_(self, var1, var2): "Get correct estimate of division of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.divide( 'S' , 'B' ) ## S/B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.divide(_v1, _v2, _cor)
Get correct estimate of division of two variables, taking into account correlations >>> r = ... >>> print r.divide( 'S' , 'B' ) ## S/B
ostap/fitting/roofitresult.py
_rfr_divide_
Pro100Tema/ostap
0
python
def _rfr_divide_(self, var1, var2): "Get correct estimate of division of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.divide( 'S' , 'B' ) ## S/B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.divide(_v1, _v2, _cor)
def _rfr_divide_(self, var1, var2): "Get correct estimate of division of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.divide( 'S' , 'B' ) ## S/B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.divide(_v1, _v2, _cor)<|docstring|>Get correct estimate of division of two variables, taking into account correlations >>> r = ... >>> print r.divide( 'S' , 'B' ) ## S/B<|endoftext|>
79700b92bb457c25b764b161cecdd37b752524e60869a1fb29416a60821c1139
def _rfr_subtract_(self, var1, var2): "Get correct estimate of subtraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.subtract( 'S' , 'B' ) ## S-B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.subtract(_v1, _v2, _cor)
Get correct estimate of subtraction of two variables, taking into account correlations >>> r = ... >>> print r.subtract( 'S' , 'B' ) ## S-B
ostap/fitting/roofitresult.py
_rfr_subtract_
Pro100Tema/ostap
0
python
def _rfr_subtract_(self, var1, var2): "Get correct estimate of subtraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.subtract( 'S' , 'B' ) ## S-B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.subtract(_v1, _v2, _cor)
def _rfr_subtract_(self, var1, var2): "Get correct estimate of subtraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.subtract( 'S' , 'B' ) ## S-B\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _v1 = var1.value _v2 = var2.value _cor = self.corr(var1, var2) return Ostap.Math.subtract(_v1, _v2, _cor)<|docstring|>Get correct estimate of subtraction of two variables, taking into account correlations >>> r = ... >>> print r.subtract( 'S' , 'B' ) ## S-B<|endoftext|>
2b5e0b11ec0ef33f8d68ca103ea5fa37f352c3d0079a5753acd77884929d46e3
def _rfr_fraction_(self, var1, var2): "Get correct estimate of fraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.fraction( 'S' , 'B' ) ## S/(S+B)\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _av1 = abs(var1.value.value()) _av2 = abs(var2.value.value()) if (_av1 > _av2): return (1 / (1 + self.ratio(var2, var1))) return (1.0 - self.fraction(var2, var1))
Get correct estimate of fraction of two variables, taking into account correlations >>> r = ... >>> print r.fraction( 'S' , 'B' ) ## S/(S+B)
ostap/fitting/roofitresult.py
_rfr_fraction_
Pro100Tema/ostap
0
python
def _rfr_fraction_(self, var1, var2): "Get correct estimate of fraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.fraction( 'S' , 'B' ) ## S/(S+B)\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _av1 = abs(var1.value.value()) _av2 = abs(var2.value.value()) if (_av1 > _av2): return (1 / (1 + self.ratio(var2, var1))) return (1.0 - self.fraction(var2, var1))
def _rfr_fraction_(self, var1, var2): "Get correct estimate of fraction of two variables,\n taking into account correlations\n >>> r = ...\n >>> print r.fraction( 'S' , 'B' ) ## S/(S+B)\n " if isinstance(var1, str): var1 = self.param(var1)[1] if isinstance(var2, str): var2 = self.param(var2)[1] _av1 = abs(var1.value.value()) _av2 = abs(var2.value.value()) if (_av1 > _av2): return (1 / (1 + self.ratio(var2, var1))) return (1.0 - self.fraction(var2, var1))<|docstring|>Get correct estimate of fraction of two variables, taking into account correlations >>> r = ... >>> print r.fraction( 'S' , 'B' ) ## S/(S+B)<|endoftext|>
5efa494e4622a47a1b2a8d41ce4dc58ecb8b648a987582213b6c95edfbbedd39
def _rfr_results_(self, *vars): "Get the required results in form of SVectorWithError object\n >>> fit_resuts = ...\n >>> res = fit_results.results( 'A', 'B' , 'C' )\n >>> print res, res.cov2() \n " _n = len(vars) _r = Ostap.Math.SVectorWithError(_n, 'double')() _i = 0 for _i1 in range(0, _n): _v1 = vars[_i1] _vv = self.param(_v1)[0] _r[_i1] = _vv _r.cov2()[(_i1, _i1)] = _vv.cov2() for _i2 in range((_i1 + 1), _n): _v2 = vars[_i2] _c12 = self.cov(_v1, _v2)(0, 1) _r.cov2()[(_i1, _i2)] = _c12 return _r
Get the required results in form of SVectorWithError object >>> fit_resuts = ... >>> res = fit_results.results( 'A', 'B' , 'C' ) >>> print res, res.cov2()
ostap/fitting/roofitresult.py
_rfr_results_
Pro100Tema/ostap
0
python
def _rfr_results_(self, *vars): "Get the required results in form of SVectorWithError object\n >>> fit_resuts = ...\n >>> res = fit_results.results( 'A', 'B' , 'C' )\n >>> print res, res.cov2() \n " _n = len(vars) _r = Ostap.Math.SVectorWithError(_n, 'double')() _i = 0 for _i1 in range(0, _n): _v1 = vars[_i1] _vv = self.param(_v1)[0] _r[_i1] = _vv _r.cov2()[(_i1, _i1)] = _vv.cov2() for _i2 in range((_i1 + 1), _n): _v2 = vars[_i2] _c12 = self.cov(_v1, _v2)(0, 1) _r.cov2()[(_i1, _i2)] = _c12 return _r
def _rfr_results_(self, *vars): "Get the required results in form of SVectorWithError object\n >>> fit_resuts = ...\n >>> res = fit_results.results( 'A', 'B' , 'C' )\n >>> print res, res.cov2() \n " _n = len(vars) _r = Ostap.Math.SVectorWithError(_n, 'double')() _i = 0 for _i1 in range(0, _n): _v1 = vars[_i1] _vv = self.param(_v1)[0] _r[_i1] = _vv _r.cov2()[(_i1, _i1)] = _vv.cov2() for _i2 in range((_i1 + 1), _n): _v2 = vars[_i2] _c12 = self.cov(_v1, _v2)(0, 1) _r.cov2()[(_i1, _i2)] = _c12 return _r<|docstring|>Get the required results in form of SVectorWithError object >>> fit_resuts = ... >>> res = fit_results.results( 'A', 'B' , 'C' ) >>> print res, res.cov2()<|endoftext|>
e186360c82d0ec40dbbc26687a088d7697f12ec02b6ed8effe40492d1fa6e57c
def _rm_migrad_(self): 'Run MIGRAD for RooMinimizer\n - see ROOT.RooMinimizer \n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n ' status = self._old_migrad_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MIGRAD status %s' % fit_status(status))) return status
Run MIGRAD for RooMinimizer - see ROOT.RooMinimizer >>> pdf = ... >>> minuit = pdf.minuit() >>> minuit.migrad()
ostap/fitting/roofitresult.py
_rm_migrad_
Pro100Tema/ostap
0
python
def _rm_migrad_(self): 'Run MIGRAD for RooMinimizer\n - see ROOT.RooMinimizer \n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n ' status = self._old_migrad_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MIGRAD status %s' % fit_status(status))) return status
def _rm_migrad_(self): 'Run MIGRAD for RooMinimizer\n - see ROOT.RooMinimizer \n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n ' status = self._old_migrad_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MIGRAD status %s' % fit_status(status))) return status<|docstring|>Run MIGRAD for RooMinimizer - see ROOT.RooMinimizer >>> pdf = ... >>> minuit = pdf.minuit() >>> minuit.migrad()<|endoftext|>
6e3db2da554ebdf8e407a9587a4d6d444c50a55bb97cc12a2e2821efb4834663
def _rm_minos_(self, *variables): "Run MINOS for set of parameters\n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n >>> minuit.minos ( 'sigma' , 'mean' ) \n " if (not variables): status = self._old_minos_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status aset = ROOT.RooArgSet() res = self.save() for v in variables: if (isinstance(v, string_types) or isinstance(v, ROOT.RooAbsReal)): par = res.param(v)[1] if par: aset.add(par) elif (isinstance(v, ROOT.RooAbsCollection) or isinstance(v, list_types)): for a in v: if isinstance(a, ROOT.RooAbsReal): par = res.param(a)[1] if par: aset.add(par) del res if aset: status = self._old_minos_(aset) else: status = self._old_minos_(aset) if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status
Run MINOS for set of parameters >>> pdf = ... >>> minuit = pdf.minuit() >>> minuit.migrad() >>> minuit.minos ( 'sigma' , 'mean' )
ostap/fitting/roofitresult.py
_rm_minos_
Pro100Tema/ostap
0
python
def _rm_minos_(self, *variables): "Run MINOS for set of parameters\n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n >>> minuit.minos ( 'sigma' , 'mean' ) \n " if (not variables): status = self._old_minos_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status aset = ROOT.RooArgSet() res = self.save() for v in variables: if (isinstance(v, string_types) or isinstance(v, ROOT.RooAbsReal)): par = res.param(v)[1] if par: aset.add(par) elif (isinstance(v, ROOT.RooAbsCollection) or isinstance(v, list_types)): for a in v: if isinstance(a, ROOT.RooAbsReal): par = res.param(a)[1] if par: aset.add(par) del res if aset: status = self._old_minos_(aset) else: status = self._old_minos_(aset) if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status
def _rm_minos_(self, *variables): "Run MINOS for set of parameters\n >>> pdf = ...\n >>> minuit = pdf.minuit()\n >>> minuit.migrad() \n >>> minuit.minos ( 'sigma' , 'mean' ) \n " if (not variables): status = self._old_minos_() if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status aset = ROOT.RooArgSet() res = self.save() for v in variables: if (isinstance(v, string_types) or isinstance(v, ROOT.RooAbsReal)): par = res.param(v)[1] if par: aset.add(par) elif (isinstance(v, ROOT.RooAbsCollection) or isinstance(v, list_types)): for a in v: if isinstance(a, ROOT.RooAbsReal): par = res.param(a)[1] if par: aset.add(par) del res if aset: status = self._old_minos_(aset) else: status = self._old_minos_(aset) if (0 != status): from ostap.fitting.utils import fit_status logger.error(('MINOS status %s' % fit_status(status))) return status<|docstring|>Run MINOS for set of parameters >>> pdf = ... >>> minuit = pdf.minuit() >>> minuit.migrad() >>> minuit.minos ( 'sigma' , 'mean' )<|endoftext|>
8bd34cd3be2cf128efdfa27a1bbe80a52ff1b10782dd8f62ae6c27cdba716147
@property def _cachedir(self) -> pathlib.Path: 'Root directory for this origin cache.' return (cache.DIR / self.__class__.__qualname__.lower())
Root directory for this origin cache.
openlake/provider/__init__.py
_cachedir
formlio/openlake
0
python
@property def _cachedir(self) -> pathlib.Path: return (cache.DIR / self.__class__.__qualname__.lower())
@property def _cachedir(self) -> pathlib.Path: return (cache.DIR / self.__class__.__qualname__.lower())<|docstring|>Root directory for this origin cache.<|endoftext|>
980eb5e1352843a3f9416cf2a3d53134c8bebfb334fe2e7996e373c19a79cc23
def _load(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame: 'Content loader with caching capabilities.' return cache.dataframe((repr(tuple((columns or []))) + repr(predicate)), (lambda : self.parse(self.fetch(columns, predicate), columns, predicate)), self._cachedir)
Content loader with caching capabilities.
openlake/provider/__init__.py
_load
formlio/openlake
0
python
def _load(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame: return cache.dataframe((repr(tuple((columns or []))) + repr(predicate)), (lambda : self.parse(self.fetch(columns, predicate), columns, predicate)), self._cachedir)
def _load(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame: return cache.dataframe((repr(tuple((columns or []))) + repr(predicate)), (lambda : self.parse(self.fetch(columns, predicate), columns, predicate)), self._cachedir)<|docstring|>Content loader with caching capabilities.<|endoftext|>
cac4b3c8b2cc1274aa922eb3f8993f3c7004c8a926fe91041f08a068c049fafc
@property def name(self) -> str: 'Name to be used for internal (unique) referencing.\n\n Must be a valid identifier -> [_a-zA-Z][_a-zA-Z0-9]*\n ' return repr(self.source)
Name to be used for internal (unique) referencing. Must be a valid identifier -> [_a-zA-Z][_a-zA-Z0-9]*
openlake/provider/__init__.py
name
formlio/openlake
0
python
@property def name(self) -> str: 'Name to be used for internal (unique) referencing.\n\n Must be a valid identifier -> [_a-zA-Z][_a-zA-Z0-9]*\n ' return repr(self.source)
@property def name(self) -> str: 'Name to be used for internal (unique) referencing.\n\n Must be a valid identifier -> [_a-zA-Z][_a-zA-Z0-9]*\n ' return repr(self.source)<|docstring|>Name to be used for internal (unique) referencing. Must be a valid identifier -> [_a-zA-Z][_a-zA-Z0-9]*<|endoftext|>
115c2570ebff45b7ff5d440fe28f93796dc159d2a9b4f714ca804b9d59503914
@property @abc.abstractmethod def source(self) -> dsl.Queryable: 'The source query this origin provides.'
The source query this origin provides.
openlake/provider/__init__.py
source
formlio/openlake
0
python
@property @abc.abstractmethod def source(self) -> dsl.Queryable:
@property @abc.abstractmethod def source(self) -> dsl.Queryable: <|docstring|>The source query this origin provides.<|endoftext|>
828c2c957ac4aacfee72bfc1ea5fbc5d75fc3a5413e57eb63033bfb77060ed2c
@abc.abstractmethod def fetch(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> Format: 'Fetch the content and return a file object.'
Fetch the content and return a file object.
openlake/provider/__init__.py
fetch
formlio/openlake
0
python
@abc.abstractmethod def fetch(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> Format:
@abc.abstractmethod def fetch(self, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> Format: <|docstring|>Fetch the content and return a file object.<|endoftext|>
aef04cebd8d43ae7befb336e781131c5f83762037c247193f4b059de79d09fe4
@abc.abstractmethod def parse(self, content: Format, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame: 'Load the origin dataset.'
Load the origin dataset.
openlake/provider/__init__.py
parse
formlio/openlake
0
python
@abc.abstractmethod def parse(self, content: Format, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame:
@abc.abstractmethod def parse(self, content: Format, columns: typing.Optional[typing.Iterable[dsl.Feature]], predicate: typing.Optional[dsl.Feature]) -> pandas.DataFrame: <|docstring|>Load the origin dataset.<|endoftext|>
3a34c9dead9c877999a4b6d3d61a6789f3e2b369f245b5a2dfb37171dc15c5d2
def handle(self, *args, **options): 'Generate a test user for each role.' first_name = factory.Faker('first_name') last_name = factory.Faker('last_name') password = 'test_password' user_count = 0 try: user = User.objects.create_user(username='test__unassigned', email='example@example.com', password=password, first_name=first_name, last_name=last_name) superuser = User.objects.create_superuser(username='test__superuser', email='example@example.com', password=password, first_name=first_name, last_name=last_name) except IntegrityError: pass else: user_count += 2 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Username: {superuser.username}') self.stdout.write(f'Password: {password}') for group in Group.objects.all(): username = f"test__{group.name.replace(' ', '_').lower()}" email = (f"test_{group.name.replace(' ', '_').lower()}" + '@example.com') try: with transaction.atomic(): user = User.objects.create_user(username=username, email=email, password=password, first_name=first_name, last_name=last_name) user.groups.add(group) except IntegrityError: pass else: user_count += 1 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Created {user_count} users.')
Generate a test user for each role.
tdrs-backend/tdpservice/users/management/commands/generate_test_users.py
handle
amilash/TANF-app
18
python
def handle(self, *args, **options): first_name = factory.Faker('first_name') last_name = factory.Faker('last_name') password = 'test_password' user_count = 0 try: user = User.objects.create_user(username='test__unassigned', email='example@example.com', password=password, first_name=first_name, last_name=last_name) superuser = User.objects.create_superuser(username='test__superuser', email='example@example.com', password=password, first_name=first_name, last_name=last_name) except IntegrityError: pass else: user_count += 2 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Username: {superuser.username}') self.stdout.write(f'Password: {password}') for group in Group.objects.all(): username = f"test__{group.name.replace(' ', '_').lower()}" email = (f"test_{group.name.replace(' ', '_').lower()}" + '@example.com') try: with transaction.atomic(): user = User.objects.create_user(username=username, email=email, password=password, first_name=first_name, last_name=last_name) user.groups.add(group) except IntegrityError: pass else: user_count += 1 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Created {user_count} users.')
def handle(self, *args, **options): first_name = factory.Faker('first_name') last_name = factory.Faker('last_name') password = 'test_password' user_count = 0 try: user = User.objects.create_user(username='test__unassigned', email='example@example.com', password=password, first_name=first_name, last_name=last_name) superuser = User.objects.create_superuser(username='test__superuser', email='example@example.com', password=password, first_name=first_name, last_name=last_name) except IntegrityError: pass else: user_count += 2 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Username: {superuser.username}') self.stdout.write(f'Password: {password}') for group in Group.objects.all(): username = f"test__{group.name.replace(' ', '_').lower()}" email = (f"test_{group.name.replace(' ', '_').lower()}" + '@example.com') try: with transaction.atomic(): user = User.objects.create_user(username=username, email=email, password=password, first_name=first_name, last_name=last_name) user.groups.add(group) except IntegrityError: pass else: user_count += 1 self.stdout.write(f'Username: {user.username}') self.stdout.write(f'Password: {password}') self.stdout.write() self.stdout.write(f'Created {user_count} users.')<|docstring|>Generate a test user for each role.<|endoftext|>
3e04ba1618e7e851dee6cd43e1568b83098d375a8b51afbfab2e964fdc8d814c
def init(baudrate: int=1000000, bits: int=8, mode: int=0, sclk: MicroBitDigitalPin=pin13, mosi: MicroBitDigitalPin=pin15, miso: MicroBitDigitalPin=pin14) -> None: 'Initialize SPI communication with the specified parameters on the\n specified ``pins``. Note that for correct communication, the parameters\n have to be the same on both communicating devices.\n\n The ``baudrate`` defines the speed of communication.\n\n The ``bits`` defines the size of bytes being transmitted. Currently only\n ``bits=8`` is supported. However, this may change in the future.\n\n The ``mode`` determines the combination of clock polarity and phase\n according to the following convention, with polarity as the high order bit\n and phase as the low order bit:\n\n +----------+-----------------+--------------+\n | SPI Mode | Polarity (CPOL) | Phase (CPHA) |\n +==========+=================+==============+\n | 0 | 0 | 0 |\n +----------+-----------------+--------------+\n | 1 | 0 | 1 |\n +----------+-----------------+--------------+\n | 2 | 1 | 0 |\n +----------+-----------------+--------------+\n | 3 | 1 | 1 |\n +----------+-----------------+--------------+\n\n Polarity (aka CPOL) 0 means that the clock is at logic value 0 when idle\n and goes high (logic value 1) when active; polarity 1 means the clock is\n at logic value 1 when idle and goes low (logic value 0) when active. Phase\n (aka CPHA) 0 means that data is sampled on the leading edge of the clock,\n and 1 means on the trailing edge\n (viz. https://en.wikipedia.org/wiki/Signal_edge).\n\n The ``sclk``, ``mosi`` and ``miso`` arguments specify the pins to use for\n each type of signal.\n '
Initialize SPI communication with the specified parameters on the specified ``pins``. Note that for correct communication, the parameters have to be the same on both communicating devices. The ``baudrate`` defines the speed of communication. The ``bits`` defines the size of bytes being transmitted. Currently only ``bits=8`` is supported. However, this may change in the future. The ``mode`` determines the combination of clock polarity and phase according to the following convention, with polarity as the high order bit and phase as the low order bit: +----------+-----------------+--------------+ | SPI Mode | Polarity (CPOL) | Phase (CPHA) | +==========+=================+==============+ | 0 | 0 | 0 | +----------+-----------------+--------------+ | 1 | 0 | 1 | +----------+-----------------+--------------+ | 2 | 1 | 0 | +----------+-----------------+--------------+ | 3 | 1 | 1 | +----------+-----------------+--------------+ Polarity (aka CPOL) 0 means that the clock is at logic value 0 when idle and goes high (logic value 1) when active; polarity 1 means the clock is at logic value 1 when idle and goes low (logic value 0) when active. Phase (aka CPHA) 0 means that data is sampled on the leading edge of the clock, and 1 means on the trailing edge (viz. https://en.wikipedia.org/wiki/Signal_edge). The ``sclk``, ``mosi`` and ``miso`` arguments specify the pins to use for each type of signal.
microbit/spi.py
init
bertbaron/pseudo-microbit
10
python
def init(baudrate: int=1000000, bits: int=8, mode: int=0, sclk: MicroBitDigitalPin=pin13, mosi: MicroBitDigitalPin=pin15, miso: MicroBitDigitalPin=pin14) -> None: 'Initialize SPI communication with the specified parameters on the\n specified ``pins``. Note that for correct communication, the parameters\n have to be the same on both communicating devices.\n\n The ``baudrate`` defines the speed of communication.\n\n The ``bits`` defines the size of bytes being transmitted. Currently only\n ``bits=8`` is supported. However, this may change in the future.\n\n The ``mode`` determines the combination of clock polarity and phase\n according to the following convention, with polarity as the high order bit\n and phase as the low order bit:\n\n +----------+-----------------+--------------+\n | SPI Mode | Polarity (CPOL) | Phase (CPHA) |\n +==========+=================+==============+\n | 0 | 0 | 0 |\n +----------+-----------------+--------------+\n | 1 | 0 | 1 |\n +----------+-----------------+--------------+\n | 2 | 1 | 0 |\n +----------+-----------------+--------------+\n | 3 | 1 | 1 |\n +----------+-----------------+--------------+\n\n Polarity (aka CPOL) 0 means that the clock is at logic value 0 when idle\n and goes high (logic value 1) when active; polarity 1 means the clock is\n at logic value 1 when idle and goes low (logic value 0) when active. Phase\n (aka CPHA) 0 means that data is sampled on the leading edge of the clock,\n and 1 means on the trailing edge\n (viz. https://en.wikipedia.org/wiki/Signal_edge).\n\n The ``sclk``, ``mosi`` and ``miso`` arguments specify the pins to use for\n each type of signal.\n '
def init(baudrate: int=1000000, bits: int=8, mode: int=0, sclk: MicroBitDigitalPin=pin13, mosi: MicroBitDigitalPin=pin15, miso: MicroBitDigitalPin=pin14) -> None: 'Initialize SPI communication with the specified parameters on the\n specified ``pins``. Note that for correct communication, the parameters\n have to be the same on both communicating devices.\n\n The ``baudrate`` defines the speed of communication.\n\n The ``bits`` defines the size of bytes being transmitted. Currently only\n ``bits=8`` is supported. However, this may change in the future.\n\n The ``mode`` determines the combination of clock polarity and phase\n according to the following convention, with polarity as the high order bit\n and phase as the low order bit:\n\n +----------+-----------------+--------------+\n | SPI Mode | Polarity (CPOL) | Phase (CPHA) |\n +==========+=================+==============+\n | 0 | 0 | 0 |\n +----------+-----------------+--------------+\n | 1 | 0 | 1 |\n +----------+-----------------+--------------+\n | 2 | 1 | 0 |\n +----------+-----------------+--------------+\n | 3 | 1 | 1 |\n +----------+-----------------+--------------+\n\n Polarity (aka CPOL) 0 means that the clock is at logic value 0 when idle\n and goes high (logic value 1) when active; polarity 1 means the clock is\n at logic value 1 when idle and goes low (logic value 0) when active. Phase\n (aka CPHA) 0 means that data is sampled on the leading edge of the clock,\n and 1 means on the trailing edge\n (viz. https://en.wikipedia.org/wiki/Signal_edge).\n\n The ``sclk``, ``mosi`` and ``miso`` arguments specify the pins to use for\n each type of signal.\n '<|docstring|>Initialize SPI communication with the specified parameters on the specified ``pins``. Note that for correct communication, the parameters have to be the same on both communicating devices. The ``baudrate`` defines the speed of communication. The ``bits`` defines the size of bytes being transmitted. Currently only ``bits=8`` is supported. 
However, this may change in the future. The ``mode`` determines the combination of clock polarity and phase according to the following convention, with polarity as the high order bit and phase as the low order bit: +----------+-----------------+--------------+ | SPI Mode | Polarity (CPOL) | Phase (CPHA) | +==========+=================+==============+ | 0 | 0 | 0 | +----------+-----------------+--------------+ | 1 | 0 | 1 | +----------+-----------------+--------------+ | 2 | 1 | 0 | +----------+-----------------+--------------+ | 3 | 1 | 1 | +----------+-----------------+--------------+ Polarity (aka CPOL) 0 means that the clock is at logic value 0 when idle and goes high (logic value 1) when active; polarity 1 means the clock is at logic value 1 when idle and goes low (logic value 0) when active. Phase (aka CPHA) 0 means that data is sampled on the leading edge of the clock, and 1 means on the trailing edge (viz. https://en.wikipedia.org/wiki/Signal_edge). The ``sclk``, ``mosi`` and ``miso`` arguments specify the pins to use for each type of signal.<|endoftext|>
13ba6799f7c75b319319ceddf2cf95fb21d9b2d64a7ceffb9b9642406de2625e
def read(nbytes: int) -> bytes: 'Read at most ``nbytes``. Returns what was read.'
Read at most ``nbytes``. Returns what was read.
microbit/spi.py
read
bertbaron/pseudo-microbit
10
python
def read(nbytes: int) -> bytes:
def read(nbytes: int) -> bytes: <|docstring|>Read at most ``nbytes``. Returns what was read.<|endoftext|>
a177ef8499e83b005943e5a3656a5c8b29504b66103378e1197efaca0b59c6f9
def write(buffer: Union[(bytes, bytearray)]) -> None: 'Write the ``buffer`` of bytes to the bus.'
Write the ``buffer`` of bytes to the bus.
microbit/spi.py
write
bertbaron/pseudo-microbit
10
python
def write(buffer: Union[(bytes, bytearray)]) -> None:
def write(buffer: Union[(bytes, bytearray)]) -> None: <|docstring|>Write the ``buffer`` of bytes to the bus.<|endoftext|>
95ab852fb5ec69b571a36427bf3fa69fd81f261052a3fa9200e62a53c30c71f8
def write_readinto(out: Union[(bytes, bytearray)], in_: bytearray) -> None: 'Write the ``out`` buffer to the bus and read any response into the ``in``\n buffer. The length of the buffers should be the same. The buffers can be\n the same object.'
Write the ``out`` buffer to the bus and read any response into the ``in`` buffer. The length of the buffers should be the same. The buffers can be the same object.
microbit/spi.py
write_readinto
bertbaron/pseudo-microbit
10
python
def write_readinto(out: Union[(bytes, bytearray)], in_: bytearray) -> None: 'Write the ``out`` buffer to the bus and read any response into the ``in``\n buffer. The length of the buffers should be the same. The buffers can be\n the same object.'
def write_readinto(out: Union[(bytes, bytearray)], in_: bytearray) -> None: 'Write the ``out`` buffer to the bus and read any response into the ``in``\n buffer. The length of the buffers should be the same. The buffers can be\n the same object.'<|docstring|>Write the ``out`` buffer to the bus and read any response into the ``in`` buffer. The length of the buffers should be the same. The buffers can be the same object.<|endoftext|>
d1d4904963dacf85288588f909869e759c77d339aaafd69659591b5e993670e5
def best_f1(true, pos_logits): 'Find optimal threshold for F1 score.\n\n true = [1, 0, 0, 1]\n pos_logits = [0.27292988, 0.27282527, 0.7942509, 0.20574914]\n ' (precision, recall, thresholds) = precision_recall_curve(true, pos_logits) thresh_scores = [] for i in range(len(thresholds)): if ((precision[i] + recall[i]) == 0): continue f1 = ((2 * (precision[i] * recall[i])) / (precision[i] + recall[i])) thresh = thresholds[i] thresh_scores.append([f1, thresh]) thresh_scores = sorted(thresh_scores, reverse=True) thresh_scores = [i for i in thresh_scores if (i[0] > 0)] return thresh_scores[0][(- 1)]
Find optimal threshold for F1 score. true = [1, 0, 0, 1] pos_logits = [0.27292988, 0.27282527, 0.7942509, 0.20574914]
sastvd/helpers/ml.py
best_f1
davidhin/linevd
13
python
def best_f1(true, pos_logits): 'Find optimal threshold for F1 score.\n\n true = [1, 0, 0, 1]\n pos_logits = [0.27292988, 0.27282527, 0.7942509, 0.20574914]\n ' (precision, recall, thresholds) = precision_recall_curve(true, pos_logits) thresh_scores = [] for i in range(len(thresholds)): if ((precision[i] + recall[i]) == 0): continue f1 = ((2 * (precision[i] * recall[i])) / (precision[i] + recall[i])) thresh = thresholds[i] thresh_scores.append([f1, thresh]) thresh_scores = sorted(thresh_scores, reverse=True) thresh_scores = [i for i in thresh_scores if (i[0] > 0)] return thresh_scores[0][(- 1)]
def best_f1(true, pos_logits): 'Find optimal threshold for F1 score.\n\n true = [1, 0, 0, 1]\n pos_logits = [0.27292988, 0.27282527, 0.7942509, 0.20574914]\n ' (precision, recall, thresholds) = precision_recall_curve(true, pos_logits) thresh_scores = [] for i in range(len(thresholds)): if ((precision[i] + recall[i]) == 0): continue f1 = ((2 * (precision[i] * recall[i])) / (precision[i] + recall[i])) thresh = thresholds[i] thresh_scores.append([f1, thresh]) thresh_scores = sorted(thresh_scores, reverse=True) thresh_scores = [i for i in thresh_scores if (i[0] > 0)] return thresh_scores[0][(- 1)]<|docstring|>Find optimal threshold for F1 score. true = [1, 0, 0, 1] pos_logits = [0.27292988, 0.27282527, 0.7942509, 0.20574914]<|endoftext|>
ed1df6e88c9a55a18aed6cfd2572339e3cdd7f2afcf43dd591698896bfebce04
def get_metrics(true, pred): 'Get relevant metrics given true labels and logits.' metrics = {} metrics['acc'] = accuracy_score(true, pred) metrics['f1'] = f1_score(true, pred, zero_division=0) metrics['rec'] = recall_score(true, pred, zero_division=0) metrics['prec'] = precision_score(true, pred, zero_division=0) metrics['mcc'] = matthews_corrcoef(true, pred) metrics['fpr'] = (- 1) metrics['fnr'] = (- 1) if (sum((true + pred)) != 0): (tn, fp, fn, tp) = confusion_matrix(true, pred).ravel() if ((fp + tn) != 0): metrics['fpr'] = (fp / (fp + tn)) if ((fn + tp) != 0): metrics['fnr'] = (fn / (fn + tp)) return metrics
Get relevant metrics given true labels and logits.
sastvd/helpers/ml.py
get_metrics
davidhin/linevd
13
python
def get_metrics(true, pred): metrics = {} metrics['acc'] = accuracy_score(true, pred) metrics['f1'] = f1_score(true, pred, zero_division=0) metrics['rec'] = recall_score(true, pred, zero_division=0) metrics['prec'] = precision_score(true, pred, zero_division=0) metrics['mcc'] = matthews_corrcoef(true, pred) metrics['fpr'] = (- 1) metrics['fnr'] = (- 1) if (sum((true + pred)) != 0): (tn, fp, fn, tp) = confusion_matrix(true, pred).ravel() if ((fp + tn) != 0): metrics['fpr'] = (fp / (fp + tn)) if ((fn + tp) != 0): metrics['fnr'] = (fn / (fn + tp)) return metrics
def get_metrics(true, pred): metrics = {} metrics['acc'] = accuracy_score(true, pred) metrics['f1'] = f1_score(true, pred, zero_division=0) metrics['rec'] = recall_score(true, pred, zero_division=0) metrics['prec'] = precision_score(true, pred, zero_division=0) metrics['mcc'] = matthews_corrcoef(true, pred) metrics['fpr'] = (- 1) metrics['fnr'] = (- 1) if (sum((true + pred)) != 0): (tn, fp, fn, tp) = confusion_matrix(true, pred).ravel() if ((fp + tn) != 0): metrics['fpr'] = (fp / (fp + tn)) if ((fn + tp) != 0): metrics['fnr'] = (fn / (fn + tp)) return metrics<|docstring|>Get relevant metrics given true labels and logits.<|endoftext|>
09578a8bc936881159111f6631943b118477a63057f39cebc3dfd3df3af62c3b
def get_metrics_logits(true, logits): 'Call get_metrics with logits.' loss = F.cross_entropy(logits, true).detach().cpu().item() if torch.is_tensor(true): true_oh = torch.nn.functional.one_hot(true).detach().cpu().numpy() true = true.detach().cpu().numpy() if torch.is_tensor(logits): sm_logits = torch.nn.functional.softmax(logits, dim=1) pos_logits = sm_logits[(:, 1)].detach().cpu().numpy() logits = logits.detach().cpu().numpy() f1_threshold = best_f1(true, pos_logits) pred = [(1 if (i > f1_threshold) else 0) for i in pos_logits] try: roc_auc = roc_auc_score(true, logits[(:, 1)]) except: roc_auc = (- 1) try: pr_auc = average_precision_score(true_oh, logits) except: pr_auc = (- 1) ret = get_metrics(true, pred) ret['roc_auc'] = roc_auc ret['pr_auc'] = pr_auc ret['pr_auc_pos'] = average_precision_score(true, logits[(:, 1)]) ret['loss'] = loss return ret
Call get_metrics with logits.
sastvd/helpers/ml.py
get_metrics_logits
davidhin/linevd
13
python
def get_metrics_logits(true, logits): loss = F.cross_entropy(logits, true).detach().cpu().item() if torch.is_tensor(true): true_oh = torch.nn.functional.one_hot(true).detach().cpu().numpy() true = true.detach().cpu().numpy() if torch.is_tensor(logits): sm_logits = torch.nn.functional.softmax(logits, dim=1) pos_logits = sm_logits[(:, 1)].detach().cpu().numpy() logits = logits.detach().cpu().numpy() f1_threshold = best_f1(true, pos_logits) pred = [(1 if (i > f1_threshold) else 0) for i in pos_logits] try: roc_auc = roc_auc_score(true, logits[(:, 1)]) except: roc_auc = (- 1) try: pr_auc = average_precision_score(true_oh, logits) except: pr_auc = (- 1) ret = get_metrics(true, pred) ret['roc_auc'] = roc_auc ret['pr_auc'] = pr_auc ret['pr_auc_pos'] = average_precision_score(true, logits[(:, 1)]) ret['loss'] = loss return ret
def get_metrics_logits(true, logits): loss = F.cross_entropy(logits, true).detach().cpu().item() if torch.is_tensor(true): true_oh = torch.nn.functional.one_hot(true).detach().cpu().numpy() true = true.detach().cpu().numpy() if torch.is_tensor(logits): sm_logits = torch.nn.functional.softmax(logits, dim=1) pos_logits = sm_logits[(:, 1)].detach().cpu().numpy() logits = logits.detach().cpu().numpy() f1_threshold = best_f1(true, pos_logits) pred = [(1 if (i > f1_threshold) else 0) for i in pos_logits] try: roc_auc = roc_auc_score(true, logits[(:, 1)]) except: roc_auc = (- 1) try: pr_auc = average_precision_score(true_oh, logits) except: pr_auc = (- 1) ret = get_metrics(true, pred) ret['roc_auc'] = roc_auc ret['pr_auc'] = pr_auc ret['pr_auc_pos'] = average_precision_score(true, logits[(:, 1)]) ret['loss'] = loss return ret<|docstring|>Call get_metrics with logits.<|endoftext|>
1f859a303f9b08fe96c4e4ace217b3097d3185e68ce0cf8790fe091267b381a8
def met_dict_to_str(md, prefix='', verbose=1): 'Convert metric dictionary to string for printing.' ret_str = prefix for (k, v) in md.items(): if (k == 'loss'): ret_str += (((k + ': ') + ('%.5f' % v)) + ' | ') else: ret_str += (((k + ': ') + ('%.3f' % v)) + ' | ') if (verbose > 0): print((('\x1b[40m\x1b[37m' + ret_str[:(- 1)]) + '\x1b[0m')) return ret_str
Convert metric dictionary to string for printing.
sastvd/helpers/ml.py
met_dict_to_str
davidhin/linevd
13
python
def met_dict_to_str(md, prefix=, verbose=1): ret_str = prefix for (k, v) in md.items(): if (k == 'loss'): ret_str += (((k + ': ') + ('%.5f' % v)) + ' | ') else: ret_str += (((k + ': ') + ('%.3f' % v)) + ' | ') if (verbose > 0): print((('\x1b[40m\x1b[37m' + ret_str[:(- 1)]) + '\x1b[0m')) return ret_str
def met_dict_to_str(md, prefix=, verbose=1): ret_str = prefix for (k, v) in md.items(): if (k == 'loss'): ret_str += (((k + ': ') + ('%.5f' % v)) + ' | ') else: ret_str += (((k + ': ') + ('%.3f' % v)) + ' | ') if (verbose > 0): print((('\x1b[40m\x1b[37m' + ret_str[:(- 1)]) + '\x1b[0m')) return ret_str<|docstring|>Convert metric dictionary to string for printing.<|endoftext|>
afc77a252fd63c0960f7a1cb8daf95843cc97aa967df05d8893df7b4acd3ebda
def met_dict_to_writer(md, step, writer, prefix): 'Given a dict of eval metrics, write to given Tensorboard writer.' for (k, v) in md.items(): writer.add_scalar(f'{prefix}/{k}', v, step)
Given a dict of eval metrics, write to given Tensorboard writer.
sastvd/helpers/ml.py
met_dict_to_writer
davidhin/linevd
13
python
def met_dict_to_writer(md, step, writer, prefix): for (k, v) in md.items(): writer.add_scalar(f'{prefix}/{k}', v, step)
def met_dict_to_writer(md, step, writer, prefix): for (k, v) in md.items(): writer.add_scalar(f'{prefix}/{k}', v, step)<|docstring|>Given a dict of eval metrics, write to given Tensorboard writer.<|endoftext|>
210dc0c0d779f239e15d712c15319bd08fd8ce42b84239da9de18313553f6b81
def print_seperator(strings: list, max_len: int): 'Print text inside a one-line string with "=" seperation to a max length.\n\n Args:\n strings (list): List of strings.\n max_len (int): Max length.\n ' midpoints = int((max_len / len(strings))) strings = [str(i) for i in strings] final_str = '' cutoff = (max_len + (9 * len(strings))) for s in strings: if ('\x1b' in s): cutoff += 9 len_s = len(s.replace('\x1b[32m', '').replace('\x1b[39m', '')) final_str += '\x1b[40m' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += f' {s} ' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += '\x1b[0m' print(final_str[:cutoff])
Print text inside a one-line string with "=" seperation to a max length. Args: strings (list): List of strings. max_len (int): Max length.
sastvd/helpers/ml.py
print_seperator
davidhin/linevd
13
python
def print_seperator(strings: list, max_len: int): 'Print text inside a one-line string with "=" seperation to a max length.\n\n Args:\n strings (list): List of strings.\n max_len (int): Max length.\n ' midpoints = int((max_len / len(strings))) strings = [str(i) for i in strings] final_str = cutoff = (max_len + (9 * len(strings))) for s in strings: if ('\x1b' in s): cutoff += 9 len_s = len(s.replace('\x1b[32m', ).replace('\x1b[39m', )) final_str += '\x1b[40m' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += f' {s} ' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += '\x1b[0m' print(final_str[:cutoff])
def print_seperator(strings: list, max_len: int): 'Print text inside a one-line string with "=" seperation to a max length.\n\n Args:\n strings (list): List of strings.\n max_len (int): Max length.\n ' midpoints = int((max_len / len(strings))) strings = [str(i) for i in strings] final_str = cutoff = (max_len + (9 * len(strings))) for s in strings: if ('\x1b' in s): cutoff += 9 len_s = len(s.replace('\x1b[32m', ).replace('\x1b[39m', )) final_str += '\x1b[40m' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += f' {s} ' final_str += ('=' * (int(((midpoints / 2) - int((len_s / 2)))) - 1)) final_str += '\x1b[0m' print(final_str[:cutoff])<|docstring|>Print text inside a one-line string with "=" seperation to a max length. Args: strings (list): List of strings. max_len (int): Max length.<|endoftext|>
b769fd6be30529f4783373342122f424db584dbaefb03f498ff17831da962eb6
def dict_mean(dict_list): 'Get mean of values from list of dicts.\n\n https://stackoverflow.com/questions/29027792\n ' mean_dict = {} for key in dict_list[0].keys(): mean_dict[key] = (sum((d[key] for d in dict_list if (not np.isnan(d[key])))) / len([d[key] for d in dict_list if (not np.isnan(d[key]))])) return mean_dict
Get mean of values from list of dicts. https://stackoverflow.com/questions/29027792
sastvd/helpers/ml.py
dict_mean
davidhin/linevd
13
python
def dict_mean(dict_list): 'Get mean of values from list of dicts.\n\n https://stackoverflow.com/questions/29027792\n ' mean_dict = {} for key in dict_list[0].keys(): mean_dict[key] = (sum((d[key] for d in dict_list if (not np.isnan(d[key])))) / len([d[key] for d in dict_list if (not np.isnan(d[key]))])) return mean_dict
def dict_mean(dict_list): 'Get mean of values from list of dicts.\n\n https://stackoverflow.com/questions/29027792\n ' mean_dict = {} for key in dict_list[0].keys(): mean_dict[key] = (sum((d[key] for d in dict_list if (not np.isnan(d[key])))) / len([d[key] for d in dict_list if (not np.isnan(d[key]))])) return mean_dict<|docstring|>Get mean of values from list of dicts. https://stackoverflow.com/questions/29027792<|endoftext|>
689b69e1fc91d39e69aca875edf3b941edd5843e5a868cb4edb6bb83b7ac7c96
def __init__(self, model, path: str, max_patience: int=100, log_every: int=10, val_every: int=50): 'Init writer.\n\n Args:\n model: Pytorch model.\n path (str): Path to save log files.\n ' self._model = model self._best_val_loss = 100 self._patience = 0 self._max_patience = max_patience self._epoch = 0 self._step = 0 self._path = Path(path) self._writer = SummaryWriter(path) self._log_every = log_every self._val_every = val_every self.save_attrs = ['_best_val_loss', '_patience', '_epoch', '_step']
Init writer. Args: model: Pytorch model. path (str): Path to save log files.
sastvd/helpers/ml.py
__init__
davidhin/linevd
13
python
def __init__(self, model, path: str, max_patience: int=100, log_every: int=10, val_every: int=50): 'Init writer.\n\n Args:\n model: Pytorch model.\n path (str): Path to save log files.\n ' self._model = model self._best_val_loss = 100 self._patience = 0 self._max_patience = max_patience self._epoch = 0 self._step = 0 self._path = Path(path) self._writer = SummaryWriter(path) self._log_every = log_every self._val_every = val_every self.save_attrs = ['_best_val_loss', '_patience', '_epoch', '_step']
def __init__(self, model, path: str, max_patience: int=100, log_every: int=10, val_every: int=50): 'Init writer.\n\n Args:\n model: Pytorch model.\n path (str): Path to save log files.\n ' self._model = model self._best_val_loss = 100 self._patience = 0 self._max_patience = max_patience self._epoch = 0 self._step = 0 self._path = Path(path) self._writer = SummaryWriter(path) self._log_every = log_every self._val_every = val_every self.save_attrs = ['_best_val_loss', '_patience', '_epoch', '_step']<|docstring|>Init writer. Args: model: Pytorch model. path (str): Path to save log files.<|endoftext|>
f67d6542bcbe7e25254f8e6f5112a76beec646e6c25ed5dd9eabdb1b8c4442ee
def log(self, train_mets, val_mets): 'Log information.' if ((self._step % self._log_every) != 0): self.step() return if (not self.log_val()): met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') self.step() return val_loss = val_mets['loss'] if (val_loss < self._best_val_loss): self._best_val_loss = val_loss with open((self._path / 'best.model'), 'wb') as f: torch.save(self._model.state_dict(), f) best_model_string = ('Best model saved: %.3f' % val_loss) best_model_string = f'{best_model_string}' self._patience = 0 else: self._patience += 1 best_model_string = 'No improvement.' print_seperator([f'Patience: {self._patience:03d}', f'Epoch: {self._epoch:03d}', f'Step: {self._step:03d}', best_model_string], 131) met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') met_dict_to_str(val_mets, 'VA = ') met_dict_to_writer(val_mets, self._step, self._writer, 'VAL') self.step()
Log information.
sastvd/helpers/ml.py
log
davidhin/linevd
13
python
def log(self, train_mets, val_mets): if ((self._step % self._log_every) != 0): self.step() return if (not self.log_val()): met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') self.step() return val_loss = val_mets['loss'] if (val_loss < self._best_val_loss): self._best_val_loss = val_loss with open((self._path / 'best.model'), 'wb') as f: torch.save(self._model.state_dict(), f) best_model_string = ('Best model saved: %.3f' % val_loss) best_model_string = f'{best_model_string}' self._patience = 0 else: self._patience += 1 best_model_string = 'No improvement.' print_seperator([f'Patience: {self._patience:03d}', f'Epoch: {self._epoch:03d}', f'Step: {self._step:03d}', best_model_string], 131) met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') met_dict_to_str(val_mets, 'VA = ') met_dict_to_writer(val_mets, self._step, self._writer, 'VAL') self.step()
def log(self, train_mets, val_mets): if ((self._step % self._log_every) != 0): self.step() return if (not self.log_val()): met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') self.step() return val_loss = val_mets['loss'] if (val_loss < self._best_val_loss): self._best_val_loss = val_loss with open((self._path / 'best.model'), 'wb') as f: torch.save(self._model.state_dict(), f) best_model_string = ('Best model saved: %.3f' % val_loss) best_model_string = f'{best_model_string}' self._patience = 0 else: self._patience += 1 best_model_string = 'No improvement.' print_seperator([f'Patience: {self._patience:03d}', f'Epoch: {self._epoch:03d}', f'Step: {self._step:03d}', best_model_string], 131) met_dict_to_str(train_mets, 'TR = ') met_dict_to_writer(train_mets, self._step, self._writer, 'TRN') met_dict_to_str(val_mets, 'VA = ') met_dict_to_writer(val_mets, self._step, self._writer, 'VAL') self.step()<|docstring|>Log information.<|endoftext|>
6743e7decac1d85165813c8f030a15bf2b610ac6f1da5e361035bca1efefe8a1
def test(self, test_mets): 'Helper function to write test mets.' print_seperator(['\x1b[36mTest Set\x1b[39m'], 135) met_dict_to_str(test_mets, 'TS = ')
Helper function to write test mets.
sastvd/helpers/ml.py
test
davidhin/linevd
13
python
def test(self, test_mets): print_seperator(['\x1b[36mTest Set\x1b[39m'], 135) met_dict_to_str(test_mets, 'TS = ')
def test(self, test_mets): print_seperator(['\x1b[36mTest Set\x1b[39m'], 135) met_dict_to_str(test_mets, 'TS = ')<|docstring|>Helper function to write test mets.<|endoftext|>
668bb37f65968344478fc56f972d17c9690c2933e879ab5f92b7956fa446bdb3
def log_val(self): 'Check whether should validate or not.' if ((self._step % self._val_every) == 0): return True return False
Check whether should validate or not.
sastvd/helpers/ml.py
log_val
davidhin/linevd
13
python
def log_val(self): if ((self._step % self._val_every) == 0): return True return False
def log_val(self): if ((self._step % self._val_every) == 0): return True return False<|docstring|>Check whether should validate or not.<|endoftext|>
40e7b58a95a9fac98e6b1b1f3355e5dabae7b3da9319ff6051c20c84f3f1139f
def step(self): 'Increment step.' self._step += 1
Increment step.
sastvd/helpers/ml.py
step
davidhin/linevd
13
python
def step(self): self._step += 1
def step(self): self._step += 1<|docstring|>Increment step.<|endoftext|>
af3d6e30083edb050bf5a8a040d8a0ea63b317ddf30d776b9937c167fc731019
def epoch(self): 'Increment epoch.' self._epoch += 1
Increment epoch.
sastvd/helpers/ml.py
epoch
davidhin/linevd
13
python
def epoch(self): self._epoch += 1
def epoch(self): self._epoch += 1<|docstring|>Increment epoch.<|endoftext|>
b829a7cf83c6284d31a11dc494b91a8978e569399166a72d5776ae1b80b48eda
def stop(self): 'Check if should stop training.' return (self._patience > self._max_patience)
Check if should stop training.
sastvd/helpers/ml.py
stop
davidhin/linevd
13
python
def stop(self): return (self._patience > self._max_patience)
def stop(self): return (self._patience > self._max_patience)<|docstring|>Check if should stop training.<|endoftext|>