Dataset schema:

    body:                    string, length 26 to 98.2k
    body_hash:               int64, range -9,222,864,604,528,158,000 to 9,221,803,474B
    docstring:               string, length 1 to 16.8k
    path:                    string, length 5 to 230
    name:                    string, length 1 to 96
    repository_name:         string, length 7 to 89
    lang:                    string, 1 distinct value (python)
    body_without_docstring:  string, length 20 to 98.2k
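Each record below is one row of this schema, flattened one field per line in the order above. A minimal sketch of inspecting such a dump with the Hugging Face `datasets` library; the dataset id is a placeholder, not this dump's actual source:

```python
# Hypothetical loading sketch: "org/python-docstrings" is a placeholder id,
# not this dump's real source; the column names come from the schema above.
from datasets import load_dataset

ds = load_dataset("org/python-docstrings", split="train")
print(ds.column_names)
# ['body', 'body_hash', 'docstring', 'path', 'name',
#  'repository_name', 'lang', 'body_without_docstring']

row = ds[0]
print(row["repository_name"], row["path"], row["name"])
print(row["docstring"][:80])
```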
def __init__(self, request, args=None, kwargs=None, progress=None): '\n\n :param request: The WAMP request ID of the original call.\n :type request: int\n :param args: Positional values for application-defined event payload.\n Must be serializable using any serializers in use.\n :type args: list or tuple or None\n :param kwargs: Keyword values for application-defined event payload.\n Must be serializable using any serializers in use.\n :type kwargs: dict or None\n :param progress: If ``True``, this result is a progressive invocation result, and subsequent\n results (or a final error) will follow.\n :type progress: bool or None\n ' assert (type(request) in six.integer_types) assert ((args is None) or (type(args) in [list, tuple])) assert ((kwargs is None) or (type(kwargs) == dict)) assert ((progress is None) or (type(progress) == bool)) Message.__init__(self) self.request = request self.args = args self.kwargs = kwargs self.progress = progress
-7,122,721,118,999,374,000
:param request: The WAMP request ID of the original call. :type request: int :param args: Positional values for application-defined event payload. Must be serializable using any serializers in use. :type args: list or tuple or None :param kwargs: Keyword values for application-defined event payload. Must be serializable using any serializers in use. :type kwargs: dict or None :param progress: If ``True``, this result is a progressive invocation result, and subsequent results (or a final error) will follow. :type progress: bool or None
ThirdParty/AutobahnPython/autobahn/wamp/message.py
__init__
Crimson-MITK-ThirdParty/VTK-7.0.0
python
def __init__(self, request, args=None, kwargs=None, progress=None): '\n\n :param request: The WAMP request ID of the original call.\n :type request: int\n :param args: Positional values for application-defined event payload.\n Must be serializable using any serializers in use.\n :type args: list or tuple or None\n :param kwargs: Keyword values for application-defined event payload.\n Must be serializable using any serializers in use.\n :type kwargs: dict or None\n :param progress: If ``True``, this result is a progressive invocation result, and subsequent\n results (or a final error) will follow.\n :type progress: bool or None\n ' assert (type(request) in six.integer_types) assert ((args is None) or (type(args) in [list, tuple])) assert ((kwargs is None) or (type(kwargs) == dict)) assert ((progress is None) or (type(progress) == bool)) Message.__init__(self) self.request = request self.args = args self.kwargs = kwargs self.progress = progress
@staticmethod def parse(wmsg): '\n Verifies and parses an unserialized raw message into an actual WAMP message instance.\n\n :param wmsg: The unserialized raw message.\n :type wmsg: list\n\n :returns: An instance of this class.\n ' assert ((len(wmsg) > 0) and (wmsg[0] == Yield.MESSAGE_TYPE)) if (len(wmsg) not in (3, 4, 5)): raise ProtocolError('invalid message length {0} for YIELD'.format(len(wmsg))) request = check_or_raise_id(wmsg[1], "'request' in YIELD") options = check_or_raise_extra(wmsg[2], "'options' in YIELD") args = None if (len(wmsg) > 3): args = wmsg[3] if (type(args) != list): raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args))) kwargs = None if (len(wmsg) > 4): kwargs = wmsg[4] if (type(kwargs) != dict): raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs))) progress = None if (u'progress' in options): option_progress = options[u'progress'] if (type(option_progress) != bool): raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress))) progress = option_progress obj = Yield(request, args=args, kwargs=kwargs, progress=progress) return obj
-2,507,044,840,997,064,700
Verifies and parses an unserialized raw message into an actual WAMP message instance. :param wmsg: The unserialized raw message. :type wmsg: list :returns: An instance of this class.
ThirdParty/AutobahnPython/autobahn/wamp/message.py
parse
Crimson-MITK-ThirdParty/VTK-7.0.0
python
@staticmethod def parse(wmsg): '\n Verifies and parses an unserialized raw message into an actual WAMP message instance.\n\n :param wmsg: The unserialized raw message.\n :type wmsg: list\n\n :returns: An instance of this class.\n ' assert ((len(wmsg) > 0) and (wmsg[0] == Yield.MESSAGE_TYPE)) if (len(wmsg) not in (3, 4, 5)): raise ProtocolError('invalid message length {0} for YIELD'.format(len(wmsg))) request = check_or_raise_id(wmsg[1], "'request' in YIELD") options = check_or_raise_extra(wmsg[2], "'options' in YIELD") args = None if (len(wmsg) > 3): args = wmsg[3] if (type(args) != list): raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args))) kwargs = None if (len(wmsg) > 4): kwargs = wmsg[4] if (type(kwargs) != dict): raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs))) progress = None if (u'progress' in options): option_progress = options[u'progress'] if (type(option_progress) != bool): raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress))) progress = option_progress obj = Yield(request, args=args, kwargs=kwargs, progress=progress) return obj
def marshal(self): '\n Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`\n ' options = {} if (self.progress is not None): options[u'progress'] = self.progress if self.kwargs: return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs] elif self.args: return [Yield.MESSAGE_TYPE, self.request, options, self.args] else: return [Yield.MESSAGE_TYPE, self.request, options]
7,874,992,123,354,696,000
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
ThirdParty/AutobahnPython/autobahn/wamp/message.py
marshal
Crimson-MITK-ThirdParty/VTK-7.0.0
python
def marshal(self): '\n \n ' options = {} if (self.progress is not None): options[u'progress'] = self.progress if self.kwargs: return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs] elif self.args: return [Yield.MESSAGE_TYPE, self.request, options, self.args] else: return [Yield.MESSAGE_TYPE, self.request, options]
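The three Yield records above (constructor, `parse`, `marshal`) round-trip a WAMP YIELD message between the class and its wire-format list. A dependency-free sketch of that round trip, with `six` and autobahn's `Message`/`ProtocolError` machinery stripped out; `MESSAGE_TYPE = 70` follows the WAMP basic profile:

```python
# Minimal, dependency-free sketch of the Yield message shown above; not
# autobahn's actual class. Error handling is simplified to plain ValueError.
class Yield:
    MESSAGE_TYPE = 70  # YIELD message code per the WAMP basic profile

    def __init__(self, request, args=None, kwargs=None, progress=None):
        self.request = request
        self.args = args
        self.kwargs = kwargs
        self.progress = progress

    def marshal(self):
        options = {}
        if self.progress is not None:
            options['progress'] = self.progress
        if self.kwargs:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
        if self.args:
            return [Yield.MESSAGE_TYPE, self.request, options, self.args]
        return [Yield.MESSAGE_TYPE, self.request, options]

    @staticmethod
    def parse(wmsg):
        # Simplified checks; the real parse raises ProtocolError with details.
        if len(wmsg) not in (3, 4, 5) or wmsg[0] != Yield.MESSAGE_TYPE:
            raise ValueError('invalid YIELD message')
        args = wmsg[3] if len(wmsg) > 3 else None
        kwargs = wmsg[4] if len(wmsg) > 4 else None
        return Yield(wmsg[1], args=args, kwargs=kwargs,
                     progress=wmsg[2].get('progress'))

raw = Yield(12345, args=[1, 2], progress=True).marshal()
print(raw)                                   # [70, 12345, {'progress': True}, [1, 2]]
msg = Yield.parse(raw)
print(msg.request, msg.args, msg.progress)   # 12345 [1, 2] True
```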
def __str__(self): '\n Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`\n ' return 'WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})'.format(self.request, self.args, self.kwargs, self.progress)
-8,893,411,419,016,217,000
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
ThirdParty/AutobahnPython/autobahn/wamp/message.py
__str__
Crimson-MITK-ThirdParty/VTK-7.0.0
python
def __str__(self): '\n \n ' return 'WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})'.format(self.request, self.args, self.kwargs, self.progress)
def __init__(self, reason=ErrorReason.NO_ERROR, error_code=None, error_detail=''): ' Constructs a DISCONNECT-State with given reason (``ErrorReason``\n enum), error id and additional information provided as string.\n\n ' self._error_code = error_code self._error_detail = error_detail self._error_reason = reason
-836,847,888,539,756,000
Constructs a DISCONNECT-State with given reason (``ErrorReason`` enum), error id and additional information provided as string.
bokeh/client/states.py
__init__
lvcarlosja/bokeh
python
def __init__(self, reason=ErrorReason.NO_ERROR, error_code=None, error_detail=''): ' Constructs a DISCONNECT-State with given reason (``ErrorReason``\n enum), error id and additional information provided as string.\n\n ' self._error_code = error_code self._error_detail = error_detail self._error_reason = reason
@property def error_reason(self): ' The reason for the error encoded as an enumeration value.\n\n ' return self._error_reason
-655,606,898,689,896,000
The reason for the error encoded as an enumeration value.
bokeh/client/states.py
error_reason
lvcarlosja/bokeh
python
@property def error_reason(self): ' \n\n ' return self._error_reason
@property def error_code(self): ' Holds the error code, if any. None otherwise.\n\n ' return self._error_code
-963,558,839,673,669,000
Holds the error code, if any. None otherwise.
bokeh/client/states.py
error_code
lvcarlosja/bokeh
python
@property def error_code(self): ' \n\n ' return self._error_code
@property def error_detail(self): ' Holds the error message, if any. Empty string otherwise.\n\n ' return self._error_detail
-3,810,004,267,138,733,000
Holds the error message, if any. Empty string otherwise.
bokeh/client/states.py
error_detail
lvcarlosja/bokeh
python
@property def error_detail(self): ' \n\n ' return self._error_detail
@property def reply(self): ' The reply from the server. (``None`` until the reply arrives) ' return self._reply
-7,536,372,399,029,573,000
The reply from the server. (``None`` until the reply arrives)
bokeh/client/states.py
reply
lvcarlosja/bokeh
python
@property def reply(self): ' ' return self._reply
@property def reqid(self): ' The request ID of the originating message. ' return self._reqid
2,244,517,005,024,000,300
The request ID of the originating message.
bokeh/client/states.py
reqid
lvcarlosja/bokeh
python
@property def reqid(self): ' ' return self._reqid
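The six bokeh records above are all one pattern: state objects that stash constructor arguments in underscore-prefixed attributes and expose them through read-only properties (`reply` and `reqid` belong to a sibling state class in the same file). A minimal sketch of the disconnected state; `ErrorReason` here is a stand-in enum, not bokeh's actual definition:

```python
# Illustrative stand-in for the pattern in bokeh/client/states.py; ErrorReason
# is a simplified enum, and the class name is hypothetical.
from enum import Enum

class ErrorReason(Enum):
    NO_ERROR = 0
    HTTP_ERROR = 1
    NETWORK_ERROR = 2

class Disconnected:
    def __init__(self, reason=ErrorReason.NO_ERROR, error_code=None, error_detail=''):
        self._error_reason = reason
        self._error_code = error_code
        self._error_detail = error_detail

    @property
    def error_reason(self):   # read-only view of the stored reason
        return self._error_reason

    @property
    def error_code(self):     # error code if any, None otherwise
        return self._error_code

    @property
    def error_detail(self):   # error message if any, empty string otherwise
        return self._error_detail

state = Disconnected(ErrorReason.HTTP_ERROR, 403, 'Forbidden')
print(state.error_reason, state.error_code, state.error_detail)
```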
@nb.njit(parallel=True) def _snn_imp(ind, ref_set_): 'Internal function for fast snn calculation\n\n    Parameters\n    ----------\n    ind : int\n        Indices returned by kNN.\n\n    ref_set_ : int, optional (default=10)\n        specifies the number of shared nearest neighbors to create the\n        reference set. Note that ref_set must be smaller than n_neighbors.\n\n    ' n = ind.shape[0] _count = np.zeros(shape=(n, ref_set_), dtype=np.uint32) for i in nb.prange(n): temp = np.empty(n, dtype=np.uint32) test_element_set = set(ind[i]) for j in nb.prange(n): temp[j] = len(set(ind[j]).intersection(test_element_set)) temp[i] = np.iinfo(np.uint32).max _count[i] = np.argsort(temp)[::(- 1)][1:(ref_set_ + 1)] return _count
3,596,093,814,803,237,000
Internal function for fast snn calculation Parameters ---------- ind : int Indices returned by kNN. ref_set_ : int, optional (default=10) specifies the number of shared nearest neighbors to create the reference set. Note that ref_set must be smaller than n_neighbors.
pyod/models/sod.py
_snn_imp
BillyGareth/pyod
python
@nb.njit(parallel=True) def _snn_imp(ind, ref_set_): 'Internal function for fast snn calculation\n\n    Parameters\n    ----------\n    ind : int\n        Indices returned by kNN.\n\n    ref_set_ : int, optional (default=10)\n        specifies the number of shared nearest neighbors to create the\n        reference set. Note that ref_set must be smaller than n_neighbors.\n\n    ' n = ind.shape[0] _count = np.zeros(shape=(n, ref_set_), dtype=np.uint32) for i in nb.prange(n): temp = np.empty(n, dtype=np.uint32) test_element_set = set(ind[i]) for j in nb.prange(n): temp[j] = len(set(ind[j]).intersection(test_element_set)) temp[i] = np.iinfo(np.uint32).max _count[i] = np.argsort(temp)[::(- 1)][1:(ref_set_ + 1)] return _count
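`_snn_imp` counts, for each pair of points, how many k-nearest-neighbor indices they share, then keeps the `ref_set_` points with the highest overlap (the `np.iinfo(np.uint32).max` trick ranks each point first in its own row so the `[1:(ref_set_ + 1)]` slice drops it). The same overlap count can be written densely in NumPy; this is an illustrative re-expression, not pyod's kernel:

```python
import numpy as np

def snn_reference_sets(ind, ref_set_):
    """Indices of the ref_set_ points sharing the most kNN neighbors with each row."""
    n = ind.shape[0]
    # membership[i, p] is True iff point p is among point i's k nearest neighbors.
    membership = np.zeros((n, ind.max() + 1), dtype=bool)
    membership[np.arange(n)[:, None], ind] = True
    # overlap[i, j] = |neighbors(i) & neighbors(j)|, i.e. the kernel's temp[j].
    overlap = membership.astype(np.int64) @ membership.T.astype(np.int64)
    np.fill_diagonal(overlap, -1)  # a point never enters its own reference set
    # keep the ref_set_ columns with the largest overlap, best first
    return np.argsort(overlap, axis=1)[:, ::-1][:, :ref_set_]

ind = np.array([[1, 2], [0, 2], [0, 1], [0, 2]])  # toy kNN index table (k=2)
print(snn_reference_sets(ind, 2))
```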
def fit(self, X, y=None): 'Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ' X = check_array(X) self._set_n_classes(y) self.decision_scores_ = self.decision_function(X) self._process_decision_scores() return self
4,795,284,134,973,060,000
Fit detector. y is ignored in unsupervised methods. Parameters ---------- X : numpy array of shape (n_samples, n_features) The input samples. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Fitted estimator.
pyod/models/sod.py
fit
BillyGareth/pyod
python
def fit(self, X, y=None): 'Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ' X = check_array(X) self._set_n_classes(y) self.decision_scores_ = self.decision_function(X) self._process_decision_scores() return self
def decision_function(self, X): 'Predict raw anomaly score of X using the fitted detector.\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n ' return self._sod(X)
-6,096,792,183,057,632,000
Predict raw anomaly score of X using the fitted detector. The anomaly score of an input sample is computed based on different detector algorithms. For consistency, outliers are assigned with larger anomaly scores. Parameters ---------- X : numpy array of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- anomaly_scores : numpy array of shape (n_samples,) The anomaly score of the input samples.
pyod/models/sod.py
decision_function
BillyGareth/pyod
python
def decision_function(self, X): 'Predict raw anomaly score of X using the fitted detector.\n The anomaly score of an input sample is computed based on different\n detector algorithms. For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n ' return self._sod(X)
def _snn(self, X): 'This function is called internally to calculate the shared nearest\n neighbors (SNN). SNN is reported to be more robust than k nearest\n neighbors.\n\n Returns\n -------\n snn_indices : numpy array of shape (n_shared_nearest_neighbors,)\n The indices of top k shared nearest neighbors for each observation.\n ' knn = NearestNeighbors(n_neighbors=self.n_neighbors_) knn.fit(X) ind = knn.kneighbors(return_distance=False) return _snn_imp(ind, self.ref_set_)
-7,087,107,546,413,116,000
This function is called internally to calculate the shared nearest neighbors (SNN). SNN is reported to be more robust than k nearest neighbors. Returns ------- snn_indices : numpy array of shape (n_shared_nearest_neighbors,) The indices of top k shared nearest neighbors for each observation.
pyod/models/sod.py
_snn
BillyGareth/pyod
python
def _snn(self, X): 'This function is called internally to calculate the shared nearest\n neighbors (SNN). SNN is reported to be more robust than k nearest\n neighbors.\n\n Returns\n -------\n snn_indices : numpy array of shape (n_shared_nearest_neighbors,)\n The indices of top k shared nearest neighbors for each observation.\n ' knn = NearestNeighbors(n_neighbors=self.n_neighbors_) knn.fit(X) ind = knn.kneighbors(return_distance=False) return _snn_imp(ind, self.ref_set_)
def _sod(self, X): 'This function is called internally to perform subspace outlier \n detection algorithm.\n \n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n ' ref_inds = self._snn(X) anomaly_scores = np.zeros(shape=(X.shape[0],)) for i in range(X.shape[0]): obs = X[i] ref = X[(ref_inds[(i,)],)] means = np.mean(ref, axis=0) var_total = (np.sum(np.sum(np.square((ref - means)))) / self.ref_set_) var_expect = ((self.alpha_ * var_total) / X.shape[1]) var_actual = np.var(ref, axis=0) var_inds = [(1 if (j < var_expect) else 0) for j in var_actual] rel_dim = np.sum(var_inds) if (rel_dim != 0): anomaly_scores[i] = np.sqrt((np.dot(var_inds, np.square((obs - means))) / rel_dim)) return anomaly_scores
1,585,102,162,273,630,700
This function is called internally to perform subspace outlier detection algorithm. Returns ------- anomaly_scores : numpy array of shape (n_samples,) The anomaly score of the input samples.
pyod/models/sod.py
_sod
BillyGareth/pyod
python
def _sod(self, X): 'This function is called internally to perform subspace outlier \n detection algorithm.\n \n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n ' ref_inds = self._snn(X) anomaly_scores = np.zeros(shape=(X.shape[0],)) for i in range(X.shape[0]): obs = X[i] ref = X[(ref_inds[(i,)],)] means = np.mean(ref, axis=0) var_total = (np.sum(np.sum(np.square((ref - means)))) / self.ref_set_) var_expect = ((self.alpha_ * var_total) / X.shape[1]) var_actual = np.var(ref, axis=0) var_inds = [(1 if (j < var_expect) else 0) for j in var_actual] rel_dim = np.sum(var_inds) if (rel_dim != 0): anomaly_scores[i] = np.sqrt((np.dot(var_inds, np.square((obs - means))) / rel_dim)) return anomaly_scores
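Together, `fit`, `decision_function`, `_snn`, and `_sod` implement pyod's Subspace Outlier Detection: each point is compared to the mean of its shared-nearest-neighbor reference set, but only along dimensions whose variance falls below the `alpha_`-scaled expectation. A usage sketch against pyod's public API; the constructor defaults shown are assumptions inferred from the attributes used above (`n_neighbors_`, `ref_set_`, `alpha_`):

```python
import numpy as np
from pyod.models.sod import SOD

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 5))
X[:5] += 6                       # plant a few far-off points

# Parameter values here are assumed defaults, not guaranteed.
clf = SOD(n_neighbors=20, ref_set=10, alpha=0.8)
clf.fit(X)                       # unsupervised: y is ignored

print(clf.decision_scores_[:5])  # raw anomaly scores for the training points
print(clf.labels_[:5])           # 0 = inlier, 1 = outlier
```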
@pytest.mark.nondestructive def test_feedback_can_be_filtered_by_all_products_and_versions(self, mozwebqa): "Tests product filtering in dashboard\n\n 1. Verify that at least one product exists\n 2. Verify that filtering by product returns results\n 3. Verify that versions show up when you choose a product\n 4. Verify that the state of the filters are correct after being applied\n 5. Verify product and version values in the URL\n\n NB: We don't cycle through all product/version\n combinations--only the first two of each.\n\n " dashboard_pg = DashboardPage(mozwebqa) dashboard_pg.go_to_dashboard_page() total_messages = dashboard_pg.total_message_count products = dashboard_pg.product_filter.products Assert.greater(len(products), 0) for product in products[:2]: if (not product): continue dashboard_pg.product_filter.select_product(product) Assert.greater(total_messages, dashboard_pg.total_message_count) versions = dashboard_pg.product_filter.versions Assert.greater(len(versions), 0) for version in versions[:2]: if (not version): continue dashboard_pg.product_filter.select_version(version) Assert.greater(total_messages, dashboard_pg.total_message_count) Assert.equal(dashboard_pg.product_filter.selected_product, product) Assert.equal(dashboard_pg.product_filter.selected_version, version) Assert.equal(dashboard_pg.product_from_url, product) Assert.equal(dashboard_pg.version_from_url, version) Assert.greater(len(dashboard_pg.messages), 0) dashboard_pg.product_filter.unselect_version(version) dashboard_pg.product_filter.unselect_product(product)
-7,117,533,594,935,297,000
Tests product filtering in dashboard 1. Verify that at least one product exists 2. Verify that filtering by product returns results 3. Verify that versions show up when you choose a product 4. Verify that the state of the filters are correct after being applied 5. Verify product and version values in the URL NB: We don't cycle through all product/version combinations--only the first two of each.
smoketests/tests/dashboard/test_product_filter.py
test_feedback_can_be_filtered_by_all_products_and_versions
ANKIT-KS/fjord
python
@pytest.mark.nondestructive def test_feedback_can_be_filtered_by_all_products_and_versions(self, mozwebqa): "Tests product filtering in dashboard\n\n 1. Verify that at least one product exists\n 2. Verify that filtering by product returns results\n 3. Verify that versions show up when you choose a product\n 4. Verify that the state of the filters are correct after being applied\n 5. Verify product and version values in the URL\n\n NB: We don't cycle through all product/version\n combinations--only the first two of each.\n\n " dashboard_pg = DashboardPage(mozwebqa) dashboard_pg.go_to_dashboard_page() total_messages = dashboard_pg.total_message_count products = dashboard_pg.product_filter.products Assert.greater(len(products), 0) for product in products[:2]: if (not product): continue dashboard_pg.product_filter.select_product(product) Assert.greater(total_messages, dashboard_pg.total_message_count) versions = dashboard_pg.product_filter.versions Assert.greater(len(versions), 0) for version in versions[:2]: if (not version): continue dashboard_pg.product_filter.select_version(version) Assert.greater(total_messages, dashboard_pg.total_message_count) Assert.equal(dashboard_pg.product_filter.selected_product, product) Assert.equal(dashboard_pg.product_filter.selected_version, version) Assert.equal(dashboard_pg.product_from_url, product) Assert.equal(dashboard_pg.version_from_url, version) Assert.greater(len(dashboard_pg.messages), 0) dashboard_pg.product_filter.unselect_version(version) dashboard_pg.product_filter.unselect_product(product)
def make_instance(self, include_optional): 'Test RequestMethodConfig\n include_optional is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return RequestMethodConfig(action='0', errors=[ory_kratos_client.models.error.Error(message='0')], fields=[ory_kratos_client.models.form_field.formField(disabled=True, errors=[ory_kratos_client.models.error.Error(message='0')], name='0', pattern='0', required=True, type='0', value=ory_kratos_client.models.value.value())], method='0') else: return RequestMethodConfig(action='0', fields=[ory_kratos_client.models.form_field.formField(disabled=True, errors=[ory_kratos_client.models.error.Error(message='0')], name='0', pattern='0', required=True, type='0', value=ory_kratos_client.models.value.value())], method='0')
-448,022,077,382,581,440
Test RequestMethodConfig include_optional is a boolean, when False only required params are included, when True both required and optional params are included
clients/kratos/python/test/test_request_method_config.py
make_instance
Marcuzz/sdk
python
def make_instance(self, include_optional): 'Test RequestMethodConfig\n include_optional is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return RequestMethodConfig(action='0', errors=[ory_kratos_client.models.error.Error(message='0')], fields=[ory_kratos_client.models.form_field.formField(disabled=True, errors=[ory_kratos_client.models.error.Error(message='0')], name='0', pattern='0', required=True, type='0', value=ory_kratos_client.models.value.value())], method='0') else: return RequestMethodConfig(action='0', fields=[ory_kratos_client.models.form_field.formField(disabled=True, errors=[ory_kratos_client.models.error.Error(message='0')], name='0', pattern='0', required=True, type='0', value=ory_kratos_client.models.value.value())], method='0')
def testRequestMethodConfig(self): 'Test RequestMethodConfig' inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
-378,840,182,432,186,400
Test RequestMethodConfig
clients/kratos/python/test/test_request_method_config.py
testRequestMethodConfig
Marcuzz/sdk
python
def testRequestMethodConfig(self): inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
def testV2beta1HorizontalPodAutoscaler(self): '\n Test V2beta1HorizontalPodAutoscaler\n ' pass
2,167,114,869,670,492,000
Test V2beta1HorizontalPodAutoscaler
kubernetes/test/test_v2beta1_horizontal_pod_autoscaler.py
testV2beta1HorizontalPodAutoscaler
TokkoLabs/client-python
python
def testV2beta1HorizontalPodAutoscaler(self): '\n \n ' pass
def powerset(iterable): 'powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)' s = list(iterable) return chain.from_iterable((combinations(s, r) for r in range((len(s) + 1))))
4,877,089,489,311,841,000
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
DiffractionClassifierCombinatorial2.0.py
powerset
MatthewGong/DiffractionClassification
python
def powerset(iterable): s = list(iterable) return chain.from_iterable((combinations(s, r) for r in range((len(s) + 1))))
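The `powerset` record's docstring doubles as its own test case; restored to runnable form with the `itertools` import it relies on:

```python
from itertools import chain, combinations

def powerset(iterable):
    # powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(list(powerset([1, 2, 3])))
# [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
```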
@pytest.mark.usefixtures('hass_history') def test_setup(): 'Test setup method of history.' pass
8,167,136,447,770,618,000
Test setup method of history.
tests/components/history/test_init.py
test_setup
0xFEEDC0DE64/homeassistant-core
python
@pytest.mark.usefixtures('hass_history') def test_setup(): pass
def test_get_significant_states(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) hist = get_significant_states(hass, zero, four, filters=history.Filters()) assert (states == hist)
67,503,898,248,999,780
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
tests/components/history/test_init.py
test_get_significant_states
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) hist = get_significant_states(hass, zero, four, filters=history.Filters()) assert (states == hist)
def test_get_significant_states_minimal_response(hass_history): 'Test that only significant states are returned.\n\n When minimal responses is set only the first and\n last states return a complete state.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) hist = get_significant_states(hass, zero, four, filters=history.Filters(), minimal_response=True) input_state = states['media_player.test'][1] orig_last_changed = json.dumps(process_timestamp(input_state.last_changed), cls=JSONEncoder).replace('"', '') orig_state = input_state.state states['media_player.test'][1] = {'last_changed': orig_last_changed, 'state': orig_state} assert (states == hist)
3,880,872,984,299,671,600
Test that only significant states are returned. When minimal responses is set only the first and last states return a complete state. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
tests/components/history/test_init.py
test_get_significant_states_minimal_response
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_minimal_response(hass_history): 'Test that only significant states are returned.\n\n When minimal responses is set only the first and\n last states return a complete state.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) hist = get_significant_states(hass, zero, four, filters=history.Filters(), minimal_response=True) input_state = states['media_player.test'][1] orig_last_changed = json.dumps(process_timestamp(input_state.last_changed), cls=JSONEncoder).replace('"', '') orig_state = input_state.state states['media_player.test'][1] = {'last_changed': orig_last_changed, 'state': orig_state} assert (states == hist)
def test_get_significant_states_with_initial(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) one = (zero + timedelta(seconds=1)) one_and_half = (zero + timedelta(seconds=1.5)) for entity_id in states: if (entity_id == 'media_player.test'): states[entity_id] = states[entity_id][1:] for state in states[entity_id]: if (state.last_changed == one): state.last_changed = one_and_half hist = get_significant_states(hass, one_and_half, four, filters=history.Filters(), include_start_time_state=True) assert (states == hist)
-51,942,831,712,115,750
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
tests/components/history/test_init.py
test_get_significant_states_with_initial
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_with_initial(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) one = (zero + timedelta(seconds=1)) one_and_half = (zero + timedelta(seconds=1.5)) for entity_id in states: if (entity_id == 'media_player.test'): states[entity_id] = states[entity_id][1:] for state in states[entity_id]: if (state.last_changed == one): state.last_changed = one_and_half hist = get_significant_states(hass, one_and_half, four, filters=history.Filters(), include_start_time_state=True) assert (states == hist)
def test_get_significant_states_without_initial(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) one = (zero + timedelta(seconds=1)) one_and_half = (zero + timedelta(seconds=1.5)) for entity_id in states: states[entity_id] = list(filter((lambda s: (s.last_changed != one)), states[entity_id])) del states['media_player.test2'] hist = get_significant_states(hass, one_and_half, four, filters=history.Filters(), include_start_time_state=False) assert (states == hist)
-2,799,910,346,006,164,500
Test that only significant states are returned. We should get back every thermostat change that includes an attribute change, but only the state updates for media player (attribute changes are not significant and not returned).
tests/components/history/test_init.py
test_get_significant_states_without_initial
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_without_initial(hass_history): 'Test that only significant states are returned.\n\n We should get back every thermostat change that\n includes an attribute change, but only the state updates for\n media player (attribute changes are not significant and not returned).\n ' hass = hass_history (zero, four, states) = record_states(hass) one = (zero + timedelta(seconds=1)) one_and_half = (zero + timedelta(seconds=1.5)) for entity_id in states: states[entity_id] = list(filter((lambda s: (s.last_changed != one)), states[entity_id])) del states['media_player.test2'] hist = get_significant_states(hass, one_and_half, four, filters=history.Filters(), include_start_time_state=False) assert (states == hist)
def test_get_significant_states_entity_id(hass_history): 'Test that only significant states are returned for one entity.' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] hist = get_significant_states(hass, zero, four, ['media_player.test'], filters=history.Filters()) assert (states == hist)
-4,269,895,489,921,417,000
Test that only significant states are returned for one entity.
tests/components/history/test_init.py
test_get_significant_states_entity_id
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_entity_id(hass_history): hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] hist = get_significant_states(hass, zero, four, ['media_player.test'], filters=history.Filters()) assert (states == hist)
def test_get_significant_states_multiple_entity_ids(hass_history): 'Test that only significant states are returned for multiple entities.' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] hist = get_significant_states(hass, zero, four, ['media_player.test', 'thermostat.test'], filters=history.Filters()) assert (states == hist)
-3,067,380,156,919,801,300
Test that only significant states are returned for multiple entities.
tests/components/history/test_init.py
test_get_significant_states_multiple_entity_ids
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_multiple_entity_ids(hass_history): hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] hist = get_significant_states(hass, zero, four, ['media_player.test', 'thermostat.test'], filters=history.Filters()) assert (states == hist)
def test_get_significant_states_exclude_domain(hass_history): 'Test if significant states are returned when excluding domains.\n\n We should get back every thermostat change that includes an attribute\n change, but no media player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['media_player']}}}) check_significant_states(hass, zero, four, states, config)
-3,757,802,504,288,435,000
Test if significant states are returned when excluding domains. We should get back every thermostat change that includes an attribute change, but no media player changes.
tests/components/history/test_init.py
test_get_significant_states_exclude_domain
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_exclude_domain(hass_history): 'Test if significant states are returned when excluding domains.\n\n We should get back every thermostat change that includes an attribute\n change, but no media player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['media_player']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude_entity(hass_history): 'Test if significant states are returned when excluding entities.\n\n We should get back every thermostat and script changes, but no media\n player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
3,971,508,263,900,700,000
Test if significant states are returned when excluding entities. We should get back every thermostat and script changes, but no media player changes.
tests/components/history/test_init.py
test_get_significant_states_exclude_entity
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_exclude_entity(hass_history): 'Test if significant states are returned when excluding entities.\n\n We should get back every thermostat and script changes, but no media\n player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude(hass_history): 'Test significant states when excluding entities and domains.\n\n We should not get back every thermostat and media player test changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['thermostat.test'] del states['thermostat.test2'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
3,192,773,205,326,326,300
Test significant states when excluding entities and domains. We should not get back every thermostat and media player test changes.
tests/components/history/test_init.py
test_get_significant_states_exclude
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_exclude(hass_history): 'Test significant states when excluding entities and domains.\n\n We should not get back every thermostat and media player test changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['thermostat.test'] del states['thermostat.test2'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_exclude_include_entity(hass_history): 'Test significant states when excluding domains and include entities.\n\n We should not get back every thermostat and media player test changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test', 'thermostat.test']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat']}}}) check_significant_states(hass, zero, four, states, config)
7,050,712,471,915,617,000
Test significant states when excluding domains and include entities. We should not get back every thermostat and media player test changes.
tests/components/history/test_init.py
test_get_significant_states_exclude_include_entity
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_exclude_include_entity(hass_history): 'Test significant states when excluding domains and include entities.\n\n We should not get back every thermostat and media player test changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test', 'thermostat.test']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_domain(hass_history): 'Test if significant states are returned when including domains.\n\n We should get back every thermostat and script changes, but no media\n player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['thermostat', 'script']}}}) check_significant_states(hass, zero, four, states, config)
9,085,175,532,555,936,000
Test if significant states are returned when including domains. We should get back every thermostat and script changes, but no media player changes.
tests/components/history/test_init.py
test_get_significant_states_include_domain
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include_domain(hass_history): 'Test if significant states are returned when including domains.\n\n We should get back every thermostat and script changes, but no media\n player changes.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['thermostat', 'script']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_entity(hass_history): 'Test if significant states are returned when including entities.\n\n We should only get back changes of the media_player.test entity.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
-5,447,694,962,693,634,000
Test if significant states are returned when including entities. We should only get back changes of the media_player.test entity.
tests/components/history/test_init.py
test_get_significant_states_include_entity
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include_entity(hass_history): 'Test if significant states are returned when including entities.\n\n We should only get back changes of the media_player.test entity.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include(hass_history): 'Test significant states when including domains and entities.\n\n We should only get back changes of the media_player.test entity and the\n thermostat domain.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
-141,893,945,454,664,540
Test significant states when including domains and entities. We should only get back changes of the media_player.test entity and the thermostat domain.
tests/components/history/test_init.py
test_get_significant_states_include
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include(hass_history): 'Test significant states when including domains and entities.\n\n We should only get back changes of the media_player.test entity and the\n thermostat domain.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test2'] del states['media_player.test3'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude_domain(hass_history): 'Test if significant states are returned when excluding and including domains.\n\n We should not get back any changes since we include only the\n media_player domain but also exclude it.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['media_player']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['media_player']}}}) check_significant_states(hass, zero, four, states, config)
7,043,021,192,306,241,000
Test if significant states are returned when excluding and including domains. We should not get back any changes since we include only the media_player domain but also exclude it.
tests/components/history/test_init.py
test_get_significant_states_include_exclude_domain
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include_exclude_domain(hass_history): 'Test if significant states are returned when excluding and including domains.\n\n We should not get back any changes since we include only the\n media_player domain but also exclude it.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['media_player']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['media_player']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude_entity(hass_history): 'Test if significant states are returned when excluding and including domains.\n\n We should not get back any changes since we include only\n media_player.test but also exclude it.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test']}, history.CONF_EXCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
-5,239,096,581,611,082,000
Test if significant states are returned when excluding and including domains. We should not get back any changes since we include only media_player.test but also exclude it.
tests/components/history/test_init.py
test_get_significant_states_include_exclude_entity
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include_exclude_entity(hass_history): 'Test if significant states are returned when excluding and including domains.\n\n We should not get back any changes since we include only\n media_player.test but also exclude it.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['media_player.test2'] del states['media_player.test3'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_ENTITIES: ['media_player.test']}, history.CONF_EXCLUDE: {history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_include_exclude(hass_history): 'Test if significant states are returned when in/excluding domains and entities.\n\n We should only get back changes of the media_player.test2 entity.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['media_player'], history.CONF_ENTITIES: ['thermostat.test']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
8,020,534,561,754,422,000
Test if significant states are returned when in/excluding domains and entities. We should only get back changes of the media_player.test2 entity.
tests/components/history/test_init.py
test_get_significant_states_include_exclude
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_include_exclude(hass_history): 'Test if significant states are returned when in/excluding domains and entities.\n\n We should only get back changes of the media_player.test2 entity.\n ' hass = hass_history (zero, four, states) = record_states(hass) del states['media_player.test'] del states['thermostat.test'] del states['thermostat.test2'] del states['script.can_cancel_this_one'] config = history.CONFIG_SCHEMA({ha.DOMAIN: {}, history.DOMAIN: {history.CONF_INCLUDE: {history.CONF_DOMAINS: ['media_player'], history.CONF_ENTITIES: ['thermostat.test']}, history.CONF_EXCLUDE: {history.CONF_DOMAINS: ['thermostat'], history.CONF_ENTITIES: ['media_player.test']}}}) check_significant_states(hass, zero, four, states, config)
def test_get_significant_states_are_ordered(hass_history): 'Test order of results from get_significant_states.\n\n When entity ids are given, the results should be returned with the data\n in the same order.\n ' hass = hass_history (zero, four, _states) = record_states(hass) entity_ids = ['media_player.test', 'media_player.test2'] hist = get_significant_states(hass, zero, four, entity_ids, filters=history.Filters()) assert (list(hist.keys()) == entity_ids) entity_ids = ['media_player.test2', 'media_player.test'] hist = get_significant_states(hass, zero, four, entity_ids, filters=history.Filters()) assert (list(hist.keys()) == entity_ids)
3,752,378,976,877,303,000
Test order of results from get_significant_states. When entity ids are given, the results should be returned with the data in the same order.
tests/components/history/test_init.py
test_get_significant_states_are_ordered
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_are_ordered(hass_history): 'Test order of results from get_significant_states.\n\n When entity ids are given, the results should be returned with the data\n in the same order.\n ' hass = hass_history (zero, four, _states) = record_states(hass) entity_ids = ['media_player.test', 'media_player.test2'] hist = get_significant_states(hass, zero, four, entity_ids, filters=history.Filters()) assert (list(hist.keys()) == entity_ids) entity_ids = ['media_player.test2', 'media_player.test'] hist = get_significant_states(hass, zero, four, entity_ids, filters=history.Filters()) assert (list(hist.keys()) == entity_ids)
def test_get_significant_states_only(hass_history): 'Test significant states when significant_states_only is set.' hass = hass_history entity_id = 'sensor.test' def set_state(state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) start = (dt_util.utcnow() - timedelta(minutes=4)) points = [] for i in range(1, 4): points.append((start + timedelta(minutes=i))) states = [] with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=start): set_state('123', attributes={'attribute': 10.64}) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[0]): states.append(set_state('123', attributes={'attribute': 21.42})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[1]): states.append(set_state('32', attributes={'attribute': 21.42})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[2]): states.append(set_state('412', attributes={'attribute': 54.23})) hist = get_significant_states(hass, start, significant_changes_only=True) assert (len(hist[entity_id]) == 2) assert (states[0] not in hist[entity_id]) assert (states[1] in hist[entity_id]) assert (states[2] in hist[entity_id]) hist = get_significant_states(hass, start, significant_changes_only=False) assert (len(hist[entity_id]) == 3) assert (states == hist[entity_id])
-7,254,696,558,611,209,000
Test significant states when significant_states_only is set.
tests/components/history/test_init.py
test_get_significant_states_only
0xFEEDC0DE64/homeassistant-core
python
def test_get_significant_states_only(hass_history): hass = hass_history entity_id = 'sensor.test' def set_state(state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) start = (dt_util.utcnow() - timedelta(minutes=4)) points = [] for i in range(1, 4): points.append((start + timedelta(minutes=i))) states = [] with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=start): set_state('123', attributes={'attribute': 10.64}) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[0]): states.append(set_state('123', attributes={'attribute': 21.42})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[1]): states.append(set_state('32', attributes={'attribute': 21.42})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=points[2]): states.append(set_state('412', attributes={'attribute': 54.23})) hist = get_significant_states(hass, start, significant_changes_only=True) assert (len(hist[entity_id]) == 2) assert (states[0] not in hist[entity_id]) assert (states[1] in hist[entity_id]) assert (states[2] in hist[entity_id]) hist = get_significant_states(hass, start, significant_changes_only=False) assert (len(hist[entity_id]) == 3) assert (states == hist[entity_id])
def check_significant_states(hass, zero, four, states, config): 'Check if significant states are retrieved.' filters = history.Filters() exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE) if exclude: filters.excluded_entities = exclude.get(history.CONF_ENTITIES, []) filters.excluded_domains = exclude.get(history.CONF_DOMAINS, []) include = config[history.DOMAIN].get(history.CONF_INCLUDE) if include: filters.included_entities = include.get(history.CONF_ENTITIES, []) filters.included_domains = include.get(history.CONF_DOMAINS, []) hist = get_significant_states(hass, zero, four, filters=filters) assert (states == hist)
-1,065,899,980,731,391,900
Check if significant states are retrieved.
tests/components/history/test_init.py
check_significant_states
0xFEEDC0DE64/homeassistant-core
python
def check_significant_states(hass, zero, four, states, config): filters = history.Filters() exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE) if exclude: filters.excluded_entities = exclude.get(history.CONF_ENTITIES, []) filters.excluded_domains = exclude.get(history.CONF_DOMAINS, []) include = config[history.DOMAIN].get(history.CONF_INCLUDE) if include: filters.included_entities = include.get(history.CONF_ENTITIES, []) filters.included_domains = include.get(history.CONF_DOMAINS, []) hist = get_significant_states(hass, zero, four, filters=filters) assert (states == hist)
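`check_significant_states` translates the `include`/`exclude` config into a `history.Filters` object with four lists. The precedence the surrounding tests assert, namely that any matching exclude wins and that once an include list exists only included entities or domains pass, can be sketched as a plain function; this illustrates the behavior the tests check, not Home Assistant's actual implementation:

```python
# Plain-Python sketch of the filtering behavior the tests above assert;
# not Home Assistant's actual Filters implementation.
def passes_filter(entity_id, included_entities=(), included_domains=(),
                  excluded_entities=(), excluded_domains=()):
    domain = entity_id.split('.', 1)[0]
    # Any matching exclude wins, even when the entity is also included.
    if entity_id in excluded_entities or domain in excluded_domains:
        return False
    # If any include list is configured, only included entities/domains pass.
    if included_entities or included_domains:
        return entity_id in included_entities or domain in included_domains
    return True

# Mirrors test_get_significant_states_include_exclude:
kwargs = dict(included_domains=('media_player',),
              included_entities=('thermostat.test',),
              excluded_domains=('thermostat',),
              excluded_entities=('media_player.test',))
print(passes_filter('media_player.test2', **kwargs))  # True
print(passes_filter('media_player.test', **kwargs))   # False (entity excluded)
print(passes_filter('thermostat.test', **kwargs))     # False (domain excluded)
```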
def record_states(hass): 'Record some test states.\n\n We inject a bunch of state updates from media player, zone and\n thermostat.\n ' mp = 'media_player.test' mp2 = 'media_player.test2' mp3 = 'media_player.test3' therm = 'thermostat.test' therm2 = 'thermostat.test2' zone = 'zone.home' script_c = 'script.can_cancel_this_one' def set_state(entity_id, state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) zero = dt_util.utcnow() one = (zero + timedelta(seconds=1)) two = (one + timedelta(seconds=1)) three = (two + timedelta(seconds=1)) four = (three + timedelta(seconds=1)) states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []} with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=one): states[mp].append(set_state(mp, 'idle', attributes={'media_title': str(sentinel.mt1)})) states[mp].append(set_state(mp, 'YouTube', attributes={'media_title': str(sentinel.mt2)})) states[mp2].append(set_state(mp2, 'YouTube', attributes={'media_title': str(sentinel.mt2)})) states[mp3].append(set_state(mp3, 'idle', attributes={'media_title': str(sentinel.mt1)})) states[therm].append(set_state(therm, 20, attributes={'current_temperature': 19.5})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=two): set_state(mp, 'YouTube', attributes={'media_title': str(sentinel.mt3)}) set_state(zone, 'zoning') states[script_c].append(set_state(script_c, 'off', attributes={'can_cancel': True})) states[therm].append(set_state(therm, 21, attributes={'current_temperature': 19.8})) states[therm2].append(set_state(therm2, 20, attributes={'current_temperature': 19})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=three): states[mp].append(set_state(mp, 'Netflix', attributes={'media_title': str(sentinel.mt4)})) states[mp3].append(set_state(mp3, 'Netflix', attributes={'media_title': str(sentinel.mt3)})) states[therm].append(set_state(therm, 21, attributes={'current_temperature': 20})) return (zero, four, states)
-3,874,515,981,259,733,500
Record some test states. We inject a bunch of state updates from media player, zone and thermostat.
tests/components/history/test_init.py
record_states
0xFEEDC0DE64/homeassistant-core
python
def record_states(hass): 'Record some test states.\n\n We inject a bunch of state updates from media player, zone and\n thermostat.\n ' mp = 'media_player.test' mp2 = 'media_player.test2' mp3 = 'media_player.test3' therm = 'thermostat.test' therm2 = 'thermostat.test2' zone = 'zone.home' script_c = 'script.can_cancel_this_one' def set_state(entity_id, state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id) zero = dt_util.utcnow() one = (zero + timedelta(seconds=1)) two = (one + timedelta(seconds=1)) three = (two + timedelta(seconds=1)) four = (three + timedelta(seconds=1)) states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []} with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=one): states[mp].append(set_state(mp, 'idle', attributes={'media_title': str(sentinel.mt1)})) states[mp].append(set_state(mp, 'YouTube', attributes={'media_title': str(sentinel.mt2)})) states[mp2].append(set_state(mp2, 'YouTube', attributes={'media_title': str(sentinel.mt2)})) states[mp3].append(set_state(mp3, 'idle', attributes={'media_title': str(sentinel.mt1)})) states[therm].append(set_state(therm, 20, attributes={'current_temperature': 19.5})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=two): set_state(mp, 'YouTube', attributes={'media_title': str(sentinel.mt3)}) set_state(zone, 'zoning') states[script_c].append(set_state(script_c, 'off', attributes={'can_cancel': True})) states[therm].append(set_state(therm, 21, attributes={'current_temperature': 19.8})) states[therm2].append(set_state(therm2, 20, attributes={'current_temperature': 19})) with patch('homeassistant.components.recorder.dt_util.utcnow', return_value=three): states[mp].append(set_state(mp, 'Netflix', attributes={'media_title': str(sentinel.mt4)})) states[mp3].append(set_state(mp3, 'Netflix', attributes={'media_title': str(sentinel.mt3)})) states[therm].append(set_state(therm, 21, attributes={'current_temperature': 20})) return (zero, four, states)
async def test_fetch_period_api(hass, hass_client): 'Test the fetch period view for history.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200)
6,506,475,214,613,486,000
Test the fetch period view for history.
tests/components/history/test_init.py
test_fetch_period_api
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200)
async def test_fetch_period_api_with_use_include_order(hass, hass_client): 'Test the fetch period view for history with include order.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {history.DOMAIN: {history.CONF_ORDER: True}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200)
-6,330,643,641,254,782,000
Test the fetch period view for history with include order.
tests/components/history/test_init.py
test_fetch_period_api_with_use_include_order
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_use_include_order(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {history.DOMAIN: {history.CONF_ORDER: True}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200)
async def test_fetch_period_api_with_minimal_response(hass, hass_client): 'Test the fetch period view for history with minimal_response.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?minimal_response')) assert (response.status == 200)
8,299,730,011,576,794,000
Test the fetch period view for history with minimal_response.
tests/components/history/test_init.py
test_fetch_period_api_with_minimal_response
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_minimal_response(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?minimal_response')) assert (response.status == 200)
async def test_fetch_period_api_with_no_timestamp(hass, hass_client): 'Test the fetch period view for history with no timestamp.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get('/api/history/period')) assert (response.status == 200)
4,203,735,023,861,239,000
Test the fetch period view for history with no timestamp.
tests/components/history/test_init.py
test_fetch_period_api_with_no_timestamp
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_no_timestamp(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get('/api/history/period')) assert (response.status == 200)
async def test_fetch_period_api_with_include_order(hass, hass_client): 'Test the fetch period view for history.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'use_include_order': True, 'include': {'entities': ['light.kitchen']}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}', params={'filter_entity_id': 'non.existing,something.else'})) assert (response.status == 200)
736,123,220,836,228,400
Test the fetch period view for history.
tests/components/history/test_init.py
test_fetch_period_api_with_include_order
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_include_order(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'use_include_order': True, 'include': {'entities': ['light.kitchen']}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}', params={'filter_entity_id': 'non.existing,something.else'})) assert (response.status == 200)
async def test_fetch_period_api_with_entity_glob_include(hass, hass_client): 'Test the fetch period view for history.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'include': {'entity_globs': ['light.k*']}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (response_json[0][0]['entity_id'] == 'light.kitchen')
-7,447,962,861,008,547,000
Test the fetch period view for history.
tests/components/history/test_init.py
test_fetch_period_api_with_entity_glob_include
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_entity_glob_include(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'include': {'entity_globs': ['light.k*']}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (response_json[0][0]['entity_id'] == 'light.kitchen')
async def test_fetch_period_api_with_entity_glob_exclude(hass, hass_client): 'Test the fetch period view for history.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'exclude': {'entity_globs': ['light.k*'], 'domains': 'switch', 'entities': 'media_player.test'}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.match', 'on') hass.states.async_set('switch.match', 'on') hass.states.async_set('media_player.test', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.cow') assert (response_json[1][0]['entity_id'] == 'light.match')
135,551,110,886,820,420
Test the fetch period view for history.
tests/components/history/test_init.py
test_fetch_period_api_with_entity_glob_exclude
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_entity_glob_exclude(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'exclude': {'entity_globs': ['light.k*'], 'domains': 'switch', 'entities': 'media_player.test'}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.match', 'on') hass.states.async_set('switch.match', 'on') hass.states.async_set('media_player.test', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.cow') assert (response_json[1][0]['entity_id'] == 'light.match')
async def test_fetch_period_api_with_entity_glob_include_and_exclude(hass, hass_client): 'Test the fetch period view for history.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'exclude': {'entity_globs': ['light.many*']}, 'include': {'entity_globs': ['light.m*'], 'domains': 'switch', 'entities': 'media_player.test'}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.match', 'on') hass.states.async_set('light.many_state_changes', 'on') hass.states.async_set('switch.match', 'on') hass.states.async_set('media_player.test', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 3) assert (response_json[0][0]['entity_id'] == 'light.match') assert (response_json[1][0]['entity_id'] == 'media_player.test') assert (response_json[2][0]['entity_id'] == 'switch.match')
-8,475,201,651,002,755,000
Test the fetch period view for history.
tests/components/history/test_init.py
test_fetch_period_api_with_entity_glob_include_and_exclude
0xFEEDC0DE64/homeassistant-core
python
async def test_fetch_period_api_with_entity_glob_include_and_exclude(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {'exclude': {'entity_globs': ['light.many*']}, 'include': {'entity_globs': ['light.m*'], 'domains': 'switch', 'entities': 'media_player.test'}}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.match', 'on') hass.states.async_set('light.many_state_changes', 'on') hass.states.async_set('switch.match', 'on') hass.states.async_set('media_player.test', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 3) assert (response_json[0][0]['entity_id'] == 'light.match') assert (response_json[1][0]['entity_id'] == 'media_player.test') assert (response_json[2][0]['entity_id'] == 'switch.match')
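The expected ids in the test above encode the filter precedence: an entity matching an exclude glob is dropped even when it also matches an include glob. A rough stand-in for the glob portion of that logic, using stdlib fnmatch (Home Assistant actually compiles its globs to regexes, so this is only an approximation):

from fnmatch import fnmatch

include_globs = ['light.m*']
exclude_globs = ['light.many*']
entities = ['light.kitchen', 'light.cow', 'light.match', 'light.many_state_changes']
kept = [e for e in entities
        if any(fnmatch(e, g) for g in include_globs)
        and not any(fnmatch(e, g) for g in exclude_globs)]
assert kept == ['light.match']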
async def test_entity_ids_limit_via_api(hass, hass_client): 'Test limiting history to entity_ids.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.kitchen') assert (response_json[1][0]['entity_id'] == 'light.cow')
-7,036,330,779,186,314,000
Test limiting history to entity_ids.
tests/components/history/test_init.py
test_entity_ids_limit_via_api
0xFEEDC0DE64/homeassistant-core
python
async def test_entity_ids_limit_via_api(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.kitchen') assert (response_json[1][0]['entity_id'] == 'light.cow')
async def test_entity_ids_limit_via_api_with_skip_initial_state(hass, hass_client): 'Test limiting history to entity_ids with skip_initial_state.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 0) when = (dt_util.utcnow() - timedelta(minutes=1)) response = (await client.get(f'/api/history/period/{when.isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.kitchen') assert (response_json[1][0]['entity_id'] == 'light.cow')
503,860,346,723,174,000
Test limiting history to entity_ids with skip_initial_state.
tests/components/history/test_init.py
test_entity_ids_limit_via_api_with_skip_initial_state
0xFEEDC0DE64/homeassistant-core
python
async def test_entity_ids_limit_via_api_with_skip_initial_state(hass, hass_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('light.kitchen', 'on') hass.states.async_set('light.cow', 'on') hass.states.async_set('light.nomatch', 'on') (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_client()) response = (await client.get(f'/api/history/period/{dt_util.utcnow().isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 0) when = (dt_util.utcnow() - timedelta(minutes=1)) response = (await client.get(f'/api/history/period/{when.isoformat()}?filter_entity_id=light.kitchen,light.cow&skip_initial_state')) assert (response.status == 200) response_json = (await response.json()) assert (len(response_json) == 2) assert (response_json[0][0]['entity_id'] == 'light.kitchen') assert (response_json[1][0]['entity_id'] == 'light.cow')
@pytest.mark.parametrize('units, attributes, state, value', [(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000), (METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000), (IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 50), (METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 10), (IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 14.503774389728312), (METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 100000)]) async def test_statistics_during_period(hass, hass_ws_client, units, attributes, state, value): 'Test statistics_during_period.' now = dt_util.utcnow() hass.config.units = units (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await async_setup_component(hass, 'sensor', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('sensor.test', state, attributes=attributes) (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period='hourly', start=now) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'end_time': now.isoformat(), 'statistic_ids': ['sensor.test']})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == {}) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'statistic_ids': ['sensor.test']})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == {'sensor.test': [{'statistic_id': 'sensor.test', 'start': now.isoformat(), 'mean': approx(value), 'min': approx(value), 'max': approx(value), 'last_reset': None, 'state': None, 'sum': None}]})
-1,807,136,049,217,268,700
Test statistics_during_period.
tests/components/history/test_init.py
test_statistics_during_period
0xFEEDC0DE64/homeassistant-core
python
@pytest.mark.parametrize('units, attributes, state, value', [(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000), (METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, 10, 10000), (IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 50), (METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, 10, 10), (IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 14.503774389728312), (METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 1000, 100000)]) async def test_statistics_during_period(hass, hass_ws_client, units, attributes, state, value): now = dt_util.utcnow() hass.config.units = units (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {})) (await async_setup_component(hass, 'sensor', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_set('sensor.test', state, attributes=attributes) (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period='hourly', start=now) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'end_time': now.isoformat(), 'statistic_ids': ['sensor.test']})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == {}) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'statistic_ids': ['sensor.test']})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == {'sensor.test': [{'statistic_id': 'sensor.test', 'start': now.isoformat(), 'mean': approx(value), 'min': approx(value), 'max': approx(value), 'last_reset': None, 'state': None, 'sum': None}]})
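The parametrized (state, value) pairs above are plain unit conversions applied by the statistics engine; assuming the sensor attribute fixtures declare kW, °C and hPa respectively, they can be spot-checked directly (the psi factor below is approximate):

assert 10 * 1000 == 10_000                # kW -> W, same under metric and imperial
assert 10 * 9 / 5 + 32 == 50              # °C -> °F under the imperial system
assert 1000 * 100 == 100_000              # hPa -> Pa under the metric system
assert abs(1000 * 100 * 1.45038e-4 - 14.503774389728312) < 1e-3  # hPa -> psi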
async def test_statistics_during_period_bad_start_time(hass, hass_ws_client): 'Test statistics_during_period.' (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': 'cats'})) response = (await client.receive_json()) assert (not response['success']) assert (response['error']['code'] == 'invalid_start_time')
-1,543,574,318,116,338,400
Test statistics_during_period.
tests/components/history/test_init.py
test_statistics_during_period_bad_start_time
0xFEEDC0DE64/homeassistant-core
python
async def test_statistics_during_period_bad_start_time(hass, hass_ws_client): (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': 'cats'})) response = (await client.receive_json()) assert (not response['success']) assert (response['error']['code'] == 'invalid_start_time')
async def test_statistics_during_period_bad_end_time(hass, hass_ws_client): 'Test statistics_during_period.' now = dt_util.utcnow() (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'end_time': 'dogs'})) response = (await client.receive_json()) assert (not response['success']) assert (response['error']['code'] == 'invalid_end_time')
-7,029,948,967,809,684,000
Test statistics_during_period.
tests/components/history/test_init.py
test_statistics_during_period_bad_end_time
0xFEEDC0DE64/homeassistant-core
python
async def test_statistics_during_period_bad_end_time(hass, hass_ws_client): now = dt_util.utcnow() (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/statistics_during_period', 'start_time': now.isoformat(), 'end_time': 'dogs'})) response = (await client.receive_json()) assert (not response['success']) assert (response['error']['code'] == 'invalid_end_time')
@pytest.mark.parametrize('units, attributes, unit', [(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, 'W'), (METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, 'W'), (IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, '°F'), (METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, '°C'), (IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 'psi'), (METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 'Pa')]) async def test_list_statistic_ids(hass, hass_ws_client, units, attributes, unit): 'Test list_statistic_ids.' now = dt_util.utcnow() hass.config.units = units (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await async_setup_component(hass, 'sensor', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == []) hass.states.async_set('sensor.test', 10, attributes=attributes) (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await client.send_json({'id': 2, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period='hourly', start=now) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_remove('sensor.test') (await hass.async_block_till_done()) (await client.send_json({'id': 3, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) (await client.send_json({'id': 4, 'type': 'history/list_statistic_ids', 'statistic_type': 'dogs'})) response = (await client.receive_json()) assert (not response['success']) (await client.send_json({'id': 5, 'type': 'history/list_statistic_ids', 'statistic_type': 'mean'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) (await client.send_json({'id': 6, 'type': 'history/list_statistic_ids', 'statistic_type': 'sum'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [])
-4,831,477,666,152,909,000
Test list_statistic_ids.
tests/components/history/test_init.py
test_list_statistic_ids
0xFEEDC0DE64/homeassistant-core
python
@pytest.mark.parametrize('units, attributes, unit', [(IMPERIAL_SYSTEM, POWER_SENSOR_ATTRIBUTES, 'W'), (METRIC_SYSTEM, POWER_SENSOR_ATTRIBUTES, 'W'), (IMPERIAL_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, '°F'), (METRIC_SYSTEM, TEMPERATURE_SENSOR_ATTRIBUTES, '°C'), (IMPERIAL_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 'psi'), (METRIC_SYSTEM, PRESSURE_SENSOR_ATTRIBUTES, 'Pa')]) async def test_list_statistic_ids(hass, hass_ws_client, units, attributes, unit): now = dt_util.utcnow() hass.config.units = units (await hass.async_add_executor_job(init_recorder_component, hass)) (await async_setup_component(hass, 'history', {'history': {}})) (await async_setup_component(hass, 'sensor', {})) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) client = (await hass_ws_client()) (await client.send_json({'id': 1, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == []) hass.states.async_set('sensor.test', 10, attributes=attributes) (await hass.async_block_till_done()) (await hass.async_add_executor_job(trigger_db_commit, hass)) (await hass.async_block_till_done()) (await client.send_json({'id': 2, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) hass.data[recorder.DATA_INSTANCE].do_adhoc_statistics(period='hourly', start=now) (await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)) hass.states.async_remove('sensor.test') (await hass.async_block_till_done()) (await client.send_json({'id': 3, 'type': 'history/list_statistic_ids'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) (await client.send_json({'id': 4, 'type': 'history/list_statistic_ids', 'statistic_type': 'dogs'})) response = (await client.receive_json()) assert (not response['success']) (await client.send_json({'id': 5, 'type': 'history/list_statistic_ids', 'statistic_type': 'mean'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [{'statistic_id': 'sensor.test', 'unit_of_measurement': unit}]) (await client.send_json({'id': 6, 'type': 'history/list_statistic_ids', 'statistic_type': 'sum'})) response = (await client.receive_json()) assert response['success'] assert (response['result'] == [])
def set_state(state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id)
8,959,891,338,023,652,000
Set the state.
tests/components/history/test_init.py
set_state
0xFEEDC0DE64/homeassistant-core
python
def set_state(state, **kwargs): hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id)
def set_state(entity_id, state, **kwargs): 'Set the state.' hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id)
960,452,124,161,646,300
Set the state.
tests/components/history/test_init.py
set_state
0xFEEDC0DE64/homeassistant-core
python
def set_state(entity_id, state, **kwargs): hass.states.set(entity_id, state, **kwargs) wait_recording_done(hass) return hass.states.get(entity_id)
def test_xml_conversion(self): "\n Test that converted XML hasn't changed from saved version.\n " tree_string = StringIO.StringIO() self.pdf.tree.write(tree_string, pretty_print=True, encoding='utf-8') tree_string = tree_string.getvalue() comparison_file = ('tests/saved_output/IRS_1040A_output%s.xml' % ('_python_2.6' if ((sys.version_info[0] == 2) and (sys.version_info[1] < 7)) else '')) with open(comparison_file, 'rb') as f: saved_string = f.read() if (tree_string != saved_string): with open('tests/failed_output.xml', 'wb') as out: out.write(tree_string) self.fail(('XML conversion of sample pdf has changed! Compare %s to tests/failed_output.xml.' % comparison_file))
8,067,755,005,227,209,000
Test that converted XML hasn't changed from saved version.
tests/tests.py
test_xml_conversion
leeoniya/pdfquery
python
def test_xml_conversion(self): "\n \n " tree_string = StringIO.StringIO() self.pdf.tree.write(tree_string, pretty_print=True, encoding='utf-8') tree_string = tree_string.getvalue() comparison_file = ('tests/saved_output/IRS_1040A_output%s.xml' % ('_python_2.6' if ((sys.version_info[0] == 2) and (sys.version_info[1] < 7)) else '')) with open(comparison_file, 'rb') as f: saved_string = f.read() if (tree_string != saved_string): with open('tests/failed_output.xml', 'wb') as out: out.write(tree_string) self.fail(('XML conversion of sample pdf has changed! Compare %s to tests/failed_output.xml.' % comparison_file))
def test_selectors(self): '\n Test the :contains and :in_bbox selectors.\n ' label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name and initial")') self.assertEqual(len(label), 1) left_corner = float(label.attr('x0')) self.assertEqual(left_corner, 143.651) bottom_corner = float(label.attr('y0')) self.assertEqual(bottom_corner, 714.694) name = self.pdf.pq(('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' % (left_corner, (bottom_corner - 30), (left_corner + 150), bottom_corner))).text() self.assertEqual(name, 'John E.')
-5,185,528,845,370,750,000
Test the :contains and :in_bbox selectors.
tests/tests.py
test_selectors
leeoniya/pdfquery
python
def test_selectors(self): '\n \n ' label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name and initial")') self.assertEqual(len(label), 1) left_corner = float(label.attr('x0')) self.assertEqual(left_corner, 143.651) bottom_corner = float(label.attr('y0')) self.assertEqual(bottom_corner, 714.694) name = self.pdf.pq(('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' % (left_corner, (bottom_corner - 30), (left_corner + 150), bottom_corner))).text() self.assertEqual(name, 'John E.')
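The in_bbox argument above is an "x0, y0, x1, y1" box in PDF user-space points, with the origin at the bottom-left of the page; the test's window, spelled out:

left, bottom = 143.651, 714.694
bbox = f'{left}, {bottom - 30}, {left + 150}, {bottom}'
print(bbox)  # roughly "143.651, 684.694, 293.651, 714.694"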
def test_extract(self): '\n Test the extract() function.\n ' values = self.pdf.extract([('with_parent', 'LTPage[pageid="1"]'), ('with_formatter', 'text'), ('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'), ('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'), ('with_parent', 'LTPage[pageid="2"]'), ('oath', 'LTTextLineHorizontal:contains("perjury")', (lambda match: (match.text()[:30] + '...'))), ('year', 'LTTextLineHorizontal:contains("Form 1040A (")', (lambda match: int(match.text()[(- 5):(- 1)])))]) self.assertDictEqual(values, {'last_name': 'Michaels', 'spouse': 'Susan R.', 'oath': u'Under penalties of perjury, I ...', 'year': 2007})
7,389,041,808,124,743,000
Test the extract() function.
tests/tests.py
test_extract
leeoniya/pdfquery
python
def test_extract(self): '\n \n ' values = self.pdf.extract([('with_parent', 'LTPage[pageid="1"]'), ('with_formatter', 'text'), ('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'), ('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'), ('with_parent', 'LTPage[pageid="2"]'), ('oath', 'LTTextLineHorizontal:contains("perjury")', (lambda match: (match.text()[:30] + '...'))), ('year', 'LTTextLineHorizontal:contains("Form 1040A (")', (lambda match: int(match.text()[(- 5):(- 1)])))]) self.assertDictEqual(values, {'last_name': 'Michaels', 'spouse': 'Susan R.', 'oath': u'Under penalties of perjury, I ...', 'year': 2007})
def test_xml_conversion(self): "\n Test that converted XML hasn't changed from saved version.\n " tree_string = StringIO.StringIO() self.pdf.tree.write(tree_string, pretty_print=True, encoding='utf-8') tree_string = tree_string.getvalue() comparison_file = 'tests/saved_output/bug28.xml' with open(comparison_file, 'rb') as f: saved_string = f.read() if (tree_string != saved_string): with open('tests/failed_output.xml', 'wb') as out: out.write(tree_string) self.fail(('XML conversion of sample pdf has changed! Compare %s to tests/failed_output.xml.' % comparison_file))
-5,659,162,527,995,697,000
Test that converted XML hasn't changed from saved version.
tests/tests.py
test_xml_conversion
leeoniya/pdfquery
python
def test_xml_conversion(self): "\n \n " tree_string = StringIO.StringIO() self.pdf.tree.write(tree_string, pretty_print=True, encoding='utf-8') tree_string = tree_string.getvalue() comparison_file = 'tests/saved_output/bug28.xml' with open(comparison_file, 'rb') as f: saved_string = f.read() if (tree_string != saved_string): with open('tests/failed_output.xml', 'wb') as out: out.write(tree_string) self.fail(('XML conversion of sample pdf has changed! Compare %s to tests/failed_output.xml.' % comparison_file))
def run_episode(batch, agent_models, batch_size, testing, render=False, initial_agent=0): '\n turning testing on means, we disable stochasticity: always pick the argmax\n ' s = State(**batch) sieve = AliveSieve(batch_size=batch_size) actions_by_timestep = [] alive_masks = [] rewards = torch.zeros(batch_size, 3, device=FLAGS.device) num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device) term_matches_argmax_count = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 num_policy_runs = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device) prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device) entropy_loss_by_agent = [torch.zeros(1, device=FLAGS.device), torch.zeros(1, device=FLAGS.device)] if render: print(' ') print(' ', '{} {} {}'.format(*s.utilities[0][0].tolist()), ' ', '{} {} {}'.format(*s.pool[0].tolist()), ' ', '{} {} {}'.format(*s.utilities[0][1].tolist())) current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device) prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device) current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8) for t in range(FLAGS.max_timesteps): if FLAGS.linguistic: if (FLAGS.normal_form and ((t % 2) == 1)): _prev_message = prev_A_message else: _prev_message = s.m_prev else: _prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device) if FLAGS.proposal: if (FLAGS.normal_form and ((t % 2) == 1)): _prev_proposal = prev_A_proposal else: _prev_proposal = s.last_proposal else: _prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) agent = ((initial_agent + t) % 2) agent_model = agent_models[agent] (nodes, term_a, s.m_prev, this_proposal, _entropy_loss, _term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(pool=s.pool, utility=s.utilities[:, agent], m_prev=_prev_message, prev_proposal=_prev_proposal, testing=testing) entropy_loss_by_agent[agent] += _entropy_loss actions_by_timestep.append(nodes) term_matches_argmax_count += _term_matches_argmax_count num_policy_runs += sieve.batch_size utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws if FLAGS.force_masking_comm: utt_mask[agent][sieve.out_idxes] |= _utt_mask prop_mask[agent][sieve.out_idxes] |= _prop_mask if (FLAGS.proposal_termination and (not FLAGS.normal_form)): term_a = torch.prod((this_proposal == _prev_proposal), dim=1, keepdim=True) elif ((not FLAGS.proposal_termination) and FLAGS.normal_form): if ((t % 2) == 1): term_a = (term_a * current_A_term) else: current_A_term = term_a term_a = torch.zeros((sieve.batch_size, 1), dtype=torch.uint8, device=FLAGS.device) elif (FLAGS.proposal_termination and FLAGS.normal_form): if ((t % 2) == 1): term_a = torch.prod((this_proposal == current_A_proposal), dim=1, keepdim=True) else: term_a = torch.zeros((sieve.batch_size, 1), dtype=torch.uint8, device=FLAGS.device) if (render and (sieve.out_idxes[0] == 0)): render_action(t=t, s=s, term=term_a, prop=this_proposal) new_rewards = calc_rewards(t=t, s=s, term=term_a, agent=agent) rewards[sieve.out_idxes] = new_rewards s.last_proposal = this_proposal if (FLAGS.normal_form and ((t % 2) == 0)): prev_A_proposal = current_A_proposal current_A_proposal = this_proposal prev_A_message = current_A_message current_A_message = s.m_prev sieve.mark_dead(term_a) sieve.mark_dead(((t + 1) >= s.N)) alive_masks.append(sieve.alive_mask.clone()) sieve.set_dead_global(num_steps, (t + 1)) if sieve.all_dead(): break s.sieve_(sieve.alive_idxes) if FLAGS.normal_form: current_A_proposal = current_A_proposal[sieve.alive_idxes] prev_A_proposal = prev_A_proposal[sieve.alive_idxes] current_A_message = current_A_message[sieve.alive_idxes] prev_A_message = prev_A_message[sieve.alive_idxes] sieve.self_sieve_() if render: print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist())) print(' ') utt_mask_count = utt_mask.sum(dim=[1, 2]).cpu().numpy() prop_mask_count = prop_mask.sum(dim=[1, 2]).cpu().numpy() return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent, term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws, prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)
458,638,067,147,089,600
turning testing on means, we disable stochasticity: always pick the argmax
src/ecn.py
run_episode
mnoukhov/ecn
python
def run_episode(batch, agent_models, batch_size, testing, render=False, initial_agent=0): '\n \n ' s = State(**batch) sieve = AliveSieve(batch_size=batch_size) actions_by_timestep = [] alive_masks = [] rewards = torch.zeros(batch_size, 3, device=FLAGS.device) num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device) term_matches_argmax_count = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 num_policy_runs = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device) prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device) entropy_loss_by_agent = [torch.zeros(1, device=FLAGS.device), torch.zeros(1, device=FLAGS.device)] if render: print(' ') print(' ', '{} {} {}'.format(*s.utilities[0][0].tolist()), ' ', '{} {} {}'.format(*s.pool[0].tolist()), ' ', '{} {} {}'.format(*s.utilities[0][1].tolist())) current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device) prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device) current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8) for t in range(FLAGS.max_timesteps): if FLAGS.linguistic: if (FLAGS.normal_form and ((t % 2) == 1)): _prev_message = prev_A_message else: _prev_message = s.m_prev else: _prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device) if FLAGS.proposal: if (FLAGS.normal_form and ((t % 2) == 1)): _prev_proposal = prev_A_proposal else: _prev_proposal = s.last_proposal else: _prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device) agent = ((initial_agent + t) % 2) agent_model = agent_models[agent] (nodes, term_a, s.m_prev, this_proposal, _entropy_loss, _term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(pool=s.pool, utility=s.utilities[:, agent], m_prev=_prev_message, prev_proposal=_prev_proposal, testing=testing) entropy_loss_by_agent[agent] += _entropy_loss actions_by_timestep.append(nodes) term_matches_argmax_count += _term_matches_argmax_count num_policy_runs += sieve.batch_size utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws if FLAGS.force_masking_comm: utt_mask[agent][sieve.out_idxes] |= _utt_mask prop_mask[agent][sieve.out_idxes] |= _prop_mask if (FLAGS.proposal_termination and (not FLAGS.normal_form)): term_a = torch.prod((this_proposal == _prev_proposal), dim=1, keepdim=True) elif ((not FLAGS.proposal_termination) and FLAGS.normal_form): if ((t % 2) == 1): term_a = (term_a * current_A_term) else: current_A_term = term_a term_a = torch.zeros((sieve.batch_size, 1), dtype=torch.uint8, device=FLAGS.device) elif (FLAGS.proposal_termination and FLAGS.normal_form): if ((t % 2) == 1): term_a = torch.prod((this_proposal == current_A_proposal), dim=1, keepdim=True) else: term_a = torch.zeros((sieve.batch_size, 1), dtype=torch.uint8, device=FLAGS.device) if (render and (sieve.out_idxes[0] == 0)): render_action(t=t, s=s, term=term_a, prop=this_proposal) new_rewards = calc_rewards(t=t, s=s, term=term_a, agent=agent) rewards[sieve.out_idxes] = new_rewards s.last_proposal = this_proposal if (FLAGS.normal_form and ((t % 2) == 0)): prev_A_proposal = current_A_proposal current_A_proposal = this_proposal prev_A_message = current_A_message current_A_message = s.m_prev sieve.mark_dead(term_a) sieve.mark_dead(((t + 1) >= s.N)) alive_masks.append(sieve.alive_mask.clone()) sieve.set_dead_global(num_steps, (t + 1)) if sieve.all_dead(): break s.sieve_(sieve.alive_idxes) if FLAGS.normal_form: current_A_proposal = current_A_proposal[sieve.alive_idxes] prev_A_proposal = prev_A_proposal[sieve.alive_idxes] current_A_message = current_A_message[sieve.alive_idxes] prev_A_message = prev_A_message[sieve.alive_idxes] sieve.self_sieve_() if render: print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist())) print(' ') utt_mask_count = utt_mask.sum(dim=[1, 2]).cpu().numpy() prop_mask_count = prop_mask.sum(dim=[1, 2]).cpu().numpy() return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent, term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws, prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)
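A minimal sketch of the testing switch described in run_episode's docstring, assuming a categorical policy head; the names here are illustrative, not the repo's API:

import torch

logits = torch.tensor([[0.1, 2.0, 0.3]])
probs = torch.softmax(logits, dim=-1)

def pick_action(probs, testing):
    if testing:
        return probs.argmax(dim=-1)                       # deterministic: always the mode
    return torch.multinomial(probs, num_samples=1)[:, 0]  # stochastic draw

assert pick_action(probs, testing=True).item() == 1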
def safe_div(a, b): '\n returns a / b, unless b is zero, in which case returns 0\n this is primarily for usage in cases where b might be systematically zero, eg because comms are disabled or similar\n also accounts for a or b being tensors\n ' if isinstance(a, torch.Tensor): a = a.item() if isinstance(b, torch.Tensor): b = b.item() return (0 if (b == 0) else (a / b))
3,609,836,066,754,855,400
returns a / b, unless b is zero, in which case returns 0 this is primarily for usage in cases where b might be systematically zero, eg because comms are disabled or similar also accounts for a or b being tensors
src/ecn.py
safe_div
mnoukhov/ecn
python
def safe_div(a, b): '\n returns a / b, unless b is zero, in which case returns 0\n this is primarily for usage in cases where b might be systematically zero, eg because comms are disabled or similar\n also accounts for a or b being tensors\n ' if isinstance(a, torch.Tensor): a = a.item() if isinstance(b, torch.Tensor): b = b.item() return (0 if (b == 0) else (a / b))
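A quick behavior check for safe_div; the function is redefined locally so the snippet runs standalone:

import torch

def safe_div(a, b):
    if isinstance(a, torch.Tensor):
        a = a.item()
    if isinstance(b, torch.Tensor):
        b = b.item()
    return 0 if b == 0 else a / b

assert safe_div(6, 3) == 2.0
assert safe_div(1, 0) == 0                                    # zero denominator, e.g. comms disabled
assert safe_div(torch.tensor(4.0), torch.tensor(2.0)) == 2.0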
def run(args): '\n testing option will:\n - use argmax, ie disable stochastic draws\n - not run optimizers\n - not save model\n ' if args.wandb: if args.wandb_offline: os.environ['WANDB_MODE'] = 'dryrun' wandb.init(project='ecn', name=args.name, dir=f'{args.savedir}', group=args.wandb_group) wandb.config.update(args) wandb.config.update(FLAGS) flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']} args_dict = args.__dict__ pprint(args_dict) pprint(flags_dict) os.makedirs(args.model_dir, exist_ok=True) os.makedirs(args.logdir, exist_ok=True) if (args.seed is not None): np.random.seed(args.seed) torch.manual_seed(args.seed) train_r = np.random.RandomState(args.seed) else: train_r = np.random test_r = np.random.RandomState(args.test_seed) test_batches = generate_test_batches(batch_size=args.batch_size, num_batches=5, random_state=test_r) test_hashes = hash_batches(test_batches) episode = 0 start_time = time.time() agent_models = [] agent_opts = [] agent_name = ['A', 'B'] for i in range(2): model = AgentModel(name=agent_name[i], term_entropy_reg=args.term_entropy_reg, utterance_entropy_reg=args.utterance_entropy_reg, proposal_entropy_reg=args.proposal_entropy_reg).to(FLAGS.device) agent_models.append(model) agent_opts.append(optim.Adam(params=agent_models[i].parameters())) if args.wandb: wandb.watch(agent_models) if (path.isfile(args.model_file) and (not args.no_load)): (episode, start_time) = load_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts) print('loaded model') elif args.testing: print('') print('ERROR: must have loadable model to use --testing option') print('') return last_print = time.time() rewards_sum = torch.zeros(3, device=FLAGS.device) steps_sum = 0 count_sum = 0 f_log = open(args.log_file, 'w') all_args = {**args_dict, **flags_dict} f_log.write(('meta: %s\n' % json.dumps(all_args))) last_save = time.time() baseline = torch.zeros(3, device=FLAGS.device) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 utt_mask_count = np.array([0, 0]) prop_mask_count = np.array([0, 0]) while (episode < args.episodes): render = ((episode % args.render_every_episode) == 0) split = (2 if FLAGS.randomize_first else 1) agent_losses = [0, 0] both_rewards = [] for i in range(2): agent_opts[i].zero_grad() for initial_agent in range(split): batch = generate_training_batch(batch_size=(args.batch_size // split), test_hashes=test_hashes, random_state=train_r) (actions, rewards, steps, alive_masks, entropy_loss_by_agent, _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask_count, _prop_mask_count) = run_episode(batch=batch, agent_models=agent_models, batch_size=(args.batch_size // split), render=render, initial_agent=initial_agent, testing=args.testing) term_matches_argmax_count += _term_matches_argmax_count utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws num_policy_runs += _num_policy_runs prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws utt_mask_count += _utt_mask_count prop_mask_count += _prop_mask_count if (not args.testing): reward_loss_by_agent = [0, 0] baselined_rewards = (rewards - baseline) rewards_by_agent = [] for i in range(2): if FLAGS.prosocial: rewards_by_agent.append(baselined_rewards[:, 2]) else: rewards_by_agent.append(baselined_rewards[:, i]) sieve_playback = SievePlayback(alive_masks) for (t, global_idxes) in sieve_playback: agent = ((initial_agent + t) % 2) if (len(actions[t]) > 0): for action in actions[t]: _rewards = rewards_by_agent[agent] _reward = _rewards[global_idxes].float().contiguous().view(sieve_playback.batch_size, 1) _reward_loss = (- (action * _reward)) _reward_loss = _reward_loss.sum() reward_loss_by_agent[agent] += _reward_loss for i in range(2): loss = (entropy_loss_by_agent[i] + reward_loss_by_agent[i]) loss.backward() rewards_sum += rewards.detach().sum(0) steps_sum += steps.sum() count_sum += (args.batch_size // split) both_rewards.append(rewards) for i in range(2): agent_opts[i].step() rewards = torch.cat(both_rewards).detach() baseline = ((0.7 * baseline) + (0.3 * rewards.mean(0).detach())) if render: '\n run the test batches, print the results\n ' test_rewards_sum = np.zeros(3) test_count_sum = (len(test_batches) * args.batch_size) test_num_policy_runs = 0 test_utt_mask_count = [0, 0] test_prop_mask_count = [0, 0] test_utt_mask_count = np.array([0, 0]) test_prop_mask_count = np.array([0, 0]) for test_batch in test_batches: (actions, test_rewards, steps, alive_masks, entropy_loss_by_agent, _term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _test_utt_mask_count, _test_prop_mask_count) = run_episode(batch=test_batch, agent_models=agent_models, batch_size=args.batch_size, render=True, testing=True) test_rewards_sum += test_rewards.sum(0).cpu().numpy() test_num_policy_runs += _test_num_policy_runs test_utt_mask_count += _test_utt_mask_count test_prop_mask_count += _test_prop_mask_count time_since_last = (time.time() - last_print) rewards_str = ('%.2f,%.2f,%.2f' % ((rewards_sum[0] / count_sum), (rewards_sum[1] / count_sum), (rewards_sum[2] / count_sum))) test_rewards_str = ('%.2f,%.2f,%.2f' % ((test_rewards_sum[0] / test_count_sum), (test_rewards_sum[1] / test_count_sum), (test_rewards_sum[2] / test_count_sum))) baseline_str = ('%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])) utt_mask_pct = (utt_mask_count / (3 * count_sum)) test_utt_mask_pct = (test_utt_mask_count / (3 * test_count_sum)) prop_mask_pct = (prop_mask_count / (3 * count_sum)) test_prop_mask_pct = (test_prop_mask_count / (3 * test_count_sum)) print('test {}'.format(test_rewards_str)) print('train {}'.format(rewards_str)) print('base {}'.format(baseline_str)) print('ep {}, {} games/sec, {:2.2f} avg steps'.format(episode, int((count_sum / time_since_last)), (steps_sum.item() / count_sum))) print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format((term_matches_argmax_count / num_policy_runs), safe_div(utt_matches_argmax_count, utt_stochastic_draws), (prop_matches_argmax_count / prop_stochastic_draws))) if FLAGS.force_masking_comm: print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(*utt_mask_pct, *test_utt_mask_pct)) print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(*prop_mask_pct, *test_prop_mask_pct)) episode_log = {'episode': episode, 'avg_reward_A': (rewards_sum[0] / count_sum).item(), 'avg_reward_B': (rewards_sum[1] / count_sum).item(), 'avg_reward_0': (rewards_sum[2] / count_sum).item(), 'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(), 'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(), 'test_reward': (test_rewards_sum[2] / test_count_sum).item(), 'avg_steps': torch.true_divide(steps_sum, count_sum).item(), 'games_sec': (count_sum / time_since_last), 'elapsed': (time.time() - start_time), 'argmaxp_term': (term_matches_argmax_count / num_policy_runs), 'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws), 'argmaxp_prop': (prop_matches_argmax_count / prop_stochastic_draws), 'utt_unmasked_A': utt_mask_pct[0], 'utt_unmasked_B': utt_mask_pct[1], 'prop_unmasked_A': prop_mask_pct[0], 'prop_unmasked_B': prop_mask_pct[1], 'test_utt_unmasked_A': test_utt_mask_pct[0], 'test_utt_unmasked_B': test_utt_mask_pct[1], 'test_prop_unmasked_A': test_prop_mask_pct[0], 'test_prop_unmasked_B': test_prop_mask_pct[1]} f_log.write((json.dumps(episode_log) + '\n')) f_log.flush() if args.wandb: wandb.log(episode_log) last_print = time.time() steps_sum = 0 rewards_sum.fill_(0) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 count_sum = 0 utt_mask_count.fill(0) prop_mask_count.fill(0) if ((not args.testing) and (not args.no_save) and (episode > 0) and ((episode % args.save_every_episode) == 0)): save_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts, start_time=start_time, episode=episode) print('saved model') episode += 1 if ((not args.no_save) and (not args.testing)): save_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts, start_time=start_time, episode=episode) print('saved model') f_log.close()
-173,498,744,607,043,500
testing option will: - use argmax, ie disable stochastic draws - not run optimizers - not save model
src/ecn.py
run
mnoukhov/ecn
python
def run(args): '\n testing option will:\n - use argmax, ie disable stochastic draws\n - not run optimizers\n - not save model\n ' if args.wandb: if args.wandb_offline: os.environ['WANDB_MODE'] = 'dryrun' wandb.init(project='ecn', name=args.name, dir=f'{args.savedir}', group=args.wandb_group) wandb.config.update(args) wandb.config.update(FLAGS) flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']} args_dict = args.__dict__ pprint(args_dict) pprint(flags_dict) os.makedirs(args.model_dir, exist_ok=True) os.makedirs(args.logdir, exist_ok=True) if (args.seed is not None): np.random.seed(args.seed) torch.manual_seed(args.seed) train_r = np.random.RandomState(args.seed) else: train_r = np.random test_r = np.random.RandomState(args.test_seed) test_batches = generate_test_batches(batch_size=args.batch_size, num_batches=5, random_state=test_r) test_hashes = hash_batches(test_batches) episode = 0 start_time = time.time() agent_models = [] agent_opts = [] agent_name = ['A', 'B'] for i in range(2): model = AgentModel(name=agent_name[i], term_entropy_reg=args.term_entropy_reg, utterance_entropy_reg=args.utterance_entropy_reg, proposal_entropy_reg=args.proposal_entropy_reg).to(FLAGS.device) agent_models.append(model) agent_opts.append(optim.Adam(params=agent_models[i].parameters())) if args.wandb: wandb.watch(agent_models) if (path.isfile(args.model_file) and (not args.no_load)): (episode, start_time) = load_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts) print('loaded model') elif args.testing: print() print('ERROR: must have loadable model to use --testing option') print() return last_print = time.time() rewards_sum = torch.zeros(3, device=FLAGS.device) steps_sum = 0 count_sum = 0 f_log = open(args.log_file, 'w') all_args = {**args_dict, **flags_dict} f_log.write(('meta: %s\n' % json.dumps(all_args))) last_save = time.time() baseline = torch.zeros(3, device=FLAGS.device) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 utt_mask_count = np.array([0, 0]) prop_mask_count = np.array([0, 0]) while (episode < args.episodes): render = ((episode % args.render_every_episode) == 0) split = (2 if FLAGS.randomize_first else 1) agent_losses = [0, 0] both_rewards = [] for i in range(2): agent_opts[i].zero_grad() for initial_agent in range(split): batch = generate_training_batch(batch_size=(args.batch_size // split), test_hashes=test_hashes, random_state=train_r) (actions, rewards, steps, alive_masks, entropy_loss_by_agent, _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask_count, _prop_mask_count) = run_episode(batch=batch, agent_models=agent_models, batch_size=(args.batch_size // split), render=render, initial_agent=initial_agent, testing=args.testing) term_matches_argmax_count += _term_matches_argmax_count utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws num_policy_runs += _num_policy_runs prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws utt_mask_count += _utt_mask_count prop_mask_count += _prop_mask_count if (not args.testing): reward_loss_by_agent = [0, 0] baselined_rewards = (rewards - baseline) rewards_by_agent = [] for i in range(2): if FLAGS.prosocial: rewards_by_agent.append(baselined_rewards[:, 2]) else: 
rewards_by_agent.append(baselined_rewards[:, i]) sieve_playback = SievePlayback(alive_masks) for (t, global_idxes) in sieve_playback: agent = ((initial_agent + t) % 2) if (len(actions[t]) > 0): for action in actions[t]: _rewards = rewards_by_agent[agent] _reward = _rewards[global_idxes].float().contiguous().view(sieve_playback.batch_size, 1) _reward_loss = (- (action * _reward)) _reward_loss = _reward_loss.sum() reward_loss_by_agent[agent] += _reward_loss for i in range(2): loss = (entropy_loss_by_agent[i] + reward_loss_by_agent[i]) loss.backward() rewards_sum += rewards.detach().sum(0) steps_sum += steps.sum() count_sum += (args.batch_size // split) both_rewards.append(rewards) for i in range(2): agent_opts[i].step() rewards = torch.cat(both_rewards).detach() baseline = ((0.7 * baseline) + (0.3 * rewards.mean(0).detach())) if render: '\n run the test batches, print the results\n ' test_rewards_sum = np.zeros(3) test_count_sum = (len(test_batches) * args.batch_size) test_num_policy_runs = 0 test_utt_mask_count = [0, 0] test_prop_mask_count = [0, 0] test_utt_mask_count = np.array([0, 0]) test_prop_mask_count = np.array([0, 0]) for test_batch in test_batches: (actions, test_rewards, steps, alive_masks, entropy_loss_by_agent, _term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, _prop_matches_argmax_count, _prop_stochastic_draws, _test_utt_mask_count, _test_prop_mask_count) = run_episode(batch=test_batch, agent_models=agent_models, batch_size=args.batch_size, render=True, testing=True) test_rewards_sum += test_rewards.sum(0).cpu().numpy() test_num_policy_runs += _test_num_policy_runs test_utt_mask_count += _test_utt_mask_count test_prop_mask_count += _test_prop_mask_count time_since_last = (time.time() - last_print) rewards_str = ('%.2f,%.2f,%.2f' % ((rewards_sum[0] / count_sum), (rewards_sum[1] / count_sum), (rewards_sum[2] / count_sum))) test_rewards_str = ('%.2f,%.2f,%.2f' % ((test_rewards_sum[0] / test_count_sum), (test_rewards_sum[1] / test_count_sum), (test_rewards_sum[2] / test_count_sum))) baseline_str = ('%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])) utt_mask_pct = (utt_mask_count / (3 * count_sum)) test_utt_mask_pct = (test_utt_mask_count / (3 * test_count_sum)) prop_mask_pct = (prop_mask_count / (3 * count_sum)) test_prop_mask_pct = (test_prop_mask_count / (3 * test_count_sum)) print('test {}'.format(test_rewards_str)) print('train {}'.format(rewards_str)) print('base {}'.format(baseline_str)) print('ep {}, {} games/sec, {:2.2f} avg steps'.format(episode, int((count_sum / time_since_last)), (steps_sum.item() / count_sum))) print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format((term_matches_argmax_count / num_policy_runs), safe_div(utt_matches_argmax_count, utt_stochastic_draws), (prop_matches_argmax_count / prop_stochastic_draws))) if FLAGS.force_masking_comm: print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(*utt_mask_pct, *test_utt_mask_pct)) print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(*prop_mask_pct, *test_prop_mask_pct)) episode_log = {'episode': episode, 'avg_reward_A': (rewards_sum[0] / count_sum).item(), 'avg_reward_B': (rewards_sum[1] / count_sum).item(), 'avg_reward_0': (rewards_sum[2] / count_sum).item(), 'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(), 'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(), 'test_reward': (test_rewards_sum[2] / test_count_sum).item(), 'avg_steps': torch.true_divide(steps_sum, count_sum).item(), 'games_sec': 
(count_sum / time_since_last), 'elapsed': (time.time() - start_time), 'argmaxp_term': (term_matches_argmax_count / num_policy_runs), 'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws), 'argmaxp_prop': (prop_matches_argmax_count / prop_stochastic_draws), 'utt_unmasked_A': utt_mask_pct[0], 'utt_unmasked_B': utt_mask_pct[1], 'prop_unmasked_A': prop_mask_pct[0], 'prop_unmasked_B': prop_mask_pct[1], 'test_utt_unmasked_A': test_utt_mask_pct[0], 'test_utt_unmasked_B': test_utt_mask_pct[1], 'test_prop_unmasked_A': test_prop_mask_pct[0], 'test_prop_unmasked_B': test_prop_mask_pct[1]} f_log.write((json.dumps(episode_log) + '\n')) f_log.flush() if args.wandb: wandb.log(episode_log) last_print = time.time() steps_sum = 0 rewards_sum.fill_(0) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 count_sum = 0 utt_mask_count.fill(0) prop_mask_count.fill(0) if ((not args.testing) and (not args.no_save) and (episode > 0) and ((episode % args.save_every_episode) == 0)): save_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts, start_time=start_time, episode=episode) print('saved model') episode += 1 if ((not args.no_save) and (not args.testing)): save_model(model_file=args.model_file, agent_models=agent_models, agent_opts=agent_opts, start_time=start_time, episode=episode) print('saved model') f_log.close()
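For orientation, here is a minimal sketch of driving run() from a script. The attribute names are read off the function body above, but every value below is an invented placeholder (the repository's real defaults, FLAGS definitions, and model/batching helpers are not part of this record):

from argparse import Namespace

# All values are illustrative assumptions; run() additionally relies on the
# module-level FLAGS and the AgentModel/batching helpers it references above.
args = Namespace(
    name='demo', savedir='save', model_dir='save/models', logdir='save/logs',
    model_file='save/models/model.pt', log_file='save/logs/log.jsonl',
    wandb=False, wandb_offline=False, wandb_group=None,
    seed=123, test_seed=456, batch_size=128, episodes=1000,
    render_every_episode=50, save_every_episode=500,
    term_entropy_reg=0.05, utterance_entropy_reg=0.001,
    proposal_entropy_reg=0.01, no_load=False, no_save=False, testing=False,
)
run(args)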
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'name': (str,), 'query': (str,)}
6,119,635,577,640,988,000
This must be a method because a model may have properties that are of type self; this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
src/datadog_api_client/v2/model/security_filter_exclusion_filter.py
openapi_types
rchenzheng/datadog-api-client-python
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'name': (str,), 'query': (str,)}
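The dictionary returned above is what the generated client consults when validating inputs. As a hedged, self-contained sketch of that kind of check (this mirrors the idea, not the client's actual machinery):

openapi_types = {'name': (str,), 'query': (str,)}   # the mapping returned above
payload = {'name': 'exclude-staging', 'query': 'source:staging'}  # invented values

for attr, allowed in openapi_types.items():
    if not isinstance(payload[attr], allowed):
        raise TypeError(f'{attr} must be an instance of {allowed}')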
@convert_js_args_to_python_args def __init__(self, name, query, *args, **kwargs): 'SecurityFilterExclusionFilter - a model defined in OpenAPI\n\n Args:\n name (str): Exclusion filter name.\n query (str): Exclusion filter query. Logs that match this query are excluded from the security filter.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.name = name self.query = query for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value)
412,282,417,252,474,600
SecurityFilterExclusionFilter - a model defined in OpenAPI Args: name (str): Exclusion filter name. query (str): Exclusion filter query. Logs that match this query are excluded from the security filter. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
src/datadog_api_client/v2/model/security_filter_exclusion_filter.py
__init__
rchenzheng/datadog-api-client-python
python
@convert_js_args_to_python_args def __init__(self, name, query, *args, **kwargs): 'SecurityFilterExclusionFilter - a model defined in OpenAPI\n\n Args:\n name (str): Exclusion filter name.\n query (str): Exclusion filter query. Logs that match this query are excluded from the security filter.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.name = name self.query = query for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value)
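Hypothetical usage of the model defined above, assuming the generated datadog-api-client package is installed; the name and query values are invented for illustration:

from datadog_api_client.v2.model.security_filter_exclusion_filter import (
    SecurityFilterExclusionFilter,
)

# Arguments follow the signature documented above.
exclusion = SecurityFilterExclusionFilter(
    name='Exclude staging logs',   # illustrative value
    query='source:staging',        # illustrative value
)
print(exclusion.name, exclusion.query)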
@property def name(self): 'Plugin name\n \n Single word string describing the image format.\n Typical names: file, dicom, xnat\n ' return self.__name
-2,094,496,552,525,548,800
Plugin name Single word string describing the image format. Typical names: file, dicom, xnat
src/imagedata/transports/abstracttransport.py
name
erling6232/imagedata
python
@property def name(self): 'Plugin name\n \n Single word string describing the image format.\n Typical names: file, dicom, xnat\n ' return self.__name
@property def description(self): 'Plugin description\n \n Single line string describing the transport method.\n ' return self.__description
-8,066,041,259,654,710,000
Plugin description Single line string describing the transport method.
src/imagedata/transports/abstracttransport.py
description
erling6232/imagedata
python
@property def description(self): 'Plugin description\n \n Single line string describing the transport method.\n ' return self.__description
@property def authors(self): 'Plugin authors\n \n Multi-line string naming the author(s) of the plugin.\n ' return self.__authors
95,379,626,103,641,780
Plugin authors Multi-line string naming the author(s) of the plugin.
src/imagedata/transports/abstracttransport.py
authors
erling6232/imagedata
python
@property def authors(self): 'Plugin authors\n \n Multi-line string naming the author(s) of the plugin.\n ' return self.__authors
@property def version(self): 'Plugin version\n \n String giving the plugin version.\n Version scheme: 1.0.0\n ' return self.__version
1,751,831,071,949,942,000
Plugin version String giving the plugin version. Version scheme: 1.0.0
src/imagedata/transports/abstracttransport.py
version
erling6232/imagedata
python
@property def version(self): 'Plugin version\n \n String giving the plugin version.\n Version scheme: 1.0.0\n ' return self.__version
@property def url(self): 'Plugin URL\n \n URL string to the site of the plugin or the author(s).\n ' return self.__url
-8,174,879,225,647,273,000
Plugin URL URL string to the site of the plugin or the author(s).
src/imagedata/transports/abstracttransport.py
url
erling6232/imagedata
python
@property def url(self): 'Plugin URL\n \n URL string to the site of the plugin or the author(s).\n ' return self.__url
@property def schemes(self): 'List of transport schemes supported by this plugin.\n \n List of strings.\n ' return self.__schemes
5,426,225,605,520,074,000
List of transport schemes supported by this plugin. List of strings.
src/imagedata/transports/abstracttransport.py
schemes
erling6232/imagedata
python
@property def schemes(self): 'List of transport schemes supported by this plugin.\n \n List of strings.\n ' return self.__schemes
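The six read-only properties above all share one pattern: the value lives in a double-underscore attribute, which Python name-mangles to _ClassName__attr so subclasses cannot clobber it by accident. A minimal sketch of the pattern (AbstractTransport's constructor is not in this excerpt, so the __init__ here is an assumption for illustration):

class DemoTransport:
    def __init__(self):
        self.__name = 'file'        # actually stored as _DemoTransport__name
        self.__schemes = ['file']

    @property
    def name(self):
        return self.__name

    @property
    def schemes(self):
        return self.__schemes

t = DemoTransport()
print(t.name, t.schemes)            # -> file ['file']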
@abstractmethod def walk(self, top): 'Generate the file names in a directory tree by walking the tree.\n Input:\n - top: starting point for walk (str)\n Return:\n - tuples of (root, dirs, files) \n ' pass
-4,428,548,912,652,581,400
Generate the file names in a directory tree by walking the tree. Input: - top: starting point for walk (str) Return: - tuples of (root, dirs, files)
src/imagedata/transports/abstracttransport.py
walk
erling6232/imagedata
python
@abstractmethod def walk(self, top): 'Generate the file names in a directory tree by walking the tree.\n Input:\n - top: starting point for walk (str)\n Return:\n - tuples of (root, dirs, files) \n ' pass
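For a local-filesystem transport, the documented (root, dirs, files) contract lines up directly with os.walk. One plausible concrete implementation, offered as an illustration rather than the repository's actual plugin:

import os

class LocalWalker:
    def walk(self, top):
        # os.walk already yields (root, dirs, files) tuples in this shape.
        yield from os.walk(top)

for root, dirs, files in LocalWalker().walk('.'):
    print(root, len(dirs), len(files))
    break   # show just the first directory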
@abstractmethod def isfile(self, path): 'Return True if path is an existing regular file.\n ' pass
-6,828,466,188,773,404,000
Return True if path is an existing regular file.
src/imagedata/transports/abstracttransport.py
isfile
erling6232/imagedata
python
@abstractmethod def isfile(self, path): pass
@abstractmethod def open(self, path, mode='r'): 'Extract a member from the archive as a file-like object.\n ' pass
7,691,118,222,133,349,000
Extract a member from the archive as a file-like object.
src/imagedata/transports/abstracttransport.py
open
erling6232/imagedata
python
@abstractmethod def open(self, path, mode='r'): pass
@abstractmethod def close(self): 'Close the transport\n ' pass
190,842,864,219,467,870
Close the transport
src/imagedata/transports/abstracttransport.py
close
erling6232/imagedata
python
@abstractmethod def close(self): pass
@abstractmethod def info(self, path) -> str: 'Return info describing the object\n\n Args:\n path (str): object path\n\n Returns:\n description (str): Preferably a one-line string describing the object\n ' pass
-3,224,270,684,163,788,300
Return info describing the object Args: path (str): object path Returns: description (str): Preferably a one-line string describing the object
src/imagedata/transports/abstracttransport.py
info
erling6232/imagedata
python
@abstractmethod def info(self, path) -> str: 'Return info describing the object\n\n Args:\n path (str): object path\n\n Returns:\n description (str): Preferably a one-line string describing the object\n ' pass
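Since walk, isfile, open, close, and info are all @abstractmethod, the base class cannot be instantiated until a plugin implements every one of them. A self-contained sketch of that enforcement:

from abc import ABC, abstractmethod

class Base(ABC):
    @abstractmethod
    def info(self, path) -> str:
        ...

try:
    Base()   # abstract method 'info' is still unimplemented
except TypeError as e:
    print(e)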
def __init__(self, repository_structure=None, last_updated=None): 'TWCRepositoryInfoResponse - a model defined in OpenAPI' self._repository_structure = None self._last_updated = None self.discriminator = None self.repository_structure = repository_structure self.last_updated = last_updated
8,527,874,243,199,928,000
TWCRepositoryInfoResponse - a model defined in OpenAPI
iqs_client/models/twc_repository_info_response.py
__init__
thomas-bc/mms-autocref
python
def __init__(self, repository_structure=None, last_updated=None): self._repository_structure = None self._last_updated = None self.discriminator = None self.repository_structure = repository_structure self.last_updated = last_updated
@property def repository_structure(self): 'Gets the repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n\n\n :return: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n :rtype: TWCRepositoryStructure\n ' return self._repository_structure
7,800,254,736,595,410,000
Gets the repository_structure of this TWCRepositoryInfoResponse. # noqa: E501 :return: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501 :rtype: TWCRepositoryStructure
iqs_client/models/twc_repository_info_response.py
repository_structure
thomas-bc/mms-autocref
python
@property def repository_structure(self): 'Gets the repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n\n\n :return: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n :rtype: TWCRepositoryStructure\n ' return self._repository_structure
@repository_structure.setter def repository_structure(self, repository_structure): 'Sets the repository_structure of this TWCRepositoryInfoResponse.\n\n\n :param repository_structure: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n :type: TWCRepositoryStructure\n ' if (repository_structure is None): raise ValueError('Invalid value for `repository_structure`, must not be `None`') self._repository_structure = repository_structure
-1,284,167,347,242,605,600
Sets the repository_structure of this TWCRepositoryInfoResponse. :param repository_structure: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501 :type: TWCRepositoryStructure
iqs_client/models/twc_repository_info_response.py
repository_structure
thomas-bc/mms-autocref
python
@repository_structure.setter def repository_structure(self, repository_structure): 'Sets the repository_structure of this TWCRepositoryInfoResponse.\n\n\n :param repository_structure: The repository_structure of this TWCRepositoryInfoResponse. # noqa: E501\n :type: TWCRepositoryStructure\n ' if (repository_structure is None): raise ValueError('Invalid value for `repository_structure`, must not be `None`') self._repository_structure = repository_structure
@property def last_updated(self): 'Gets the last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n\n\n :return: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n :rtype: str\n ' return self._last_updated
5,827,634,590,501,253,000
Gets the last_updated of this TWCRepositoryInfoResponse. # noqa: E501 :return: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501 :rtype: str
iqs_client/models/twc_repository_info_response.py
last_updated
thomas-bc/mms-autocref
python
@property def last_updated(self): 'Gets the last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n\n\n :return: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n :rtype: str\n ' return self._last_updated
@last_updated.setter def last_updated(self, last_updated): 'Sets the last_updated of this TWCRepositoryInfoResponse.\n\n\n :param last_updated: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n :type: str\n ' if (last_updated is None): raise ValueError('Invalid value for `last_updated`, must not be `None`') self._last_updated = last_updated
-2,645,519,415,813,660,000
Sets the last_updated of this TWCRepositoryInfoResponse. :param last_updated: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501 :type: str
iqs_client/models/twc_repository_info_response.py
last_updated
thomas-bc/mms-autocref
python
@last_updated.setter def last_updated(self, last_updated): 'Sets the last_updated of this TWCRepositoryInfoResponse.\n\n\n :param last_updated: The last_updated of this TWCRepositoryInfoResponse. # noqa: E501\n :type: str\n ' if (last_updated is None): raise ValueError('Invalid value for `last_updated`, must not be `None`') self._last_updated = last_updated
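Both setters share the same None-guard, so a required field fails fast on assignment as well as at construction time. A self-contained sketch of the behavior with a stand-in class and invented values:

class Demo:
    @property
    def last_updated(self):
        return self._last_updated

    @last_updated.setter
    def last_updated(self, last_updated):
        # Same guard as in the setter above.
        if last_updated is None:
            raise ValueError('Invalid value for `last_updated`, must not be `None`')
        self._last_updated = last_updated

d = Demo()
d.last_updated = '2024-01-01T00:00:00Z'   # accepted
try:
    d.last_updated = None                  # rejected
except ValueError as e:
    print(e)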
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
8,442,519,487,048,767,000
Returns the model properties as a dict
iqs_client/models/twc_repository_info_response.py
to_dict
thomas-bc/mms-autocref
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
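to_dict serializes nested models by duck typing: any value exposing its own to_dict is converted in place, including inside lists and dict values. A hedged, stand-alone illustration of that branching, written as a recursive helper (which slightly generalizes the one-level dict handling above):

class Leaf:
    def to_dict(self):
        return {'x': 1}

def serialize(value):
    # Mirrors the branching in to_dict above.
    if hasattr(value, 'to_dict'):
        return value.to_dict()
    if isinstance(value, list):
        return [serialize(v) for v in value]
    if isinstance(value, dict):
        return {k: serialize(v) for k, v in value.items()}
    return value

print(serialize({'single': Leaf(), 'many': [Leaf(), 2]}))
# -> {'single': {'x': 1}, 'many': [{'x': 1}, 2]}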
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
iqs_client/models/twc_repository_info_response.py
to_str
thomas-bc/mms-autocref
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
iqs_client/models/twc_repository_info_response.py
__repr__
thomas-bc/mms-autocref
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, TWCRepositoryInfoResponse)): return False return (self.__dict__ == other.__dict__)
-3,533,175,412,495,738,400
Returns true if both objects are equal
iqs_client/models/twc_repository_info_response.py
__eq__
thomas-bc/mms-autocref
python
def __eq__(self, other): if (not isinstance(other, TWCRepositoryInfoResponse)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
iqs_client/models/twc_repository_info_response.py
__ne__
thomas-bc/mms-autocref
python
def __ne__(self, other): return (not (self == other))
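The explicit __ne__ is a Python 2 holdover; Python 3 already derives != from a defined __eq__. A minimal stand-in showing the resulting equality contract (attribute-by-attribute comparison via __dict__):

class Pair:
    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        if not isinstance(other, Pair):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)

print(Pair(1) == Pair(1), Pair(1) != Pair(2))   # -> True True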
def visualize_scenario(scenario, cps=None): '\n Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).\n :param scenario: Either a list of worlds, each world representing a single scene or a single world representing a\n whole scenario\n :param cps: A list of criticality phenomena which optionally to visualize as well.\n :return: The path to the directory in which to find the created HTML visualization.\n ' pl_html = [] scenario_inst = None if (cps is None): cps = [] if (type(scenario) == list): scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)[0] for scene_world in scenario] elif ((type(scenario) == owlready2.namespace.World) or (type(scenario) == owlready2.World)): tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario) scenario_inst = scenario.search(type=tm.Scenario)[0] scenes = list(filter((lambda x: (tm.Scene in x.is_a)), scenario_inst.has_traffic_model)) else: raise ValueError scenes = sorted(scenes, key=(lambda x: x.inTimePosition[0].numericPosition[0])) title = 'Scenario' if (scenario_inst and hasattr(scenario_inst, 'identifier') and (len(scenario_inst.identifier) > 0)): title += (' ' + str(scenario_inst.identifier[0])) scenario_info = (('(' + str(len(scenes))) + ' Scenes)') html_body = (((((((((('<!DOCTYPE html>\n<html>\n <head>\n <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">\n <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>\n <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>\n <meta charset="utf-8">' + scenario_css) + '\n <title>') + title) + '</title>\n </head>\n <body>\n <div class="d-flex flex-row justify-content-center"><div class="mt-3 py-1 px-6 alert alert-info" style="display: inline-block" role="alert"><center><h5>') + title) + ' ') + scenario_info) + '</h5></center></div></div>\n <div class="slidecontainer m-2">\n <input type="range" min="1" max="') + str(len(scenes))) + '" value="1" class="slider" id="myRange">\n </div>\n <script>\n var slider = document.getElementById("myRange");\n var last_set = 1\n var show_all_cps = true\n slider.oninput = function() {\n var output = document.getElementById("plt" + this.value);\n var last_output = document.getElementById("plt" + last_set);\n last_output.style.display = \'none\';\n output.style.display = \'block\';\n last_set = this.value\n }\n function toggle_cps_all_iframes() {\n show_all_cps = !show_all_cps\n $(".cp-all-button").each(function(i) {\n if (show_all_cps) {\n this.parentElement.classList.add("active")\n this.checked = true\n } else {\n this.parentElement.classList.remove("active")\n this.checked = false\n }\n })\n $(".cp-button").each(function(i) {\n if (show_all_cps) {\n this.parentElement.classList.add("active")\n this.checked = true\n } else {\n this.parentElement.classList.remove("active")\n this.checked = false\n }\n })\n $(".scene-plot").each(function(i) {\n this.contentWindow.toggle_cps(show_all_cps)\n })\n }\n function toggle_cp_class(ele, cp_cls_id) {\n // 0. disable automatically checked checkbox (will be added again at step 3)\n ele.checked = !ele.checked\n // 1. 
find active scene plot\n active_scene = $(".scene-plot-container").filter(function(i) {\n return this.style.display !== "none"\n })[0]\n // 2. get CP pred. str for given cp_cls_id\n cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]\n // 3. Toggle all buttons for this CP pred\n $("label > span:contains(" + cp_pred + ")").each(function(i) {\n this.parentElement.classList.toggle("active")\n this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked\n })\n // 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index\n $(".scene-plot").each(function(k) {\n cp_cls_id_scene = -1\n for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {\n if (cp_pred === this.contentWindow.cp_predicates[i]) {\n cp_cls_id_scene = i\n }\n }\n if (cp_cls_id_scene >= 0) {\n this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)\n }\n })\n }\n </script>\n ') pl_html.append(html_body) iframes = [] def get_color(p): _LUMA_LIMIT = 170 color = 0 luma = _LUMA_LIMIT while (luma >= _LUMA_LIMIT): color = random.randrange(0, 16777215, 15) luma = (((0.2126 * ((color >> 16) & 255)) + (0.7152 * ((color >> 8) & 255))) + (0.0722 * ((color >> 0) & 255))) return ('#' + ('%06x' % color)) for (i, scene) in enumerate(scenes): logger.info(((('Plotting scene ' + str((i + 1))) + ' / ') + str(len(scenes)))) scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)] cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects])))) cp_color = 0 no_geo_entities = [] width = 24.5 height = 10 try: primary_screens = list(filter((lambda x: x.is_primary), screeninfo.get_monitors())) if (len(primary_screens) > 0): width = ((primary_screens[0].width_mm / 25.4) * 0.73) height = ((primary_screens[0].height_mm / 25.4) * 0.73) except screeninfo.common.ScreenInfoError: logger.info((((('No screens found, using default plot size of ' + str(width)) + ' in x ') + str(height)) + ' in')) fig = plt.figure(figsize=(width, height)) plt.axis('equal') entity_labels = [] entity_relations = [] relations_per_cp_class = dict() cps_relations = [] cps_for_tooltips = [] centroids_x = [] centroids_y = [] plotted_labels = [] entity_points = dict() traffic_entities = tqdm(scene.has_traffic_entity) for entity in traffic_entities: traffic_entities.set_description(str(entity)) if (len(entity.hasGeometry) > 0): for geo in entity.hasGeometry: shape = wkt.loads(geo.asWKT[0]) entity_cp_relations = [] points = None if hasattr(shape, 'exterior'): points = shape.exterior.xy try: hasattr(shape, 'coords') points = shape.coords.xy except NotImplementedError: pass if points: if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y)).any(): x = (shape.centroid.x + 0.0) y = (shape.centroid.y + 0.8) plt.plot((shape.centroid.x, x), (shape.centroid.y, y), 'k-') else: x = shape.centroid.x y = shape.centroid.y entity_points[entity] = (x, y) centroids_x.append(x) centroids_y.append(y) plt.plot(*points, alpha=0.6) if (auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in entity.INDIRECT_is_a): plt.fill(*points, alpha=0.3) if (entity.has_yaw is not None): x_dir = (0.9 * math.cos(math.radians(entity.has_yaw))) y_dir = (0.9 * math.sin(math.radians(entity.has_yaw))) plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape='full', length_includes_head=True, color='gray', alpha=0.6, head_width=1) 
entity_labels.append(_describe_entity(entity)) entity_scene_cps = list(filter((lambda scp: (entity in scp.subjects)), scene_cps)) if (len(entity_scene_cps) > 0): plt.plot(x, y, 'o', color='r', mec='k', markersize=3, alpha=1) ent_color = 'red' else: ent_color = 'black' if (entity.identifier and (len(entity.identifier) > 0) and (not entity.is_persistent) and (not (isinstance(entity.identifier[0], str) and entity.identifier[0].startswith('repr')))): plt.annotate(entity.identifier[0], ((x + 0.2), (y + 0.2)), color=ent_color) already_drawn_cps = [] for cp in entity_scene_cps: if (cp.predicate not in relations_per_cp_class.keys()): relations_per_cp_class[cp.predicate] = [] for cp in entity_scene_cps: if (cp not in already_drawn_cps): same_line_cps = [x for x in entity_scene_cps if ([y for z in x.objects.values() for y in z] == [y for z in cp.objects.values() for y in z])] labels = [(x.predicate.split('(')[0], (x.predicate.split('(')[1].replace(')', ''), str(x))) for x in same_line_cps] already_drawn_cps += same_line_cps subj_x = x subj_y = y for objs in cp.objects.values(): for obj in objs: if (len(obj.hasGeometry) > 0): if (obj in entity_points.keys()): obj_x = entity_points[obj][0] obj_y = entity_points[obj][1] else: geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0]) obj_x = geom_o.centroid.x obj_y = geom_o.centroid.y m = ((obj_y - subj_y) / (obj_x - subj_x)) b = (subj_y - (m * subj_x)) head_width = 0.2 head_length = (1.5 * head_width) arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y), color=cp_colors[cp_color], shape='full', length_includes_head=True, head_width=head_width, head_length=head_length) if (len(labels[0]) > 1): label_row = ' '.join([label[0] for label in labels]) else: label_row = labels[0] x_offset = (((len(label_row) * 0.055) / 2) - 0.055) if (subj_x > obj_x): label_x = ((obj_x + (abs((subj_x - obj_x)) / 2)) - x_offset) else: label_x = ((obj_x - (abs((subj_x - obj_x)) / 2)) - x_offset) a = math.degrees(math.atan(m)) for (l_i, label) in enumerate(labels): label_string = label[0].replace('CP_', '') label_len = ((len(label_string) * 0.09) + 0.1) label_x_offset = abs((math.cos(math.atan(m)) * label_len)) while True: label_y = (((m * label_x) + b) + 0.05) label_x_1 = ((label_x - (label_x_offset / 2)) + 0.05) label_y_1 = ((m * label_x_1) + b) label_x_2 = ((label_x + (label_x_offset / 2)) + 0.05) label_y_2 = ((m * label_x_2) + b) label_line1 = geometry.LineString([(label_x_1, label_y_1), (label_x_2, label_y_2)]) new_bb = label_line1.buffer(0.1, cap_style=2) new_bb_rect = list(zip(*new_bb.exterior.xy))[:(- 1)] if ((not _AVOID_LABEL_COLLISIONS) or (not _has_collision_with_bbs(plotted_labels, new_bb_rect))): break label_x += (label_x_offset / 10) annot = plt.annotate(label_string, (label_x, label_y), color=cp_colors[cp_color], rotation=a, fontsize=2, rotation_mode='anchor') entity_cp_relations.append(annot) cps_relations.append(annot) relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow] cps_for_tooltips.append(same_line_cps[l_i]) plotted_labels.append(new_bb_rect) label_x += label_x_offset subj_x = obj_x subj_y = obj_y entity_cp_relations += [arrow] cp_color = ((cp_color + 1) % len(cp_colors)) entity_relations.append(entity_cp_relations) elif (len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0): no_geo_entities.append(_describe_entity(entity)) logger.info('Done with layout, creating MPLD3 plot, JS plugins, and HTML string') pl2 = plt.plot(centroids_x, centroids_y, 'o', color='b', mec='k', markersize=2, mew=1, 
alpha=0.4) tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations, targets_per_cp=relations_per_cp_class) fig.tight_layout() mpld3.plugins.connect(fig, tooltip_individuals) for (h, cp_text) in enumerate(cps_relations): tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h]) mpld3.plugins.connect(fig, tooltip_cp) html = (('\n\t\t<div class="container-fluid scene-plot-container" id="plt' + str((i + 1))) + '" style ="') if (i != 0): html += 'display: none;' html += '">' html += '\n <div class="row">\n <div class="col-md-1">\n ' cp_count_total = len([x for x in cps if ((isinstance(x.traffic_model, list) and (scene in x.traffic_model)) or (x.traffic_model == scenario_inst))]) html += ('<div class="">\n <label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">\n <input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>\n <span>Show all criticality phenomena (%s)</span>\n </label>' % ('100%', str(i), str(cp_count_total))) for (l, pred) in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)): cp_count = len([x for x in cps if ((x.predicate == pred) and ((isinstance(x.traffic_model, list) and (scene in x.traffic_model)) or (x.traffic_model == scenario_inst)))]) html += ('\n <br />\n <label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">\n <input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>\n <span>%s (%s)</span>\n </label>' % ('100%', str(i), str(l), str(l), pred, str(cp_count))) html += '\n </div>\n </div>\n <div class="col-md-11">\n ' html += '<div class="embed-responsive embed-responsive-16by9">\n' html += (((('\t\t\t\t\t\t<iframe class="scene-plot" src="scene' + str((i + 1))) + '.html" class="embed-responsive-item" style="width: 100%; height: ') + str((height * 1.27))) + 'in" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n') iframe_html = '<!DOCTYPE html>\n<html>\n <head>\n <meta charset="utf-8">\n <meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">\n <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">\n <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>\n <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>\n </head>\n <body>' iframe_html += scene_css iframe_html += '\n <div class="d-flex flex-row justify-content-center">\n <div class="btn-group btn-group-toggle" data-bs-toggle="buttons">\n <label class="btn btn-secondary active">\n <input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals\n </label>\n <label class="btn btn-secondary active">\n <input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip\n </label>\n </div>\n </div>\n <script>\n var show_tooltips = true\n var show_long_ind = true\n cps = []\n cp_targets = []\n cp_targets_per_class = []\n function toggle_tooltips(ele) {\n ele.parentElement.classList.toggle("active")\n show_tooltips = !show_tooltips\n }\n function toggle_all_ind_relations(ele) {\n 
ele.parentElement.classList.toggle("active")\n show_long_ind = !show_long_ind\n }\n function toggle_cp_targets(targets, state) {\n for (let j = 0; j < targets.length; j++) {\n var x = mpld3.get_element(targets[j])\n if (x) {\n if ("path" in x) {\n tog = x.path\n } else if ("obj" in x) {\n tog = x.obj\n }\n for (var k = 0; k < tog._groups.length; k++) {\n for (var l = 0; l < tog._groups[k].length; l++){\n if (state) {\n tog._groups[k][l].style.display = "block"\n } else {\n tog._groups[k][l].style.display = "none"\n }\n }\n }\n }\n }\n }\n function toggle_cps(state) {\n for (let i = 0; i < cp_targets.length; i++) {\n toggle_cp_targets(cp_targets[i], state)\n }\n }\n function toggle_cp_class(cp_class, state) {\n targets = cp_targets_per_class[cp_class]\n toggle_cp_targets(targets, state)\n }\n </script>\n <div class="card m-2">\n <div class="card-title d-flex flex-row justify-content-center m-1">\n <h5>' if ((len(scene.inTimePosition) > 0) and (len(scene.inTimePosition[0].numericPosition) > 0)): time = ('%.2f s' % scene.inTimePosition[0].numericPosition[0]) if (scenario_inst and (len(scenario_inst.hasEnd) > 0) and (len(scenario_inst.hasEnd[0].inTimePosition) > 0) and (len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0)): time += (' / %.2f s' % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]) else: time += (' / ' + str(len(scenes))) else: time = ((str(i) + ' / ') + str(len(scenes))) iframe_html += (('Scene ' + time) + '<br />') iframe_html += '\n </h5>\n </div>\n <div class="card-body m-0 p-0 d-flex justify-content-center">\n ' scene_html = mpld3.fig_to_html(fig) iframe_html += ''.join(((('\t\t' + line) + '\n') for line in scene_html.splitlines())) iframe_html += '\n </div>\n </div>' if (len(no_geo_entities) > 0): iframe_html += ('\n <div class="d-flex flex-row justify-content-center">\n <a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">\n Show scene individuals with no geometric representation (%s)\n </a>\n </div>\n <div class="container-fluid collapse" id="noGeoCollapse">\n <div class="card card-body m-2">' % str(len(no_geo_entities))) iframe_html += ''.join(no_geo_entities) iframe_html += '\n </div>\n </div>' iframe_html += '\t</body>\n</html>' iframes.append(iframe_html) html += '\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>' pl_html.append(html) pl_html.append('\n\t</body>\n</html>') tmp_dir = tempfile.mkdtemp() index_path = (tmp_dir + '/index.html') with open(index_path, 'w') as file: for html in pl_html: file.write(html) for (i, iframe) in enumerate(iframes): frame_path = (((tmp_dir + '/scene') + str((i + 1))) + '.html') with open(frame_path, 'w') as file: for html in iframe: file.write(html) os.chdir(tmp_dir) threading.Thread(target=socketserver.TCPServer(('', 8000), http.server.SimpleHTTPRequestHandler).serve_forever).start() logger.info('Visualization is available at: http://localhost:8000') webbrowser.open('http://localhost:8000') return tmp_dir
3,352,478,717,109,895,000
Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking). :param scenario: Either a list of worlds, each world representing a single scene or a single world representing a whole scenario :param cps: A list of criticality phenomena to optionally visualize as well. :return: The path to the directory in which to find the created HTML visualization.
auto/auto_visualizer/auto_visualizer.py
visualize_scenario
lu-w/criticality-recognition
python
def visualize_scenario(scenario, cps=None): '\n Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).\n :param scenario: Either a list of worlds, each world representing a single scene or a single world representing a\n whole scenario\n :param cps: A list of criticality phenomena which optionally to visualize as well.\n :return: The path to the directory in which to find the created HTML visualization.\n ' pl_html = [] scenario_inst = None if (cps is None): cps = [] if (type(scenario) == list): scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)[0] for scene_world in scenario] elif ((type(scenario) == owlready2.namespace.World) or (type(scenario) == owlready2.World)): tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario) scenario_inst = scenario.search(type=tm.Scenario)[0] scenes = list(filter((lambda x: (tm.Scene in x.is_a)), scenario_inst.has_traffic_model)) else: raise ValueError scenes = sorted(scenes, key=(lambda x: x.inTimePosition[0].numericPosition[0])) title = 'Scenario' if (scenario_inst and hasattr(scenario_inst, 'identifier') and (len(scenario_inst.identifier) > 0)): title += (' ' + str(scenario_inst.identifier[0])) scenario_info = (('(' + str(len(scenes))) + ' Scenes)') html_body = (((((((((('<!DOCTYPE html>\n<html>\n <head>\n <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">\n <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>\n <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>\n <meta charset="utf-8">' + scenario_css) + '\n <title>') + title) + '</title>\n </head>\n <body>\n <div class="d-flex flex-row justify-content-center"><div class="mt-3 py-1 px-6 alert alert-info" style="display: inline-block" role="alert"><center><h5>') + title) + ' ') + scenario_info) + '</h5></center></div></div>\n <div class="slidecontainer m-2">\n <input type="range" min="1" max="') + str(len(scenes))) + '" value="1" class="slider" id="myRange">\n </div>\n <script>\n var slider = document.getElementById("myRange");\n var last_set = 1\n var show_all_cps = true\n slider.oninput = function() {\n var output = document.getElementById("plt" + this.value);\n var last_output = document.getElementById("plt" + last_set);\n last_output.style.display = \'none\';\n output.style.display = \'block\';\n last_set = this.value\n }\n function toggle_cps_all_iframes() {\n show_all_cps = !show_all_cps\n $(".cp-all-button").each(function(i) {\n if (show_all_cps) {\n this.parentElement.classList.add("active")\n this.checked = true\n } else {\n this.parentElement.classList.remove("active")\n this.checked = false\n }\n })\n $(".cp-button").each(function(i) {\n if (show_all_cps) {\n this.parentElement.classList.add("active")\n this.checked = true\n } else {\n this.parentElement.classList.remove("active")\n this.checked = false\n }\n })\n $(".scene-plot").each(function(i) {\n this.contentWindow.toggle_cps(show_all_cps)\n })\n }\n function toggle_cp_class(ele, cp_cls_id) {\n // 0. disable automatically checked checkbox (will be added again at step 3)\n ele.checked = !ele.checked\n // 1. 
find active scene plot\n active_scene = $(".scene-plot-container").filter(function(i) {\n return this.style.display !== "none"\n })[0]\n // 2. get CP pred. str for given cp_cls_id\n cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]\n // 3. Toggle all buttons for this CP pred\n $("label > span:contains(" + cp_pred + ")").each(function(i) {\n this.parentElement.classList.toggle("active")\n this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked\n })\n // 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index\n $(".scene-plot").each(function(k) {\n cp_cls_id_scene = -1\n for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {\n if (cp_pred === this.contentWindow.cp_predicates[i]) {\n cp_cls_id_scene = i\n }\n }\n if (cp_cls_id_scene >= 0) {\n this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)\n }\n })\n }\n </script>\n ') pl_html.append(html_body) iframes = [] def get_color(p): _LUMA_LIMIT = 170 color = 0 luma = _LUMA_LIMIT while (luma >= _LUMA_LIMIT): color = random.randrange(0, 16777215, 15) luma = (((0.2126 * ((color >> 16) & 255)) + (0.7152 * ((color >> 8) & 255))) + (0.0722 * ((color >> 0) & 255))) return ('#' + ('%06x' % color)) for (i, scene) in enumerate(scenes): logger.info(((('Plotting scene ' + str((i + 1))) + ' / ') + str(len(scenes)))) scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)] cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects])))) cp_color = 0 no_geo_entities = [] width = 24.5 height = 10 try: primary_screens = list(filter((lambda x: x.is_primary), screeninfo.get_monitors())) if (len(primary_screens) > 0): width = ((primary_screens[0].width_mm / 25.4) * 0.73) height = ((primary_screens[0].height_mm / 25.4) * 0.73) except screeninfo.common.ScreenInfoError: logger.info((((('No screens found, using default plot size of ' + str(width)) + ' in x ') + str(height)) + ' in')) fig = plt.figure(figsize=(width, height)) plt.axis('equal') entity_labels = [] entity_relations = [] relations_per_cp_class = dict() cps_relations = [] cps_for_tooltips = [] centroids_x = [] centroids_y = [] plotted_labels = [] entity_points = dict() traffic_entities = tqdm(scene.has_traffic_entity) for entity in traffic_entities: traffic_entities.set_description(str(entity)) if (len(entity.hasGeometry) > 0): for geo in entity.hasGeometry: shape = wkt.loads(geo.asWKT[0]) entity_cp_relations = [] points = None if hasattr(shape, 'exterior'): points = shape.exterior.xy try: hasattr(shape, 'coords') points = shape.coords.xy except NotImplementedError: pass if points: if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y)).any(): x = (shape.centroid.x + 0.0) y = (shape.centroid.y + 0.8) plt.plot((shape.centroid.x, x), (shape.centroid.y, y), 'k-') else: x = shape.centroid.x y = shape.centroid.y entity_points[entity] = (x, y) centroids_x.append(x) centroids_y.append(y) plt.plot(*points, alpha=0.6) if (auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in entity.INDIRECT_is_a): plt.fill(*points, alpha=0.3) if (entity.has_yaw is not None): x_dir = (0.9 * math.cos(math.radians(entity.has_yaw))) y_dir = (0.9 * math.sin(math.radians(entity.has_yaw))) plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape='full', length_includes_head=True, color='gray', alpha=0.6, head_width=1) 
entity_labels.append(_describe_entity(entity)) entity_scene_cps = list(filter((lambda scp: (entity in scp.subjects)), scene_cps)) if (len(entity_scene_cps) > 0): plt.plot(x, y, 'o', color='r', mec='k', markersize=3, alpha=1) ent_color = 'red' else: ent_color = 'black' if (entity.identifier and (len(entity.identifier) > 0) and (not entity.is_persistent) and (not (isinstance(entity.identifier[0], str) and entity.identifier[0].startswith('repr')))): plt.annotate(entity.identifier[0], ((x + 0.2), (y + 0.2)), color=ent_color) already_drawn_cps = [] for cp in entity_scene_cps: if (cp.predicate not in relations_per_cp_class.keys()): relations_per_cp_class[cp.predicate] = [] for cp in entity_scene_cps: if (cp not in already_drawn_cps): same_line_cps = [x for x in entity_scene_cps if ([y for z in x.objects.values() for y in z] == [y for z in cp.objects.values() for y in z])] labels = [(x.predicate.split('(')[0], (x.predicate.split('(')[1].replace(')', ), str(x))) for x in same_line_cps] already_drawn_cps += same_line_cps subj_x = x subj_y = y for objs in cp.objects.values(): for obj in objs: if (len(obj.hasGeometry) > 0): if (obj in entity_points.keys()): obj_x = entity_points[obj][0] obj_y = entity_points[obj][1] else: geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0]) obj_x = geom_o.centroid.x obj_y = geom_o.centroid.y m = ((obj_y - subj_y) / (obj_x - subj_x)) b = (subj_y - (m * subj_x)) head_width = 0.2 head_length = (1.5 * head_width) arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y), color=cp_colors[cp_color], shape='full', length_includes_head=True, head_width=head_width, head_length=head_length) if (len(labels[0]) > 1): label_row = ' '.join([label[0] for label in labels]) else: label_row = labels[0] x_offset = (((len(label_row) * 0.055) / 2) - 0.055) if (subj_x > obj_x): label_x = ((obj_x + (abs((subj_x - obj_x)) / 2)) - x_offset) else: label_x = ((obj_x - (abs((subj_x - obj_x)) / 2)) - x_offset) a = math.degrees(math.atan(m)) for (l_i, label) in enumerate(labels): label_string = label[0].replace('CP_', ) label_len = ((len(label_string) * 0.09) + 0.1) label_x_offset = abs((math.cos(math.atan(m)) * label_len)) while True: label_y = (((m * label_x) + b) + 0.05) label_x_1 = ((label_x - (label_x_offset / 2)) + 0.05) label_y_1 = ((m * label_x_1) + b) label_x_2 = ((label_x + (label_x_offset / 2)) + 0.05) label_y_2 = ((m * label_x_2) + b) label_line1 = geometry.LineString([(label_x_1, label_y_1), (label_x_2, label_y_2)]) new_bb = label_line1.buffer(0.1, cap_style=2) new_bb_rect = list(zip(*new_bb.exterior.xy))[:(- 1)] if ((not _AVOID_LABEL_COLLISIONS) or (not _has_collision_with_bbs(plotted_labels, new_bb_rect))): break label_x += (label_x_offset / 10) annot = plt.annotate(label_string, (label_x, label_y), color=cp_colors[cp_color], rotation=a, fontsize=2, rotation_mode='anchor') entity_cp_relations.append(annot) cps_relations.append(annot) relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow] cps_for_tooltips.append(same_line_cps[l_i]) plotted_labels.append(new_bb_rect) label_x += label_x_offset subj_x = obj_x subj_y = obj_y entity_cp_relations += [arrow] cp_color = ((cp_color + 1) % len(cp_colors)) entity_relations.append(entity_cp_relations) elif (len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0): no_geo_entities.append(_describe_entity(entity)) logger.info('Done with layout, creating MPLD3 plot, JS plugins, and HTML string') pl2 = plt.plot(centroids_x, centroids_y, 'o', color='b', mec='k', markersize=2, mew=1, 
alpha=0.4) tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations, targets_per_cp=relations_per_cp_class) fig.tight_layout() mpld3.plugins.connect(fig, tooltip_individuals) for (h, cp_text) in enumerate(cps_relations): tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h]) mpld3.plugins.connect(fig, tooltip_cp) html = (('\n\t\t<div class="container-fluid scene-plot-container" id="plt' + str((i + 1))) + '" style ="') if (i != 0): html += 'display: none;' html += '">' html += '\n <div class="row">\n <div class="col-md-1">\n ' cp_count_total = len([x for x in cps if ((isinstance(x.traffic_model, list) and (scene in x.traffic_model)) or (x.traffic_model == scenario_inst))]) html += ('<div class=>\n <label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">\n <input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>\n <span>Show all criticality phenomena (%s)</span>\n </label>' % ('100%', str(i), str(cp_count_total))) for (l, pred) in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)): cp_count = len([x for x in cps if ((x.predicate == pred) and ((isinstance(x.traffic_model, list) and (scene in x.traffic_model)) or (x.traffic_model == scenario_inst)))]) html += ('\n <br />\n <label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">\n <input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>\n <span>%s (%s)</span>\n </label>' % ('100%', str(i), str(l), str(l), pred, str(cp_count))) html += '\n </div>\n </div>\n <div class="col-md-11">\n ' html += '<div class="embed-responsive embed-responsive-16by9">\n' html += (((('\t\t\t\t\t\t<iframe class="scene-plot" src="scene' + str((i + 1))) + '.html" class="embed-responsive-item" style="width: 100%; height: ') + str((height * 1.27))) + 'in" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n') iframe_html = '<!DOCTYPE html>\n<html>\n <head>\n <meta charset="utf-8">\n <meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">\n <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">\n <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>\n <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>\n </head>\n <body>' iframe_html += scene_css iframe_html += '\n <div class="d-flex flex-row justify-content-center">\n <div class="btn-group btn-group-toggle" data-bs-toggle="buttons">\n <label class="btn btn-secondary active">\n <input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals\n </label>\n <label class="btn btn-secondary active">\n <input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip\n </label>\n </div>\n </div>\n <script>\n var show_tooltips = true\n var show_long_ind = true\n cps = []\n cp_targets = []\n cp_targets_per_class = []\n function toggle_tooltips(ele) {\n ele.parentElement.classList.toggle("active")\n show_tooltips = !show_tooltips\n }\n function toggle_all_ind_relations(ele) {\n 
ele.parentElement.classList.toggle("active")\n show_long_ind = !show_long_ind\n }\n function toggle_cp_targets(targets, state) {\n for (let j = 0; j < targets.length; j++) {\n var x = mpld3.get_element(targets[j])\n if (x) {\n if ("path" in x) {\n tog = x.path\n } else if ("obj" in x) {\n tog = x.obj\n }\n for (var k = 0; k < tog._groups.length; k++) {\n for (var l = 0; l < tog._groups[k].length; l++){\n if (state) {\n tog._groups[k][l].style.display = "block"\n } else {\n tog._groups[k][l].style.display = "none"\n }\n }\n }\n }\n }\n }\n function toggle_cps(state) {\n for (let i = 0; i < cp_targets.length; i++) {\n toggle_cp_targets(cp_targets[i], state)\n }\n }\n function toggle_cp_class(cp_class, state) {\n targets = cp_targets_per_class[cp_class]\n toggle_cp_targets(targets, state)\n }\n </script>\n <div class="card m-2">\n <div class="card-title d-flex flex-row justify-content-center m-1">\n <h5>' if ((len(scene.inTimePosition) > 0) and (len(scene.inTimePosition[0].numericPosition) > 0)): time = ('%.2f s' % scene.inTimePosition[0].numericPosition[0]) if (scenario_inst and (len(scenario_inst.hasEnd) > 0) and (len(scenario_inst.hasEnd[0].inTimePosition) > 0) and (len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0)): time += (' / %.2f s' % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]) else: time += (' / ' + str(len(scenes))) else: time = ((str(i) + ' / ') + str(len(scenes))) iframe_html += (('Scene ' + time) + '<br />') iframe_html += '\n </h5>\n </div>\n <div class="card-body m-0 p-0 d-flex justify-content-center">\n ' scene_html = mpld3.fig_to_html(fig) iframe_html += ''.join(((('\t\t' + line) + '\n') for line in scene_html.splitlines())) iframe_html += '\n </div>\n </div>' if (len(no_geo_entities) > 0): iframe_html += ('\n <div class="d-flex flex-row justify-content-center">\n <a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">\n Show scene individuals with no geometric representation (%s)\n </a>\n </div>\n <div class="container-fluid collapse" id="noGeoCollapse">\n <div class="card card-body m-2">' % str(len(no_geo_entities))) iframe_html += ''.join(no_geo_entities) iframe_html += '\n </div>\n </div>' iframe_html += '\t</body>\n</html>' iframes.append(iframe_html) html += '\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>' pl_html.append(html) pl_html.append('\n\t</body>\n</html>') tmp_dir = tempfile.mkdtemp() index_path = (tmp_dir + '/index.html') with open(index_path, 'w') as file: for html in pl_html: file.write(html) for (i, iframe) in enumerate(iframes): frame_path = (((tmp_dir + '/scene') + str((i + 1))) + '.html') with open(frame_path, 'w') as file: for html in iframe: file.write(html) os.chdir(tmp_dir) threading.Thread(target=socketserver.TCPServer(('', 8000), http.server.SimpleHTTPRequestHandler).serve_forever).start() logger.info('Visualization is available at: http://localhost:8000') webbrowser.open('http://localhost:8000') return tmp_dir
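The tail of the visualizer above writes the generated HTML into a temporary directory, serves it from a background thread, and opens a browser. A minimal, self-contained sketch of that serve-and-open pattern (port 8000 as in the original; the `daemon=True` flag and the placeholder page content are additions for the sketch):

```python
# Minimal sketch of the serve-and-open pattern used at the end of the
# visualizer: write an index.html into a temp dir, serve that dir on a
# background thread, and point a browser at it.
import http.server
import os
import socketserver
import tempfile
import threading
import webbrowser

tmp_dir = tempfile.mkdtemp()
with open(os.path.join(tmp_dir, "index.html"), "w") as f:
    f.write("<html><body><h1>Scene plots</h1></body></html>")  # placeholder page

os.chdir(tmp_dir)  # SimpleHTTPRequestHandler serves the current directory
server = socketserver.TCPServer(("", 8000), http.server.SimpleHTTPRequestHandler)
threading.Thread(target=server.serve_forever, daemon=True).start()
webbrowser.open("http://localhost:8000")
```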
def _describe_entity(entity): '\n Describes the given traffic entity as an HTML list.\n :param entity: An object of an owlready2 class.\n :return: The HTML-representation of entity.\n ' cls = phenomena_extraction.get_most_specific_classes([entity]) label = ('<table class="m-2"><thead><tr><th>Individual</th><th>' + str(entity)) label += ((' (' + ', '.join(cls[0][1])) + ')</th></tr></thead><tbody><tr><td>is_a</td><td>') label += ', '.join([str(x) for x in entity.is_a]) label += '</td></tr>' for prop in entity.get_properties(): if (str(prop.python_name) not in _NO_PRINTING_PROPERTIES): label += '<tr>' label += '<td>' label += str(prop.python_name) label += '</td>' label += '<td>' label += ', '.join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]]) if (len(prop[entity]) > _MAX_PROPS_DISPLAY): label += '<text class="extended_ind_props">' label += (', '.join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + '</text>') label += '<text class="extended_ind_props_dots" style="display: none;">...</text>' label += '</td>' label += '</tr>' label += '</tbody></table>' return label
-1,019,858,865,922,286,700
Describes the given traffic entity as an HTML list. :param entity: An object of an owlready2 class. :return: The HTML-representation of entity.
auto/auto_visualizer/auto_visualizer.py
_describe_entity
lu-w/criticality-recognition
python
def _describe_entity(entity): '\n Describes the given traffic entity as an HTML list.\n :param entity: An object of an owlready2 class.\n :return: The HTML-representation of entity.\n ' cls = phenomena_extraction.get_most_specific_classes([entity]) label = ('<table class="m-2"><thead><tr><th>Individual</th><th>' + str(entity)) label += ((' (' + ', '.join(cls[0][1])) + ')</th></tr></thead><tbody><tr><td>is_a</td><td>') label += ', '.join([str(x) for x in entity.is_a]) label += '</td></tr>' for prop in entity.get_properties(): if (str(prop.python_name) not in _NO_PRINTING_PROPERTIES): label += '<tr>' label += '<td>' label += str(prop.python_name) label += '</td>' label += '<td>' label += ', '.join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]]) if (len(prop[entity]) > _MAX_PROPS_DISPLAY): label += '<text class="extended_ind_props">' label += (', '.join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + '</text>') label += '<text class="extended_ind_props_dots" style="display: none;">...</text>' label += '</td>' label += '</tr>' label += '</tbody></table>' return label
def _has_collision_with_bbs(existing_bbs, new_bb): '\n Checks if the new rectangle (new_bb) collides with some existing rectangles.\n ' a_left = min([x[0] for x in new_bb]) a_right = max([x[0] for x in new_bb]) a_bottom = min([x[1] for x in new_bb]) a_top = max([x[1] for x in new_bb]) for bb in existing_bbs: b_left = min([x[0] for x in bb]) b_right = max([x[0] for x in bb]) b_bottom = min([x[1] for x in bb]) b_top = max([x[1] for x in bb]) if ((a_left <= b_right) and (b_left <= a_right) and (a_top >= b_bottom) and (b_top >= a_bottom)): return True return False
-1,134,844,941,343,210,500
Checks if the new rectangle (new_bb) collides with some existing rectangles.
auto/auto_visualizer/auto_visualizer.py
_has_collision_with_bbs
lu-w/criticality-recognition
python
def _has_collision_with_bbs(existing_bbs, new_bb): '\n \n ' a_left = min([x[0] for x in new_bb]) a_right = max([x[0] for x in new_bb]) a_bottom = min([x[1] for x in new_bb]) a_top = max([x[1] for x in new_bb]) for bb in existing_bbs: b_left = min([x[0] for x in bb]) b_right = max([x[0] for x in bb]) b_bottom = min([x[1] for x in bb]) b_top = max([x[1] for x in bb]) if ((a_left <= b_right) and (b_left <= a_right) and (a_top >= b_bottom) and (b_top >= a_bottom)): return True return False
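A self-contained demo of the axis-aligned bounding-box test above: two rectangles, given as corner-point lists, collide exactly when their x-ranges and y-ranges both intersect. The sample coordinates are illustrative.

```python
# Rectangles are corner-point lists, as _has_collision_with_bbs expects.
def has_collision_with_bbs(existing_bbs, new_bb):
    a_left = min(x for x, _ in new_bb)
    a_right = max(x for x, _ in new_bb)
    a_bottom = min(y for _, y in new_bb)
    a_top = max(y for _, y in new_bb)
    for bb in existing_bbs:
        b_left = min(x for x, _ in bb)
        b_right = max(x for x, _ in bb)
        b_bottom = min(y for _, y in bb)
        b_top = max(y for _, y in bb)
        # Overlap iff the intervals intersect on both axes.
        if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
            return True
    return False

existing = [[(1, 0), (3, 0), (3, 1), (1, 1)]]
print(has_collision_with_bbs(existing, [(0, 0), (2, 0), (2, 1), (0, 1)]))  # True: overlaps on both axes
print(has_collision_with_bbs(existing, [(5, 5), (6, 5), (6, 6), (5, 6)]))  # False: disjoint on both axes
```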
def testGetExtendedContactDetailsStatistics(self): 'Test GetExtendedContactDetailsStatistics' pass
5,856,506,985,205,901,000
Test GetExtendedContactDetailsStatistics
test/test_get_extended_contact_details_statistics.py
testGetExtendedContactDetailsStatistics
Danilka/APIv3-python-library
python
def testGetExtendedContactDetailsStatistics(self): pass
def validate_method(self, method: Callable, params: Optional[Union[(list, dict)]], exclude: Iterable[str]=(), **kwargs: Any) -> Dict[(str, Any)]: '\n Validates params against method using ``pydantic`` validator.\n\n :param method: method to validate parameters against\n :param params: parameters to be validated\n :param exclude: parameter names to be excluded from validation\n\n :returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is\n :raises: ValidationError\n ' signature = self.signature(method, exclude) schema = self.build_validation_schema(signature) params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config) bound_params = self.bind(signature, params) try: obj = params_model(**bound_params.arguments) except pydantic.ValidationError as e: raise base.ValidationError(*e.errors()) from e return ({attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments)
1,600,686,918,532,966,700
Validates params against method using ``pydantic`` validator. :param method: method to validate parameters against :param params: parameters to be validated :param exclude: parameter names to be excluded from validation :returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is :raises: ValidationError
xjsonrpc/server/validators/pydantic.py
validate_method
bernhardkaindl/pjrpc
python
def validate_method(self, method: Callable, params: Optional[Union[(list, dict)]], exclude: Iterable[str]=(), **kwargs: Any) -> Dict[(str, Any)]: '\n Validates params against method using ``pydantic`` validator.\n\n :param method: method to validate parameters against\n :param params: parameters to be validated\n :param exclude: parameter names to be excluded from validation\n\n :returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is\n :raises: ValidationError\n ' signature = self.signature(method, exclude) schema = self.build_validation_schema(signature) params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config) bound_params = self.bind(signature, params) try: obj = params_model(**bound_params.arguments) except pydantic.ValidationError as e: raise base.ValidationError(*e.errors()) from e return ({attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments)
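A minimal, self-contained sketch of the idea behind `validate_method`: build a pydantic model from a function signature with `pydantic.create_model` and let it coerce and validate the call parameters. The xjsonrpc-specific pieces (parameter binding, exclusion lists, error wrapping, the coerce flag) are omitted, and `.dict()` assumes the pydantic-v1-style API.

```python
import inspect
from typing import Any

import pydantic


def validate_call(func, params: dict) -> dict:
    # Map each parameter to an (annotation, default) pair, as create_model expects.
    fields = {}
    for p in inspect.signature(func).parameters.values():
        annotation = p.annotation if p.annotation is not inspect.Parameter.empty else Any
        default = p.default if p.default is not inspect.Parameter.empty else ...  # ... marks "required"
        fields[p.name] = (annotation, default)
    model = pydantic.create_model(func.__name__, **fields)
    return model(**params).dict()  # raises pydantic.ValidationError on bad input


def add(a: int, b: int = 0) -> int:
    return a + b


print(validate_call(add, {"a": "1"}))  # {'a': 1, 'b': 0} -- '1' coerced to int
```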
@ft.lru_cache(maxsize=None) def build_validation_schema(self, signature: inspect.Signature) -> Dict[(str, Any)]: '\n Builds pydantic model based validation schema from method signature.\n\n :param signature: method signature to build schema for\n :returns: validation schema\n ' field_definitions = {} for param in signature.parameters.values(): if (param.kind is inspect.Parameter.VAR_KEYWORD): field_definitions[param.name] = ((Optional[Dict[(str, param.annotation)]] if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else None)) elif (param.kind is inspect.Parameter.VAR_POSITIONAL): field_definitions[param.name] = ((Optional[List[param.annotation]] if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else None)) else: field_definitions[param.name] = ((param.annotation if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else ...)) return field_definitions
773,218,417,402,197,600
Builds pydantic model based validation schema from method signature. :param signature: method signature to build schema for :returns: validation schema
xjsonrpc/server/validators/pydantic.py
build_validation_schema
bernhardkaindl/pjrpc
python
@ft.lru_cache(maxsize=None) def build_validation_schema(self, signature: inspect.Signature) -> Dict[(str, Any)]: '\n Builds pydantic model based validation schema from method signature.\n\n :param signature: method signature to build schema for\n :returns: validation schema\n ' field_definitions = {} for param in signature.parameters.values(): if (param.kind is inspect.Parameter.VAR_KEYWORD): field_definitions[param.name] = ((Optional[Dict[(str, param.annotation)]] if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else None)) elif (param.kind is inspect.Parameter.VAR_POSITIONAL): field_definitions[param.name] = ((Optional[List[param.annotation]] if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else None)) else: field_definitions[param.name] = ((param.annotation if (param.annotation is not inspect.Parameter.empty) else Any), (param.default if (param.default is not inspect.Parameter.empty) else ...)) return field_definitions
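A small demo of the schema shape the method above produces for variadic parameters: `*args` maps to `Optional[List[...]]` and `**kwargs` to `Optional[Dict[str, ...]]`, both defaulting to `None`, while ordinary parameters keep their annotation and default (`...` when required). The fallback for unannotated variadic parameters is simplified here.

```python
import inspect
from typing import Any, Dict, List, Optional


def schema_for(func):
    fields = {}
    for p in inspect.signature(func).parameters.values():
        ann = p.annotation if p.annotation is not inspect.Parameter.empty else Any
        if p.kind is inspect.Parameter.VAR_POSITIONAL:
            fields[p.name] = (Optional[List[ann]], None)
        elif p.kind is inspect.Parameter.VAR_KEYWORD:
            fields[p.name] = (Optional[Dict[str, ann]], None)
        else:
            fields[p.name] = (ann, p.default if p.default is not inspect.Parameter.empty else ...)
    return fields


def example(a: int, *rest: str, **extra: float) -> None:
    pass


print(schema_for(example))
# roughly: {'a': (int, Ellipsis), 'rest': (Optional[List[str]], None),
#           'extra': (Optional[Dict[str, float]], None)}
```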
def enum_lookup(enum_type_name: str, value: int) -> str: 'return a string which is the short name of the enum value\n (truncating off the common prefix) ' fields = sdb.get_type(enum_type_name).type.enumerators enum_string: str = fields[value].name prefix = os.path.commonprefix([f[0] for f in fields]) return enum_string[(prefix.rfind('_') + 1):]
3,412,445,798,018,359,300
return a string which is the short name of the enum value (truncating off the common prefix)
sdb/commands/zfs/internal/__init__.py
enum_lookup
PaulZ-98/sdb
python
def enum_lookup(enum_type_name: str, value: int) -> str: 'return a string which is the short name of the enum value\n (truncating off the common prefix) ' fields = sdb.get_type(enum_type_name).type.enumerators enum_string: str = fields[value].name prefix = os.path.commonprefix([f[0] for f in fields]) return enum_string[(prefix.rfind('_') + 1):]
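The prefix-stripping logic in `enum_lookup`, illustrated without the sdb/drgn dependency: take the common prefix of all enumerator names and cut everything up to (and including) its last underscore. The sample names are illustrative.

```python
import os


def short_enum_name(names, value):
    enum_string = names[value]
    prefix = os.path.commonprefix(names)  # e.g. 'ZIO_TYPE_'
    return enum_string[prefix.rfind("_") + 1:]


names = ["ZIO_TYPE_NULL", "ZIO_TYPE_READ", "ZIO_TYPE_WRITE"]
print(short_enum_name(names, 1))  # 'READ'
```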
def __iter__(cls): 'Getting subclasses which usually represent resolutions' for attr in vars(cls): if (not attr.startswith('_')): (yield cls[attr])
-6,765,760,589,805,338,000
Getting subclasses which usually represent resolutions
wetterdienst/util/parameter.py
__iter__
earthobservations/python_dwd
python
def __iter__(cls): for attr in vars(cls): if (not attr.startswith('_')): (yield cls[attr])
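A minimal sketch of the metaclass-iteration pattern above: iterating over the class yields its public attributes. The `__getitem__` hook is an assumption (the original's `cls[attr]` implies the metaclass defines one), and the `Resolution` example class is illustrative.

```python
class IterableMeta(type):
    def __iter__(cls):
        for attr in vars(cls):
            if not attr.startswith("_"):  # skip dunders/privates
                yield cls[attr]

    def __getitem__(cls, item):  # assumed companion hook backing cls[attr]
        return getattr(cls, item)


class Resolution(metaclass=IterableMeta):
    DAILY = "daily"
    HOURLY = "hourly"


print(list(Resolution))  # ['daily', 'hourly']
```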
def notas(*n, sit=False): '\n Analyzes the grades and standing of several students.\n :param n: One or more student grades (accepts several)\n :param sit: Optional flag indicating whether or not to include the standing.\n :return: Dictionary with summary information about the class standing.\n ' dic = dict() dic['total'] = len(n) dic['maior'] = max(n) dic['menor'] = min(n) dic['media'] = (sum(n) / len(n)) if sit: if (dic['media'] < 5): dic['situação'] = 'Critica' elif (dic['media'] < 7): dic['situação'] = 'Razoável' else: dic['situação'] = 'Boa' return dic
575,371,391,845,530,560
Analyzes the grades and standing of several students. :param n: One or more student grades (accepts several) :param sit: Optional flag indicating whether or not to include the standing. :return: Dictionary with summary information about the class standing.
Modulo-03/ex105/ex105.py
notas
Matheus-Henrique-Burey/Curso-de-Python
python
def notas(*n, sit=False): '\n Analyzes the grades and standing of several students.\n :param n: One or more student grades (accepts several)\n :param sit: Optional flag indicating whether or not to include the standing.\n :return: Dictionary with summary information about the class standing.\n ' dic = dict() dic['total'] = len(n) dic['maior'] = max(n) dic['menor'] = min(n) dic['media'] = (sum(n) / len(n)) if sit: if (dic['media'] < 5): dic['situação'] = 'Critica' elif (dic['media'] < 7): dic['situação'] = 'Razoável' else: dic['situação'] = 'Boa' return dic
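A usage example for the corrected function above (the original read an undefined name `media`; the fix reads the stored average from the dictionary):

```python
# Assumes the corrected notas() from the record above is in scope.
result = notas(5.5, 9.5, 10, 6.5, sit=True)
print(result)
# {'total': 4, 'maior': 10, 'menor': 5.5, 'media': 7.875, 'situação': 'Boa'}
```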
def main(): '\n ----------\n Author: Damon Gwinn\n ----------\n Entry point. Trains a model specified by command line arguments\n ----------\n ' args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print('WARNING: Forced CPU usage, expect model to perform slower') print('') eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}" args.output_dir = ((args.output_dir + '/') + eventid) os.makedirs(args.output_dir, exist_ok=True) params_file = os.path.join(args.output_dir, 'model_params.txt') write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, 'weights') os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, 'results') os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, 'results.csv') best_loss_file = os.path.join(results_folder, 'best_loss_weights.pickle') best_acc_file = os.path.join(results_folder, 'best_acc_weights.pickle') best_loss_critic_file = os.path.join(results_folder, 'best_loss_critic_weights.pickle') best_acc_critic_file = os.path.join(results_folder, 'best_acc_critic_weights.pickle') best_loss_classifier_file = os.path.join(results_folder, 'best_loss_classifier_weights.pickle') best_acc_classifier_file = os.path.join(results_folder, 'best_acc_classifier_weights.pickle') best_text = os.path.join(results_folder, 'best_epochs.txt') if args.no_tensorboard: tensorboard_summary = None else: from torch.utils.tensorboard import SummaryWriter tensorboad_dir = os.path.join(args.output_dir, ('tensorboard/' + eventid)) tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) if (args.interval and args.octave): print('octave interval dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.octave and args.fusion_encoding and args.absolute): print('absolute dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute) pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.interval and (not args.octave)): print('interval dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence, condition_token=args.condition_token, 
interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.octave and args.fusion_encoding): print('Octave_fusion dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): print('Octave dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif args.logscale: print('logscvale dataset') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval=args.interval, octave=args.octave, logscale=args.logscale, absolute=args.absolute) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval=args.interval, octave=args.octave, logscale=args.logscale, absolute=args.absolute) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) else: (classic_train, classic_val, classic_test) = create_epiano_datasets(args.classic_input_dir, args.max_sequence, condition_token=args.condition_token, octave=args.octave) pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token=args.condition_token, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) if (args.data == 'both'): print('Dataset: 
both') train_dataset = torch.utils.data.ConcatDataset([classic_train, pop_train]) val_dataset = torch.utils.data.ConcatDataset([classic_val, pop_valid]) elif (args.data == 'classic'): print('Dataset: classic') train_dataset = torch.utils.data.ConcatDataset([classic_train]) val_dataset = torch.utils.data.ConcatDataset([classic_val]) else: print('Dataset: pop') train_dataset = torch.utils.data.ConcatDataset([pop_train]) val_dataset = torch.utils.data.ConcatDataset([pop_valid]) test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test]) train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute, logscale=args.logscale).to(get_device()) critic = MusicDiscriminator(n_layers=(args.n_layers // 2), num_heads=(args.num_heads // 2), d_model=(args.d_model // 2), dim_feedforward=(args.dim_feedforward // 2), dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr).to(get_device()) classifier = MusicDiscriminator(n_layers=(args.n_layers // 2), num_heads=(args.num_heads // 2), d_model=(args.d_model // 2), dim_feedforward=(args.dim_feedforward // 2), dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr).to(get_device()) if args.creative: classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle')) start_epoch = BASELINE_EPOCH if (args.continue_weights is not None): if (args.continue_epoch is None): print('ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights') return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif (args.continue_epoch is not None): print('ERROR: Need continue weights (-continue_weights) when using continue_epoch') return if (args.lr is None): if (args.continue_epoch is None): init_step = 0 else: init_step = (args.continue_epoch * len(train_loader)) lr = LR_DEFAULT_START lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr if (args.interval and args.octave): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL) elif (args.interval and (not args.octave)): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL) elif (args.octave and args.fusion_encoding and args.absolute): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE) elif (args.octave and args.fusion_encoding): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE) elif args.logscale: eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE) else: eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) if (args.ce_smoothing is None): train_loss_func = eval_loss_func elif (args.interval and args.octave): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL) elif (args.interval and 
(not args.octave)): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL) elif ((not args.interval) and args.octave and args.fusion_encoding and args.absolute): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE) elif ((not args.interval) and args.octave and args.fusion_encoding): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE) elif args.logscale: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE) else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD) classifier_loss_func = nn.MSELoss() opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if (args.lr is None): lr_scheduler = LambdaLR(opt, lr_stepper.step) critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step) classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step) else: lr_scheduler = None best_eval_acc = 0.0 best_eval_acc_epoch = (- 1) best_eval_loss = float('inf') best_eval_loss_epoch = (- 1) if (not os.path.isfile(results_file)): with open(results_file, 'w', newline='') as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) for epoch in range(start_epoch, args.epochs): if (epoch >= BASELINE_EPOCH): print(SEPERATOR) print('NEW EPOCH:', (epoch + 1)) print(SEPERATOR) print('') (train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity) = train_epoch((epoch + 1), model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args) print(SEPERATOR) print('Evaluating:') else: print(SEPERATOR) print('Baseline model evaluation (Epoch 0):') (eval_loss, eval_acc) = eval_model(model, val_loader, eval_loss_func, args) lr = get_lr(opt) print('Epoch:', (epoch + 1)) print('Avg train loss:', train_loss) print('Avg train acc:', train_acc) print('Avg eval loss:', eval_loss) print('Avg eval acc:', eval_acc) print(SEPERATOR) print('') new_best = False if (eval_acc > best_eval_acc): best_eval_acc = eval_acc best_eval_acc_epoch = (epoch + 1) torch.save(model.state_dict(), best_acc_file) torch.save(critic.state_dict(), best_acc_critic_file) torch.save(classifier.state_dict(), best_acc_classifier_file) new_best = True if (eval_loss < best_eval_loss): best_eval_loss = eval_loss best_eval_loss_epoch = (epoch + 1) torch.save(model.state_dict(), best_loss_file) torch.save(critic.state_dict(), best_loss_critic_file) torch.save(classifier.state_dict(), best_loss_classifier_file) new_best = True if new_best: with open(best_text, 'w') as o_stream: print('Best eval acc epoch:', best_eval_acc_epoch, file=o_stream) print('Best eval acc:', best_eval_acc, file=o_stream) print('') print('Best eval loss epoch:', best_eval_loss_epoch, file=o_stream) print('Best eval loss:', best_eval_loss, file=o_stream) if (not args.no_tensorboard): 
tensorboard_summary.add_scalar('Avg_CE_loss/train', train_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Avg_CE_loss/eval', eval_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Accuracy/train', train_acc, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Accuracy/eval', eval_acc, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Learn_rate/train', lr, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Critic_loss/train', dis_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Gen_loss/train', gen_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Creativity_loss/train', cre_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('GAN_accuracy/train', gan_accuracy, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Class_accuracy/train', class_accuracy, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Creativity/train', creativity, global_step=(epoch + 1)) tensorboard_summary.flush() if (((epoch + 1) % args.weight_modulus) == 0): epoch_str = str((epoch + 1)).zfill(PREPEND_ZEROS_WIDTH) path = os.path.join(weights_folder, (('epoch_' + epoch_str) + '.pickle')) torch.save(model.state_dict(), path) with open(results_file, 'a', newline='') as o_stream: writer = csv.writer(o_stream) writer.writerow([(epoch + 1), lr, train_loss, train_acc, eval_loss, eval_acc]) if (not args.no_tensorboard): tensorboard_summary.flush() return
3,237,435,461,219,423,700
---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ----------
train.py
main
yeong35/MusicTransformer-Pytorch
python
def main(): '\n ----------\n Author: Damon Gwinn\n ----------\n Entry point. Trains a model specified by command line arguments\n ----------\n ' args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print('WARNING: Forced CPU usage, expect model to perform slower') print() eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}" args.output_dir = ((args.output_dir + '/') + eventid) os.makedirs(args.output_dir, exist_ok=True) params_file = os.path.join(args.output_dir, 'model_params.txt') write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, 'weights') os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, 'results') os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, 'results.csv') best_loss_file = os.path.join(results_folder, 'best_loss_weights.pickle') best_acc_file = os.path.join(results_folder, 'best_acc_weights.pickle') best_loss_critic_file = os.path.join(results_folder, 'best_loss_critic_weights.pickle') best_acc_critic_file = os.path.join(results_folder, 'best_acc_critic_weights.pickle') best_loss_classifier_file = os.path.join(results_folder, 'best_loss_classifier_weights.pickle') best_acc_classifier_file = os.path.join(results_folder, 'best_acc_classifier_weights.pickle') best_text = os.path.join(results_folder, 'best_epochs.txt') if args.no_tensorboard: tensorboard_summary = None else: from torch.utils.tensorboard import SummaryWriter tensorboad_dir = os.path.join(args.output_dir, ('tensorboard/' + eventid)) tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) if (args.interval and args.octave): print('octave interval dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.octave and args.fusion_encoding and args.absolute): print('absolute dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute) pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.interval and (not args.octave)): print('interval dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence, condition_token=args.condition_token, 
interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif (args.octave and args.fusion_encoding): print('Octave_fusion dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): print('Octave dataset!!') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval=args.interval, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) elif args.logscale: print('logscvale dataset') (classic_train, classic_val, classic_test) = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval=args.interval, octave=args.octave, logscale=args.logscale, absolute=args.absolute) pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval=args.interval, octave=args.octave, logscale=args.logscale, absolute=args.absolute) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) else: (classic_train, classic_val, classic_test) = create_epiano_datasets(args.classic_input_dir, args.max_sequence, condition_token=args.condition_token, octave=args.octave) pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token=args.condition_token, octave=args.octave) (pop_train, pop_valid, pop_test) = torch.utils.data.random_split(pop909_dataset, [int((len(pop909_dataset) * 0.8)), int((len(pop909_dataset) * 0.1)), ((len(pop909_dataset) - int((len(pop909_dataset) * 0.8))) - int((len(pop909_dataset) * 0.1)))], generator=torch.Generator().manual_seed(42)) if (args.data == 'both'): print('Dataset: 
both') train_dataset = torch.utils.data.ConcatDataset([classic_train, pop_train]) val_dataset = torch.utils.data.ConcatDataset([classic_val, pop_valid]) elif (args.data == 'classic'): print('Dataset: classic') train_dataset = torch.utils.data.ConcatDataset([classic_train]) val_dataset = torch.utils.data.ConcatDataset([classic_val]) else: print('Dataset: pop') train_dataset = torch.utils.data.ConcatDataset([pop_train]) val_dataset = torch.utils.data.ConcatDataset([pop_valid]) test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test]) train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, condition_token=args.condition_token, interval=args.interval, octave=args.octave, fusion=args.fusion_encoding, absolute=args.absolute, logscale=args.logscale).to(get_device()) critic = MusicDiscriminator(n_layers=(args.n_layers // 2), num_heads=(args.num_heads // 2), d_model=(args.d_model // 2), dim_feedforward=(args.dim_feedforward // 2), dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr).to(get_device()) classifier = MusicDiscriminator(n_layers=(args.n_layers // 2), num_heads=(args.num_heads // 2), d_model=(args.d_model // 2), dim_feedforward=(args.dim_feedforward // 2), dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr).to(get_device()) if args.creative: classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle')) start_epoch = BASELINE_EPOCH if (args.continue_weights is not None): if (args.continue_epoch is None): print('ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights') return else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif (args.continue_epoch is not None): print('ERROR: Need continue weights (-continue_weights) when using continue_epoch') return if (args.lr is None): if (args.continue_epoch is None): init_step = 0 else: init_step = (args.continue_epoch * len(train_loader)) lr = LR_DEFAULT_START lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr if (args.interval and args.octave): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL) elif (args.interval and (not args.octave)): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL) elif (args.octave and args.fusion_encoding and args.absolute): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE) elif (args.octave and args.fusion_encoding): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE) elif args.logscale: eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE) else: eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD) if (args.ce_smoothing is None): train_loss_func = eval_loss_func elif (args.interval and args.octave): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL) elif (args.interval and 
(not args.octave)): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL) elif ((not args.interval) and args.octave and args.fusion_encoding and args.absolute): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE) elif ((not args.interval) and args.octave and args.fusion_encoding): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION) elif ((not args.interval) and args.octave and (not args.fusion_encoding)): train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE) elif args.logscale: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE) else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD) classifier_loss_func = nn.MSELoss() opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if (args.lr is None): lr_scheduler = LambdaLR(opt, lr_stepper.step) critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step) classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step) else: lr_scheduler = None best_eval_acc = 0.0 best_eval_acc_epoch = (- 1) best_eval_loss = float('inf') best_eval_loss_epoch = (- 1) if (not os.path.isfile(results_file)): with open(results_file, 'w', newline='') as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) for epoch in range(start_epoch, args.epochs): if (epoch >= BASELINE_EPOCH): print(SEPERATOR) print('NEW EPOCH:', (epoch + 1)) print(SEPERATOR) print() (train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity) = train_epoch((epoch + 1), model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args) print(SEPERATOR) print('Evaluating:') else: print(SEPERATOR) print('Baseline model evaluation (Epoch 0):') (eval_loss, eval_acc) = eval_model(model, val_loader, eval_loss_func, args) lr = get_lr(opt) print('Epoch:', (epoch + 1)) print('Avg train loss:', train_loss) print('Avg train acc:', train_acc) print('Avg eval loss:', eval_loss) print('Avg eval acc:', eval_acc) print(SEPERATOR) print() new_best = False if (eval_acc > best_eval_acc): best_eval_acc = eval_acc best_eval_acc_epoch = (epoch + 1) torch.save(model.state_dict(), best_acc_file) torch.save(critic.state_dict(), best_acc_critic_file) torch.save(classifier.state_dict(), best_acc_classifier_file) new_best = True if (eval_loss < best_eval_loss): best_eval_loss = eval_loss best_eval_loss_epoch = (epoch + 1) torch.save(model.state_dict(), best_loss_file) torch.save(critic.state_dict(), best_loss_critic_file) torch.save(classifier.state_dict(), best_loss_classifier_file) new_best = True if new_best: with open(best_text, 'w') as o_stream: print('Best eval acc epoch:', best_eval_acc_epoch, file=o_stream) print('Best eval acc:', best_eval_acc, file=o_stream) print() print('Best eval loss epoch:', best_eval_loss_epoch, file=o_stream) print('Best eval loss:', best_eval_loss, file=o_stream) if (not args.no_tensorboard):
tensorboard_summary.add_scalar('Avg_CE_loss/train', train_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Avg_CE_loss/eval', eval_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Accuracy/train', train_acc, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Accuracy/eval', eval_acc, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Learn_rate/train', lr, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Critic_loss/train', dis_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Gen_loss/train', gen_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Creativity_loss/train', cre_loss, global_step=(epoch + 1)) tensorboard_summary.add_scalar('GAN_accuracy/train', gan_accuracy, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Class_accuracy/train', class_accuracy, global_step=(epoch + 1)) tensorboard_summary.add_scalar('Creativity/train', creativity, global_step=(epoch + 1)) tensorboard_summary.flush() if (((epoch + 1) % args.weight_modulus) == 0): epoch_str = str((epoch + 1)).zfill(PREPEND_ZEROS_WIDTH) path = os.path.join(weights_folder, (('epoch_' + epoch_str) + '.pickle')) torch.save(model.state_dict(), path) with open(results_file, 'a', newline='') as o_stream: writer = csv.writer(o_stream) writer.writerow([(epoch + 1), lr, train_loss, train_acc, eval_loss, eval_acc]) if (not args.no_tensorboard): tensorboard_summary.flush() return
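The `LrStepTracker` + `LambdaLR` pairing above presumably implements the inverse-square-root warmup schedule from "Attention Is All You Need"; a plain-Python sketch of that schedule follows (the repo's exact tracker may differ in details):

```python
def noam_lr(step, d_model=512, warmup=4000):
    # Linear warmup for `warmup` steps, then 1/sqrt(step) decay.
    step = max(step, 1)  # avoid 0 ** -0.5 on the very first step
    return d_model ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)


for s in (1, 1000, 4000, 16000):
    print(s, round(noam_lr(s), 6))
# Peaks at step == warmup, where the two terms inside min() coincide.
```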
def recreation_check(self, metadata): 'Check that compileall recreates bytecode when the new metadata is\n used.' if os.environ.get('SOURCE_DATE_EPOCH'): raise unittest.SkipTest('SOURCE_DATE_EPOCH is set') py_compile.compile(self.source_path) self.assertEqual(*self.timestamp_metadata()) with open(self.bc_path, 'rb') as file: bc = file.read()[len(metadata):] with open(self.bc_path, 'wb') as file: file.write(metadata) file.write(bc) self.assertNotEqual(*self.timestamp_metadata()) compileall.compile_dir(self.directory, force=False, quiet=True) self.assertTrue(*self.timestamp_metadata())
-8,420,914,637,662,615,000
Check that compileall recreates bytecode when the new metadata is used.
python/Lib/test/test_compileall.py
recreation_check
jasam/ciclo_vida_datos_scraping
python
def recreation_check(self, metadata): 'Check that compileall recreates bytecode when the new metadata is\n used.' if os.environ.get('SOURCE_DATE_EPOCH'): raise unittest.SkipTest('SOURCE_DATE_EPOCH is set') py_compile.compile(self.source_path) self.assertEqual(*self.timestamp_metadata()) with open(self.bc_path, 'rb') as file: bc = file.read()[len(metadata):] with open(self.bc_path, 'wb') as file: file.write(metadata) file.write(bc) self.assertNotEqual(*self.timestamp_metadata()) compileall.compile_dir(self.directory, force=False, quiet=True) self.assertTrue(*self.timestamp_metadata())
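What `recreation_check` is poking at: on CPython 3.7+, a timestamp-based `.pyc` begins with a 16-byte header (magic number, a flags word, then source mtime and size), and `compileall` compares those last two fields against the source file. A sketch of reading that header; the `some_module.py` path is a placeholder:

```python
import importlib.util
import py_compile
import struct

pyc = py_compile.compile("some_module.py")  # placeholder: any existing source file
with open(pyc, "rb") as f:
    magic, flags, mtime, size = struct.unpack("<4sLLL", f.read(16))
print(magic == importlib.util.MAGIC_NUMBER, flags, mtime, size)
```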
def resample(df, rule, on=None, groupby=(), aggregation='mean', reset_index=True, time_index=None): 'pd.DataFrame.resample adapter.\n\n Call the `df.resample` method on the given time_index\n and afterwards call the indicated aggregation.\n\n Optionally group the dataframe by the indicated columns before\n performing the resampling.\n\n If groupby option is used, the result is a multi-index dataframe.\n\n Args:\n df (pandas.DataFrame):\n DataFrame to resample.\n rule (str or int):\n The offset string or object representing target conversion or an\n integer value that will be interpreted as the number of seconds.\n on (str or None):\n Name of the column to use as the time index. If ``None`` is given, the\n DataFrame index is used.\n groupby (list):\n Optional list of columns to group by.\n aggregation (callable or str):\n Function or name of the function to use for the aggregation. If a name is given, it\n can either be one of the standard pandas aggregation functions or the fully qualified\n name of a python function that will be imported and used.\n reset_index (bool):\n Whether to reset the index after aggregating.\n time_index (str or None):\n Deprecated: This has been renamed to `on`.\n Name of the column to use as the time index. If ``None`` is given, the\n DataFrame index is used.\n\n Returns:\n pandas.DataFrame:\n resampled dataframe\n ' if ((on is None) and (time_index is not None)): message = 'resample `time_series` argument deprecated and will be removed in future versions of MLPrimitives. Please use `on` instead.' warnings.warn(message, DeprecationWarning, stacklevel=2) on = time_index if groupby: df = df.groupby(groupby) if isinstance(rule, int): rule = '{}s'.format(rule) dtir = df.resample(rule, on=on) if ((not callable(aggregation)) and (aggregation not in _RESAMPLE_AGGS)): try: aggregation = import_object(aggregation) except (AttributeError, ImportError, ValueError): pass df = dtir.aggregate(aggregation) for name in df.index.names: if (name in df): del df[name] if reset_index: df.reset_index(inplace=True) return df
-5,069,791,470,592,100,000
pd.DataFrame.resample adapter. Call the `df.resample` method on the given time_index and afterwards call the indicated aggregation. Optionally group the dataframe by the indicated columns before performing the resampling. If groupby option is used, the result is a multi-index dataframe. Args: df (pandas.DataFrame): DataFrame to resample. rule (str or int): The offset string or object representing target conversion or an integer value that will be interpreted as the number of seconds. on (str or None): Name of the column to use as the time index. If ``None`` is given, the DataFrame index is used. groupby (list): Optional list of columns to group by. aggregation (callable or str): Function or name of the function to use for the aggregation. If a name is given, it can either be one of the standard pandas aggregation functions or the fully qualified name of a python function that will be imported and used. reset_index (bool): Whether to reset the index after aggregating. time_index (str or None): Deprecated: This has been renamed to `on`. Name of the column to use as the time index. If ``None`` is given, the DataFrame index is used. Returns: pandas.DataFrame: resampled dataframe
mlprimitives/adapters/pandas.py
resample
AlexanderGeiger/MLPrimitives
python
def resample(df, rule, on=None, groupby=(), aggregation='mean', reset_index=True, time_index=None): 'pd.DataFrame.resample adapter.\n\n Call the `df.resample` method on the given time_index\n and afterwards call the indicated aggregation.\n\n Optionally group the dataframe by the indicated columns before\n performing the resampling.\n\n If groupby option is used, the result is a multi-index dataframe.\n\n Args:\n df (pandas.DataFrame):\n DataFrame to resample.\n rule (str or int):\n The offset string or object representing target conversion or an\n integer value that will be interpreted as the number of seconds.\n on (str or None):\n Name of the column to use as the time index. If ``None`` is given, the\n DataFrame index is used.\n groupby (list):\n Optional list of columns to group by.\n aggregation (callable or str):\n Function or name of the function to use for the aggregation. If a name is given, it\n can either be one of the standard pandas aggregation functions or the fully qualified\n name of a python function that will be imported and used.\n reset_index (bool):\n Whether to reset the index after aggregating.\n time_index (str or None):\n Deprecated: This has been renamed to `on`.\n Name of the column to use as the time index. If ``None`` is given, the\n DataFrame index is used.\n\n Returns:\n pandas.DataFrame:\n resampled dataframe\n ' if ((on is None) and (time_index is not None)): message = 'resample `time_series` argument deprecated and will be removed in future versions of MLPrimitives. Please use `on` instead.' warnings.warn(message, DeprecationWarning, stacklevel=2) on = time_index if groupby: df = df.groupby(groupby) if isinstance(rule, int): rule = '{}s'.format(rule) dtir = df.resample(rule, on=on) if ((not callable(aggregation)) and (aggregation not in _RESAMPLE_AGGS)): try: aggregation = import_object(aggregation) except (AttributeError, ImportError, ValueError): pass df = dtir.aggregate(aggregation) for name in df.index.names: if (name in df): del df[name] if reset_index: df.reset_index(inplace=True) return df
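A quick usage sketch for the adapter above (pandas and an installed mlprimitives are assumed; an integer `rule` is read as seconds, and `groupby` yields one resampled series per group):

```python
import pandas as pd

from mlprimitives.adapters.pandas import resample  # the function defined above

df = pd.DataFrame({
    "time": pd.date_range("2024-01-01", periods=6, freq="30s"),
    "group": ["a", "a", "a", "b", "b", "b"],
    "value": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
})

out = resample(df, rule=60, on="time", groupby=["group"], aggregation="mean")
print(out)  # one row per (group, 60-second bucket) with the mean of `value`
```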
def _join_names(names): 'Join the names of a multi-level index with an underscore.' levels = (str(name) for name in names if (name != '')) return '_'.join(levels)
-7,429,011,445,859,655,000
Join the names of a multi-level index with an underscore.
mlprimitives/adapters/pandas.py
_join_names
AlexanderGeiger/MLPrimitives
python
def _join_names(names): levels = (str(name) for name in names if (name != '')) return '_'.join(levels)
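A self-contained demo of `_join_names` flattening the MultiIndex columns that a `groupby(...).agg([...])` produces; the sample frame is illustrative:

```python
import pandas as pd


def _join_names(names):
    levels = (str(name) for name in names if name != "")
    return "_".join(levels)


df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
agg = df.groupby("g").agg(["mean", "max"])           # columns: ('v', 'mean'), ('v', 'max')
agg.columns = [_join_names(c) for c in agg.columns]  # flatten, dropping empty levels
print(agg.columns.tolist())  # ['v_mean', 'v_max']
```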